From c0826d818e47d5b98d3c2310543f3e6ef284aa36 Mon Sep 17 00:00:00 2001 From: Keenan Weaver Date: Mon, 5 May 2025 19:17:59 -0500 Subject: [PATCH 001/220] soundfont-generaluser: 1.471 -> 2.0.2-unstable-2025-04-21, adopt and modernize --- .../so/soundfont-generaluser/package.nix | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/pkgs/by-name/so/soundfont-generaluser/package.nix b/pkgs/by-name/so/soundfont-generaluser/package.nix index df1a24af9589..72be24893163 100644 --- a/pkgs/by-name/so/soundfont-generaluser/package.nix +++ b/pkgs/by-name/so/soundfont-generaluser/package.nix @@ -1,29 +1,31 @@ { lib, stdenv, - fetchzip, + fetchFromGitHub, }: -stdenv.mkDerivation rec { +stdenv.mkDerivation (finalAttrs: { pname = "generaluser"; - version = "1.471"; + version = "2.0.2-unstable-2025-04-21"; - # we can't use fetchurl since stdenv does not handle unpacking *.zip's by default. - src = fetchzip { - # Linked on https://www.schristiancollins.com/generaluser.php: - url = "https://www.dropbox.com/s/4x27l49kxcwamp5/GeneralUser_GS_${version}.zip"; - sha256 = "sha256-lwUlWubXiVZ8fijKuNF54YQjT0uigjNAbjKaNjmC51s="; + src = fetchFromGitHub { + owner = "mrbumpy409"; + repo = "GeneralUser-GS"; + rev = "74d4cfe4042a61ddab17d4f86dbccd9d2570eb2a"; + hash = "sha256-I27l8F/BFAo6YSNbtAV14AKVsPIJTHFG2eGudseWmjo="; }; installPhase = '' - install -Dm644 GeneralUser*.sf2 $out/share/soundfonts/GeneralUser-GS.sf2 + runHook preInstall + install -Dm644 $src/GeneralUser-GS.sf2 $out/share/soundfonts/GeneralUser-GS.sf2 + runHook postInstall ''; - meta = with lib; { - description = "SoundFont bank featuring 259 instrument presets and 11 drum kits"; + meta = { + description = "General MIDI SoundFont with a low memory footprint"; homepage = "https://www.schristiancollins.com/generaluser.php"; - license = licenses.generaluser; - platforms = platforms.all; - maintainers = [ ]; + license = lib.licenses.generaluser; + maintainers = with lib.maintainers; [ keenanweaver ]; + platforms = lib.platforms.all; }; -} +}) From ae078850cc437c3301b7b93c3820247dcf5dec0c Mon Sep 17 00:00:00 2001 From: emaryn Date: Fri, 9 May 2025 12:56:30 +0800 Subject: [PATCH 002/220] museeks: 0.13.1 -> 0.20.9 Diff: https://github.com/martpie/museeks/compare/0.13.1...0.20.9 --- pkgs/by-name/mu/museeks/package.nix | 63 +++++++++++++++++++---------- 1 file changed, 41 insertions(+), 22 deletions(-) diff --git a/pkgs/by-name/mu/museeks/package.nix b/pkgs/by-name/mu/museeks/package.nix index eb551c222a8b..86d0f0e85459 100644 --- a/pkgs/by-name/mu/museeks/package.nix +++ b/pkgs/by-name/mu/museeks/package.nix @@ -1,41 +1,60 @@ { lib, + stdenv, fetchurl, - appimageTools, + dpkg, + autoPatchelfHook, + webkitgtk_4_1, + libsoup_3, + glib, + gtk3, + cairo, + dbus, + gdk-pixbuf, + nix-update-script, }: -let +stdenv.mkDerivation (finalAttrs: { pname = "museeks"; - version = "0.13.1"; + version = "0.20.9"; src = fetchurl { - url = "https://github.com/martpie/museeks/releases/download/${version}/museeks-x86_64.AppImage"; - hash = "sha256-LvunhCFmpv00TnXzWjp3kQUAhoKpmp6pqKgcaUqZV+o="; + url = "https://github.com/martpie/museeks/releases/download/${finalAttrs.version}/Museeks_${finalAttrs.version}_amd64.deb"; + hash = "sha256-7jRgMpfQTJr3yW3YAPTnPSvtrqumScN3Tr7YXQX3Fi8="; }; - appimageContents = appimageTools.extractType2 { - inherit pname version src; - }; -in -appimageTools.wrapType2 { - inherit pname version src; + nativeBuildInputs = [ + dpkg + autoPatchelfHook + ]; - extraInstallCommands = '' - mkdir -p $out/share/${pname} - cp -a 
${appimageContents}/{locales,resources} $out/share/${pname} - cp -a ${appimageContents}/usr/share/icons $out/share/ - install -Dm 444 ${appimageContents}/${pname}.desktop -t $out/share/applications + buildInputs = [ + dbus + webkitgtk_4_1 + libsoup_3 + gtk3 + cairo + gdk-pixbuf + glib + (lib.getLib stdenv.cc.cc) + ]; - substituteInPlace $out/share/applications/${pname}.desktop \ - --replace 'Exec=AppRun' 'Exec=${pname}' + installPhase = '' + runHook preInstall + + cp -r usr $out + + runHook postInstall ''; - meta = with lib; { + passthru.updateScript = nix-update-script { }; + + meta = { description = "Simple, clean and cross-platform music player"; homepage = "https://github.com/martpie/museeks"; - license = licenses.mit; + license = lib.licenses.mit; platforms = [ "x86_64-linux" ]; - maintainers = with maintainers; [ zendo ]; + maintainers = with lib.maintainers; [ zendo ]; mainProgram = "museeks"; }; -} +}) From a9895197dc99fc92e2f79a7ff6ce315c53f07c43 Mon Sep 17 00:00:00 2001 From: Sinjin23000 Date: Fri, 9 May 2025 23:50:09 -0700 Subject: [PATCH 003/220] maintainers: add sinjin2300 --- maintainers/maintainer-list.nix | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/maintainers/maintainer-list.nix b/maintainers/maintainer-list.nix index 52c741b7c370..7bec710e4575 100644 --- a/maintainers/maintainer-list.nix +++ b/maintainers/maintainer-list.nix @@ -22705,6 +22705,11 @@ matrix = "@c3n21:matrix.org"; githubId = 37077738; }; + sinjin2300 = { + name = "Sinjin"; + github = "Sinjin2300"; + githubId = 35543336; + }; sioodmy = { name = "Antoni Sokołowski"; github = "sioodmy"; From e3352ec0b8ba3f14e345810796c668527e05fa0e Mon Sep 17 00:00:00 2001 From: Sinjin23000 Date: Fri, 9 May 2025 23:06:24 -0700 Subject: [PATCH 004/220] nnd: init at 0.19 Co-authored-by: Tom van Dijk <18gatenmaker6@gmail.com> --- pkgs/by-name/nn/nnd/package.nix | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 pkgs/by-name/nn/nnd/package.nix diff --git a/pkgs/by-name/nn/nnd/package.nix b/pkgs/by-name/nn/nnd/package.nix new file mode 100644 index 000000000000..d0cb7818c71c --- /dev/null +++ b/pkgs/by-name/nn/nnd/package.nix @@ -0,0 +1,31 @@ +{ + lib, + fetchFromGitHub, + pkgsCross, +}: +let + inherit (pkgsCross.musl64) rustPlatform; +in +rustPlatform.buildRustPackage (finalAttrs: { + pname = "nnd"; + version = "0.19"; + + src = fetchFromGitHub { + owner = "al13n321"; + repo = "nnd"; + tag = "v${finalAttrs.version}"; + hash = "sha256-DjJDNsyOrDLaMruGLP3arfrCs/7hW24wfjvPncndY+Q="; + }; + + useFetchCargoVendor = true; + cargoHash = "sha256-Iwipxy0xKDyFLMmdB2FQve6DULX+46Pi9rOaK0bDTB0="; + + meta = { + description = "Debugger for Linux"; + homepage = "https://github.com/al13n321/nnd/tree/main"; + license = lib.licenses.asl20; + platforms = [ "x86_64-linux" ]; + maintainers = with lib.maintainers; [ sinjin2300 ]; + mainProgram = "nnd"; + }; +}) From 2549b34e16e76b694309775fbc2dde9469d2e887 Mon Sep 17 00:00:00 2001 From: Zaechus Date: Sat, 10 May 2025 16:00:03 -0600 Subject: [PATCH 005/220] rbdoom-3-bfg: 1.5.1 -> 1.6.0 --- pkgs/by-name/rb/rbdoom-3-bfg/package.nix | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/pkgs/by-name/rb/rbdoom-3-bfg/package.nix b/pkgs/by-name/rb/rbdoom-3-bfg/package.nix index 137be74c7a0d..7dbf12a24e54 100644 --- a/pkgs/by-name/rb/rbdoom-3-bfg/package.nix +++ b/pkgs/by-name/rb/rbdoom-3-bfg/package.nix @@ -2,12 +2,10 @@ lib, stdenv, fetchFromGitHub, - fetchpatch, cmake, directx-shader-compiler, - libGLU, - libpng, - 
libjpeg_turbo, + ispc, + ncurses, openal, rapidjson, SDL2, @@ -18,22 +16,16 @@ stdenv.mkDerivation rec { pname = "rbdoom-3-bfg"; - version = "1.5.1"; + version = "1.6.0"; src = fetchFromGitHub { owner = "RobertBeckebans"; repo = pname; rev = "v${version}"; - hash = "sha256-bjjeTdbQDWTibSrIWhCnr6F0Ef17efLgWGQAAwezjUw="; + hash = "sha256-9BZEFO+e5IG6hv9+QI9OJecQ84rLTWBDz4k0GU6SeDE="; fetchSubmodules = true; }; - patches = fetchpatch { - name = "replace-HLSL-ternary-operators.patch"; - url = "https://github.com/RobertBeckebans/RBDOOM-3-BFG/commit/feffa4a4dd9a2a5f3c608f720cde41bea37797d3.patch"; - hash = "sha256-aR1eoWZL3+ps7P7yFXFvGsMFxpUSBDiyBsja/ISin4I="; - }; - postPatch = '' substituteInPlace neo/extern/nvrhi/tools/shaderCompiler/CMakeLists.txt \ --replace "AppleClang" "Clang" @@ -42,12 +34,11 @@ stdenv.mkDerivation rec { nativeBuildInputs = [ cmake directx-shader-compiler + ispc ]; buildInputs = [ - libGLU - libpng - libjpeg_turbo + ncurses openal rapidjson SDL2 @@ -60,9 +51,6 @@ stdenv.mkDerivation rec { cmakeFlags = [ "-DFFMPEG=OFF" "-DBINKDEC=ON" - "-DUSE_SYSTEM_LIBGLEW=ON" - "-DUSE_SYSTEM_LIBPNG=ON" - "-DUSE_SYSTEM_LIBJPEG=ON" "-DUSE_SYSTEM_RAPIDJSON=ON" "-DUSE_SYSTEM_ZLIB=ON" ]; From b6739b829a505a401b24f6f2285999bb85e45224 Mon Sep 17 00:00:00 2001 From: emaryn Date: Sun, 11 May 2025 12:48:13 +0800 Subject: [PATCH 006/220] ncgopher: 0.5.0 -> 0.7.0 Diff: https://github.com/jansc/ncgopher/compare/v0.5.0...v0.7.0 --- pkgs/by-name/nc/ncgopher/package.nix | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/pkgs/by-name/nc/ncgopher/package.nix b/pkgs/by-name/nc/ncgopher/package.nix index 50deded81024..a4652d0c2f25 100644 --- a/pkgs/by-name/nc/ncgopher/package.nix +++ b/pkgs/by-name/nc/ncgopher/package.nix @@ -6,35 +6,40 @@ ncurses6, openssl, sqlite, + perl, }: -rustPlatform.buildRustPackage rec { +rustPlatform.buildRustPackage (finalAttrs: { pname = "ncgopher"; - version = "0.5.0"; + version = "0.7.0"; src = fetchFromGitHub { owner = "jansc"; repo = "ncgopher"; - rev = "v${version}"; - sha256 = "sha256-KrvTwcIeINIBkia6PTnKXp4jFd6GEMBh/xbn0Ot/wmE="; + tag = "v${finalAttrs.version}"; + hash = "sha256-9bwQgFZkwOV28qflWL7ZyUE3SLvPhf77sjomurqMK6E="; }; useFetchCargoVendor = true; - cargoHash = "sha256-QhkYyvoWMGtLH5HhpOJ3JsBIWeZN/CllcXwNW+iM3WI="; + cargoHash = "sha256-wfodxA1fvdsvWvmnzYmL4GzgdIiQbXuhGq/U9spM+0s="; + + nativeBuildInputs = [ + pkg-config + perl + ]; - nativeBuildInputs = [ pkg-config ]; buildInputs = [ ncurses6 openssl sqlite ]; - meta = with lib; { + meta = { description = "Gopher and gemini client for the modern internet"; homepage = "https://github.com/jansc/ncgopher"; - license = licenses.bsd2; - maintainers = with maintainers; [ shamilton ]; - platforms = platforms.linux; + license = lib.licenses.bsd2; + maintainers = with lib.maintainers; [ shamilton ]; + platforms = lib.platforms.linux; mainProgram = "ncgopher"; }; -} +}) From 6fc882a95d55a392db5474ea7b84acbbf559a9df Mon Sep 17 00:00:00 2001 From: cy Date: Sun, 11 May 2025 19:45:27 -0400 Subject: [PATCH 007/220] pixelflasher: 7.11.4.0 -> 8.0.1.0 Changelog: https://github.com/badabing2005/PixelFlasher/compare/v7.11.4.0...v8.0.1.0 Upstream added a dependency on `polib` in https://github.com/badabing2005/PixelFlasher/commit/e11422f1f80317bb082711abb4bd480303e8fcae --- pkgs/by-name/pi/pixelflasher/package.nix | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/pi/pixelflasher/package.nix b/pkgs/by-name/pi/pixelflasher/package.nix index 
5b20e7213998..71d9e9c4dc6b 100644 --- a/pkgs/by-name/pi/pixelflasher/package.nix +++ b/pkgs/by-name/pi/pixelflasher/package.nix @@ -10,14 +10,14 @@ }: python3Packages.buildPythonApplication rec { pname = "pixelflasher"; - version = "7.11.4.0"; + version = "8.0.1.0"; format = "other"; src = fetchFromGitHub { owner = "badabing2005"; repo = "PixelFlasher"; tag = "v${version}"; - hash = "sha256-TFvMxYGiRNpuwQyDmSqnksQ31azucZzXq9mZHvl/C4U="; + hash = "sha256-3cIrQ5MbYfWpxFZBJTg0h/Q8PHsWP4KYZvGUnhMEjK4="; }; desktopItems = [ @@ -43,6 +43,7 @@ python3Packages.buildPythonApplication rec { lz4 markdown platformdirs + polib protobuf4 psutil pyperclip From cd21e4131cda290d289dd914ac2be4f8f89762b7 Mon Sep 17 00:00:00 2001 From: Jeremy Schlatter Date: Tue, 13 May 2025 14:24:01 -0700 Subject: [PATCH 008/220] pm2: 5.4.2 -> 6.0.6 --- pkgs/by-name/pm/pm2/package.nix | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/pm/pm2/package.nix b/pkgs/by-name/pm/pm2/package.nix index 6884ea8d91eb..731c0c8b0561 100644 --- a/pkgs/by-name/pm/pm2/package.nix +++ b/pkgs/by-name/pm/pm2/package.nix @@ -2,20 +2,26 @@ lib, buildNpmPackage, fetchFromGitHub, + npm-lockfile-fix, }: buildNpmPackage rec { pname = "pm2"; - version = "5.4.2"; + version = "6.0.6"; src = fetchFromGitHub { owner = "Unitech"; repo = "pm2"; rev = "v${version}"; - hash = "sha256-8Fsh7rld7rtT55qVgj3/XbujNpZx0BfzTRcLjdPLFSA="; + hash = "sha256-ji6IOlPSEj+qpSusF3OX056KuZDL3JjvaTNT/UQTiqA="; + + # Requested patch upstream: https://github.com/Unitech/pm2/pull/5985 + postFetch = '' + ${lib.getExe npm-lockfile-fix} $out/package-lock.json + ''; }; - npmDepsHash = "sha256-Rp3euhURkZgVyszyAwrIftL7lY4aoP+Q4kSQBFxwTcs="; + npmDepsHash = "sha256-b+SSal4eNruQOMNAFoLLJdzfFhz1T3EieDv4kTwwA1Y="; dontNpmBuild = true; From a94dd454e915e7c6308ebd51788d43468c079c25 Mon Sep 17 00:00:00 2001 From: Alex James Date: Fri, 28 Feb 2025 19:44:22 -0600 Subject: [PATCH 009/220] convmv: add dependencies to support additional encodings This adds support for the following encodings: ```diff diff --git a/a b/b --- a/a +++ b/b @@ -4,8 +4,21 @@ AdobeSymbol AdobeZdingbat ascii ascii-ctrl +big5-1984 +big5-2003 big5-eten big5-hkscs +big5ext +big5plus +cccii +cns11643-1 +cns11643-2 +cns11643-3 +cns11643-4 +cns11643-5 +cns11643-6 +cns11643-7 +cns11643-f cp1006 cp1026 cp1047 @@ -46,15 +59,20 @@ cp949 cp950 dingbats euc-cn +euc-jisx0213 euc-jp euc-kr +euc-tw gb12345-raw +gb18030 gb2312-raw gsm0338 hp-roman8 hz +IMAP-UTF-7 iso-2022-jp iso-2022-jp-1 +iso-2022-jp-3 iso-2022-kr iso-8859-1 iso-8859-10 @@ -75,6 +93,8 @@ iso-ir-165 jis0201-raw jis0208-raw jis0212-raw +jis0213-1-raw +jis0213-2-raw johab koi8-f koi8-r @@ -109,9 +129,11 @@ nextstep null posix-bc shiftjis +shiftjisx0213 symbol UCS-2BE UCS-2LE +unisys UTF-16 UTF-16BE UTF-16LE ``` --- pkgs/by-name/co/convmv/package.nix | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/co/convmv/package.nix b/pkgs/by-name/co/convmv/package.nix index 9670d85dcd64..9b13a336a56c 100644 --- a/pkgs/by-name/co/convmv/package.nix +++ b/pkgs/by-name/co/convmv/package.nix @@ -2,7 +2,9 @@ lib, stdenv, fetchzip, + makeWrapper, perl, + perlPackages, }: stdenv.mkDerivation (finalAttrs: { @@ -21,9 +23,17 @@ stdenv.mkDerivation (finalAttrs: { strictDeps = true; - nativeBuildInputs = [ perl ]; + nativeBuildInputs = [ + makeWrapper + perl + ]; - buildInputs = [ perl ]; + buildInputs = [ + perl + perlPackages.EncodeHanExtra + perlPackages.EncodeIMAPUTF7 + perlPackages.EncodeJIS2K + ]; 
makeFlags = [ "PREFIX=${placeholder "out"}" @@ -46,6 +56,10 @@ stdenv.mkDerivation (finalAttrs: { dontPatchShebangs = true; + postFixup = '' + wrapProgram "$out/bin/convmv" --prefix PERL5LIB : "$PERL5LIB" + ''; + meta = with lib; { description = "Converts filenames from one encoding to another"; downloadPage = "https://www.j3e.de/linux/convmv/"; From 07587e8c9d75e47ac9dfa83663fded4bb61c068c Mon Sep 17 00:00:00 2001 From: Alex James Date: Sun, 16 Mar 2025 14:07:54 -0500 Subject: [PATCH 010/220] convmv: add split `bin` output --- pkgs/by-name/co/convmv/package.nix | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/co/convmv/package.nix b/pkgs/by-name/co/convmv/package.nix index 9b13a336a56c..50036f87495f 100644 --- a/pkgs/by-name/co/convmv/package.nix +++ b/pkgs/by-name/co/convmv/package.nix @@ -12,8 +12,9 @@ stdenv.mkDerivation (finalAttrs: { version = "2.06"; outputs = [ - "out" + "bin" "man" + "out" ]; src = fetchzip { @@ -36,7 +37,7 @@ stdenv.mkDerivation (finalAttrs: { ]; makeFlags = [ - "PREFIX=${placeholder "out"}" + "PREFIX=${placeholder "bin"}" "MANDIR=${placeholder "man"}/share/man" ]; @@ -57,7 +58,7 @@ stdenv.mkDerivation (finalAttrs: { dontPatchShebangs = true; postFixup = '' - wrapProgram "$out/bin/convmv" --prefix PERL5LIB : "$PERL5LIB" + wrapProgram "$bin/bin/convmv" --prefix PERL5LIB : "$PERL5LIB" ''; meta = with lib; { From 53d9e997d491586581cfa0961cc3f913ea53a609 Mon Sep 17 00:00:00 2001 From: Willi Carlsen Date: Thu, 8 May 2025 20:13:24 +0200 Subject: [PATCH 011/220] ec2-instance-selector: init at 3.1.1 Signed-off-by: Willi Carlsen --- .../ec/ec2-instance-selector/package.nix | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 pkgs/by-name/ec/ec2-instance-selector/package.nix diff --git a/pkgs/by-name/ec/ec2-instance-selector/package.nix b/pkgs/by-name/ec/ec2-instance-selector/package.nix new file mode 100644 index 000000000000..c6620d0af98f --- /dev/null +++ b/pkgs/by-name/ec/ec2-instance-selector/package.nix @@ -0,0 +1,46 @@ +{ + lib, + buildGoModule, + fetchFromGitHub, + versionCheckHook, + nix-update-script, +}: + +buildGoModule (finalAttrs: { + pname = "ec2-instance-selector"; + version = "3.1.1"; + + src = fetchFromGitHub { + owner = "aws"; + repo = "amazon-ec2-instance-selector"; + tag = "v${finalAttrs.version}"; + hash = "sha256-4J66/LiFFeUW20du2clqjz9ozLV+Sn2VVqF9VISXpb0="; + }; + + vendorHash = "sha256-ocysHrbkmFQ96dEVJvc5YuuBiaXToAcMUUPFiLpMCpU="; + + ldflags = [ + "-s" + "-w" + "-X=main.versionID=${finalAttrs.version}" + "-X=github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector.versionID=${finalAttrs.version}" + ]; + + postInstall = '' + rm $out/bin/readme-test + mv $out/bin/cmd $out/bin/ec2-instance-selector + ''; + + doInstallCheck = true; + + passthru.updateScript = nix-update-script { }; + + meta = { + description = "Recommends instance types based on resource criteria like vcpus and memory"; + homepage = "https://github.com/aws/amazon-ec2-instance-selector"; + changelog = "https://github.com/aws/amazon-ec2-instance-selector/tags/v${finalAttrs.version}"; + license = lib.licenses.asl20; + maintainers = with lib.maintainers; [ wcarlsen ]; + mainProgram = "ec2-instance-selector"; + }; +}) From c3c67e15f5e1e15fa9f336719a77d52525a15954 Mon Sep 17 00:00:00 2001 From: Willi Carlsen Date: Fri, 9 May 2025 21:03:30 +0200 Subject: [PATCH 012/220] maintainer-list: add wcarlsen Signed-off-by: Willi Carlsen --- maintainers/maintainer-list.nix | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/maintainers/maintainer-list.nix b/maintainers/maintainer-list.nix index 55d3c017e5dd..06ad7ca2024b 100644 --- a/maintainers/maintainer-list.nix +++ b/maintainers/maintainer-list.nix @@ -26105,6 +26105,12 @@ github = "waynr"; githubId = 1441126; }; + wcarlsen = { + name = "Willi Carlsen"; + email = "carlsenwilli@gmail.com"; + github = "wcarlsen"; + githubId = 17003032; + }; wchresta = { email = "wchresta.nix@chrummibei.ch"; github = "wchresta"; From bed600685bb42f4562f4e7a49b363185b754b588 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Thu, 15 May 2025 18:38:18 +0000 Subject: [PATCH 013/220] python3Packages.kopf: 1.37.5 -> 1.38.0 --- pkgs/development/python-modules/kopf/default.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/development/python-modules/kopf/default.nix b/pkgs/development/python-modules/kopf/default.nix index b36d8a983962..e003a6bef744 100644 --- a/pkgs/development/python-modules/kopf/default.nix +++ b/pkgs/development/python-modules/kopf/default.nix @@ -24,14 +24,14 @@ buildPythonPackage rec { pname = "kopf"; - version = "1.37.5"; + version = "1.38.0"; pyproject = true; src = fetchFromGitHub { owner = "nolar"; repo = "kopf"; tag = version; - hash = "sha256-FwQnt5UoK+Qx7suFACwEtTIvBneJQ19/WmdelWmf+Z0="; + hash = "sha256-H2Q5nDIODp2VFtMIJ0g8b+/SMZzLueRGBkh1g6LBbgc="; }; build-system = [ @@ -79,7 +79,7 @@ buildPythonPackage rec { meta = { description = "Python framework to write Kubernetes operators"; homepage = "https://kopf.readthedocs.io/"; - changelog = "https://github.com/nolar/kopf/releases/tag/${version}"; + changelog = "https://github.com/nolar/kopf/releases/tag/${src.tag}"; license = lib.licenses.mit; maintainers = with lib.maintainers; [ genga898 ]; }; From c22d65b91674d478ed6faaba8889a69dd5063b86 Mon Sep 17 00:00:00 2001 From: Thomas Gerbet Date: Fri, 16 May 2025 00:52:05 +0200 Subject: [PATCH 014/220] quickjs: 2024-01-13 -> 2025-04-26 Fixes CVE-2025-46687. https://bellard.org/quickjs/Changelog --- pkgs/by-name/qu/quickjs/package.nix | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/pkgs/by-name/qu/quickjs/package.nix b/pkgs/by-name/qu/quickjs/package.nix index c589035588e8..339195f5db76 100644 --- a/pkgs/by-name/qu/quickjs/package.nix +++ b/pkgs/by-name/qu/quickjs/package.nix @@ -8,11 +8,11 @@ stdenv.mkDerivation (finalAttrs: { pname = "quickjs"; - version = "2024-01-13"; + version = "2025-04-26"; src = fetchurl { url = "https://bellard.org/quickjs/quickjs-${finalAttrs.version}.tar.xz"; - hash = "sha256-PEv4+JW/pUvrSGyNEhgRJ3Hs/FrDvhA2hR70FWghLgM="; + hash = "sha256-LyAHTCUWbvb3gfOBxQ1XtQLLhdRw1jmrzOu+95VMg78="; }; outputs = [ @@ -38,6 +38,7 @@ stdenv.mkDerivation (finalAttrs: { ''; postBuild = '' + make doc/version.texi pushd doc makeinfo *texi popd @@ -61,7 +62,6 @@ stdenv.mkDerivation (finalAttrs: { '' set +o pipefail qjs --help 2>&1 | grep "QuickJS version" - qjscalc --help 2>&1 | grep "QuickJS version" set -o pipefail '' @@ -93,10 +93,6 @@ stdenv.mkDerivation (finalAttrs: { ES2023 specification including modules, asynchronous generators, proxies and BigInt. - It optionally supports mathematical extensions such as big decimal - floating point numbers (BigDecimal), big binary floating point numbers - (BigFloat) and operator overloading. - Main Features: - Small and easily embeddable: just a few C files, no external @@ -112,8 +108,6 @@ stdenv.mkDerivation (finalAttrs: { - Can compile Javascript sources to executables with no external dependency. 
- Garbage collection using reference counting (to reduce memory usage and have deterministic behavior) with cycle removal. - - Mathematical extensions: BigDecimal, BigFloat, operator overloading, - bigint mode, math mode. - Command line interpreter with contextual colorization implemented in Javascript. - Small built-in standard library with C library wrappers. From e50edd162befd42b359bc3eae2dad1e63719c7ba Mon Sep 17 00:00:00 2001 From: Thomas Gerbet Date: Fri, 16 May 2025 01:49:19 +0200 Subject: [PATCH 015/220] connman: apply patches for CVE-2025-32366 and CVE-2025-32743 https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=8d3be0285f1d4667bfe85dba555c663eb3d704b4 https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=d90b911f6760959bdf1393c39fe8d1118315490f --- pkgs/by-name/co/connman/package.nix | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/pkgs/by-name/co/connman/package.nix b/pkgs/by-name/co/connman/package.nix index e62def2dccab..1995a54fa143 100644 --- a/pkgs/by-name/co/connman/package.nix +++ b/pkgs/by-name/co/connman/package.nix @@ -2,6 +2,7 @@ lib, stdenv, fetchurl, + fetchpatch, autoreconfHook, dbus, file, @@ -78,13 +79,26 @@ stdenv.mkDerivation (finalAttrs: { hash = "sha256-ElfOvjJ+eQC34rhMD7MwqpCBXkVYmM0vlB9DCO0r47w="; }; - patches = optionals stdenv.hostPlatform.isMusl [ - # Fix Musl build by avoiding a Glibc-only API. - (fetchurl { - url = "https://git.alpinelinux.org/aports/plain/community/connman/libresolv.patch?id=e393ea84386878cbde3cccadd36a30396e357d1e"; - hash = "sha256-7Q1bp8rD/gGVYUqnIXqjr9vypR8jlC926p3KYWl9kLw="; - }) - ]; + patches = + [ + (fetchpatch { + name = "CVE-2025-32366.patch"; + url = "https://git.kernel.org/pub/scm/network/connman/connman.git/patch/?id=8d3be0285f1d4667bfe85dba555c663eb3d704b4"; + hash = "sha256-kPb4pZVWvnvTUcpc4wRc8x/pMUTXGIywj3w8IYKRTBs="; + }) + (fetchpatch { + name = "CVE-2025-32743.patch"; + url = "https://git.kernel.org/pub/scm/network/connman/connman.git/patch/?id=d90b911f6760959bdf1393c39fe8d1118315490f"; + hash = "sha256-odkjYC/iM6dTIJx2WM/KKotXdTtgv8NMFNJMzx5+YU4="; + }) + ] + ++ optionals stdenv.hostPlatform.isMusl [ + # Fix Musl build by avoiding a Glibc-only API. 
+ (fetchurl { + url = "https://git.alpinelinux.org/aports/plain/community/connman/libresolv.patch?id=e393ea84386878cbde3cccadd36a30396e357d1e"; + hash = "sha256-7Q1bp8rD/gGVYUqnIXqjr9vypR8jlC926p3KYWl9kLw="; + }) + ]; nativeBuildInputs = [ autoreconfHook From 448b3a121bbf6eda284e4daa6f9debd720375c42 Mon Sep 17 00:00:00 2001 From: SchweGELBin Date: Fri, 16 May 2025 13:47:04 +0200 Subject: [PATCH 016/220] mautrix-gmessages: 0.6.1 -> 0.6.2 --- pkgs/by-name/ma/mautrix-gmessages/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ma/mautrix-gmessages/package.nix b/pkgs/by-name/ma/mautrix-gmessages/package.nix index d96347f9ec1f..ddbd96cbabcb 100644 --- a/pkgs/by-name/ma/mautrix-gmessages/package.nix +++ b/pkgs/by-name/ma/mautrix-gmessages/package.nix @@ -10,16 +10,16 @@ buildGoModule rec { pname = "mautrix-gmessages"; - version = "0.6.1"; + version = "0.6.2"; src = fetchFromGitHub { owner = "mautrix"; repo = "gmessages"; tag = "v${version}"; - hash = "sha256-qpqFWQ4ZhgzG7SG6phW6LnS52Ve1S+Ky6YtjzfBkBmE="; + hash = "sha256-NzLHCVJaYl8q5meKZDy8St8J9c8oyASLLrXhWG7K+yw="; }; - vendorHash = "sha256-Ps9I8WtTtrc3gSMxt4XZ/IUipZL2+kbgNfbY2PYFoa8="; + vendorHash = "sha256-+aX0r7IvsjXwmz5d6X0yzhG28mBYKvyDGoCbKMwkvk8="; ldflags = [ "-s" From 46fab4562740b07b519f245b0133946dc654f24a Mon Sep 17 00:00:00 2001 From: SchweGELBin Date: Fri, 16 May 2025 13:52:42 +0200 Subject: [PATCH 017/220] meowlnir: 0.3.0 -> 0.5.0 --- pkgs/by-name/me/meowlnir/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/me/meowlnir/package.nix b/pkgs/by-name/me/meowlnir/package.nix index 13c8d3d0c8ca..6c9fda7e91ba 100644 --- a/pkgs/by-name/me/meowlnir/package.nix +++ b/pkgs/by-name/me/meowlnir/package.nix @@ -9,18 +9,18 @@ buildGoModule rec { pname = "meowlnir"; - version = "0.3.0"; + version = "0.5.0"; src = fetchFromGitHub { owner = "maunium"; repo = "meowlnir"; tag = "v${version}"; - hash = "sha256-ig803e4onU3E4Nj5aJo2+QfwZt12iKIJ7fS/BjXsojc="; + hash = "sha256-1YuSXKRiMUCRbxGIDOQKGKK7CxM3VD0LLEeULQJ/zRo="; }; buildInputs = [ olm ]; - vendorHash = "sha256-+P7tlpGTo9N+uSn22uAlzyB36hu3re+KfOe3a/uzLZE="; + vendorHash = "sha256-g0be4ftBRV6Ver1kULfhnVBAF+iL3+/4e25sozpJ7+s="; doCheck = true; doInstallCheck = true; From ae335624fc2ccda599d4ad7b9987d059aeb12ce9 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Fri, 16 May 2025 16:55:38 +0000 Subject: [PATCH 018/220] jellyfin-ffmpeg: 7.1.1-1 -> 7.1.1-3 --- pkgs/by-name/je/jellyfin-ffmpeg/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/je/jellyfin-ffmpeg/package.nix b/pkgs/by-name/je/jellyfin-ffmpeg/package.nix index d419d6b38f42..aa6749446c02 100644 --- a/pkgs/by-name/je/jellyfin-ffmpeg/package.nix +++ b/pkgs/by-name/je/jellyfin-ffmpeg/package.nix @@ -5,7 +5,7 @@ }: let - version = "7.1.1-1"; + version = "7.1.1-3"; in (ffmpeg_7-full.override { @@ -14,7 +14,7 @@ in owner = "jellyfin"; repo = "jellyfin-ffmpeg"; rev = "v${version}"; - hash = "sha256-gu6+fOCcrGIZiR2hMl9tk97OmCmewOVJibz52DNpL1Q="; + hash = "sha256-pJLIhXDPDRhEqzmc1bXViSTSnRifFhMlixkEbGA0GRE="; }; }).overrideAttrs (old: { From 26013484522904d61e8b8f14f16a734647ac734b Mon Sep 17 00:00:00 2001 From: dansbandit <4530687+dansbandit@users.noreply.github.com> Date: Tue, 18 Feb 2025 17:55:13 +0100 Subject: [PATCH 019/220] pdftitle: 0.18 -> 0.20 --- pkgs/by-name/pd/pdftitle/package.nix | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/pd/pdftitle/package.nix b/pkgs/by-name/pd/pdftitle/package.nix index a8a134e3b645..7ddc73feb785 100644 --- a/pkgs/by-name/pd/pdftitle/package.nix +++ b/pkgs/by-name/pd/pdftitle/package.nix @@ -2,24 +2,33 @@ lib, fetchFromGitHub, python3Packages, + openai, pdfminer, + + withOpenai ? false, }: python3Packages.buildPythonApplication rec { pname = "pdftitle"; - version = "0.18"; + version = "0.20"; pyproject = true; src = fetchFromGitHub { owner = "metebalci"; repo = "pdftitle"; tag = "v${version}"; - hash = "sha256-rGGO4Cy+DZRU3ywb6Jq55JiM8ALgs/9wQmeXcSbPpG0="; + hash = "sha256-05SaAXYJ7l0ZldYufj0x9mYRwwGT7vlmq9a+ZF4pYiA="; }; build-system = with python3Packages; [ setuptools ]; - dependencies = with python3Packages; [ pdfminer ]; + dependencies = + with python3Packages; + [ + pdfminer + python-dotenv + ] + ++ lib.optional withOpenai openai; pythonImportsCheck = [ "pdftitle" ]; From eb6d1a086e5d4c002cc054563ffb3c18439cb9de Mon Sep 17 00:00:00 2001 From: TomaSajt <62384384+TomaSajt@users.noreply.github.com> Date: Sat, 17 May 2025 04:21:47 +0200 Subject: [PATCH 020/220] cargo-tauri.hook: fix installation when $out already exists --- pkgs/by-name/ca/cargo-tauri/hook.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkgs/by-name/ca/cargo-tauri/hook.nix b/pkgs/by-name/ca/cargo-tauri/hook.nix index bc1d57f52356..4683e5a2ac5c 100644 --- a/pkgs/by-name/ca/cargo-tauri/hook.nix +++ b/pkgs/by-name/ca/cargo-tauri/hook.nix @@ -42,7 +42,8 @@ makeSetupHook { ''; linux = '' - mv "$targetDir"/bundle/deb/*/data/usr $out + mkdir -p $out + mv "$targetDir"/bundle/deb/*/data/usr/* $out/ ''; } .${kernelName} or (throw "${kernelName} is not supported by cargo-tauri.hook"); From 596df5fc813f11540b6ad646029d2eeea0e983e2 Mon Sep 17 00:00:00 2001 From: Sizhe Zhao Date: Sat, 17 May 2025 23:16:43 +0800 Subject: [PATCH 021/220] waypaper: add prince213 to maintainers --- pkgs/by-name/wa/waypaper/package.nix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkgs/by-name/wa/waypaper/package.nix b/pkgs/by-name/wa/waypaper/package.nix index 8d3b0018582f..5db5c9e1e2c8 100644 --- a/pkgs/by-name/wa/waypaper/package.nix +++ b/pkgs/by-name/wa/waypaper/package.nix @@ -57,7 +57,10 @@ python3.pkgs.buildPythonApplication rec { ''; homepage = "https://github.com/anufrievroman/waypaper"; license = licenses.gpl3Only; - maintainers = with maintainers; [ totalchaos ]; 
+ maintainers = with maintainers; [ + prince213 + totalchaos + ]; platforms = platforms.linux; }; } From 3a79524cfb0cdc6e52cc702cf92b71864878a9f0 Mon Sep 17 00:00:00 2001 From: Sizhe Zhao Date: Sat, 17 May 2025 23:03:14 +0800 Subject: [PATCH 022/220] waypaper: 2.5 -> 2.6 --- pkgs/by-name/wa/waypaper/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/wa/waypaper/package.nix b/pkgs/by-name/wa/waypaper/package.nix index 5db5c9e1e2c8..e9beb2a05828 100644 --- a/pkgs/by-name/wa/waypaper/package.nix +++ b/pkgs/by-name/wa/waypaper/package.nix @@ -9,14 +9,14 @@ python3.pkgs.buildPythonApplication rec { pname = "waypaper"; - version = "2.5"; + version = "2.6"; pyproject = true; src = fetchFromGitHub { owner = "anufrievroman"; repo = "waypaper"; tag = version; - hash = "sha256-g1heJUBVJzRZXcNQCwRcqp6cTUaroKVpcTjG0KldlxU="; + hash = "sha256-MGfTuQcVChI4g7RONiTZZ4a5uX5SDjfLeMxbLIZ7VH4="; }; nativeBuildInputs = [ From b685d78a3a33b3f13955c71239d79d24e3acb2dd Mon Sep 17 00:00:00 2001 From: Sizhe Zhao Date: Sat, 17 May 2025 23:13:24 +0800 Subject: [PATCH 023/220] waypaper: use python3Packages --- pkgs/by-name/wa/waypaper/package.nix | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pkgs/by-name/wa/waypaper/package.nix b/pkgs/by-name/wa/waypaper/package.nix index e9beb2a05828..65b0814015da 100644 --- a/pkgs/by-name/wa/waypaper/package.nix +++ b/pkgs/by-name/wa/waypaper/package.nix @@ -1,13 +1,13 @@ { lib, - python3, + python3Packages, fetchFromGitHub, gobject-introspection, wrapGAppsHook3, killall, }: -python3.pkgs.buildPythonApplication rec { +python3Packages.buildPythonApplication rec { pname = "waypaper"; version = "2.6"; pyproject = true; @@ -24,15 +24,15 @@ python3.pkgs.buildPythonApplication rec { wrapGAppsHook3 ]; - build-system = [ python3.pkgs.setuptools ]; + build-system = with python3Packages; [ setuptools ]; - dependencies = [ - python3.pkgs.pygobject3 - python3.pkgs.platformdirs - python3.pkgs.pillow - python3.pkgs.imageio - python3.pkgs.imageio-ffmpeg - python3.pkgs.screeninfo + dependencies = with python3Packages; [ + imageio + imageio-ffmpeg + pillow + platformdirs + pygobject3 + screeninfo ]; propagatedBuildInputs = [ killall ]; From 11ac85f54cb8a82eca45aa40f359019a261cfa7c Mon Sep 17 00:00:00 2001 From: Sizhe Zhao Date: Sat, 17 May 2025 23:15:22 +0800 Subject: [PATCH 024/220] waypaper: avoid with lib; --- pkgs/by-name/wa/waypaper/package.nix | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkgs/by-name/wa/waypaper/package.nix b/pkgs/by-name/wa/waypaper/package.nix index 65b0814015da..bca1f3039aa0 100644 --- a/pkgs/by-name/wa/waypaper/package.nix +++ b/pkgs/by-name/wa/waypaper/package.nix @@ -46,7 +46,7 @@ python3Packages.buildPythonApplication rec { makeWrapperArgs+=("''${gappsWrapperArgs[@]}") ''; - meta = with lib; { + meta = { changelog = "https://github.com/anufrievroman/waypaper/releases/tag/${version}"; description = "GUI wallpaper setter for Wayland-based window managers"; mainProgram = "waypaper"; @@ -56,11 +56,11 @@ python3Packages.buildPythonApplication rec { If wallpaper does not change, make sure that swaybg or swww is installed. 
''; homepage = "https://github.com/anufrievroman/waypaper"; - license = licenses.gpl3Only; - maintainers = with maintainers; [ + license = lib.licenses.gpl3Only; + maintainers = with lib.maintainers; [ prince213 totalchaos ]; - platforms = platforms.linux; + platforms = lib.platforms.linux; }; } From 28dd6a442ddc0415a2f4deddf616c60f8b985aa1 Mon Sep 17 00:00:00 2001 From: leo60228 Date: Sat, 17 May 2025 19:44:51 -0400 Subject: [PATCH 025/220] pokefinder: 4.2.0 -> 4.2.1 --- pkgs/tools/games/pokefinder/default.nix | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pkgs/tools/games/pokefinder/default.nix b/pkgs/tools/games/pokefinder/default.nix index ed9d62db08ce..874dc499debc 100644 --- a/pkgs/tools/games/pokefinder/default.nix +++ b/pkgs/tools/games/pokefinder/default.nix @@ -17,24 +17,18 @@ stdenv.mkDerivation rec { pname = "pokefinder"; - version = "4.2.0"; + version = "4.2.1"; src = fetchFromGitHub { owner = "Admiral-Fish"; repo = "PokeFinder"; rev = "v${version}"; - sha256 = "R0FrRRQRe0tWrHUoU4PPwOgIsltUEImEMTXL79ISfRE="; + sha256 = "wjHqox0Vxc73/UTcE7LSo/cG9o4eOqkcjTIW99BxsAc="; fetchSubmodules = true; }; patches = [ ./set-desktop-file-name.patch - # fix compatibility with our libstdc++ - # https://github.com/Admiral-Fish/PokeFinder/pull/392 - (fetchpatch { - url = "https://github.com/Admiral-Fish/PokeFinder/commit/2cb1b049cabdf0d1b32c8cf29bf6c9d9c5c55cb0.patch"; - hash = "sha256-F/w7ydsZ5tZParMWi33W3Tv8A6LLiJt4dAoCrs40DIo="; - }) ]; postPatch = '' From 060162f471b9c9302ae0c0412bd92122296a47ee Mon Sep 17 00:00:00 2001 From: Leah Amelia Chen Date: Sun, 18 May 2025 01:08:17 +0200 Subject: [PATCH 026/220] lla: 0.3.10 -> 0.3.11 --- pkgs/by-name/ll/lla/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ll/lla/package.nix b/pkgs/by-name/ll/lla/package.nix index 0323e2b709d0..3fc5f2f5e39a 100644 --- a/pkgs/by-name/ll/lla/package.nix +++ b/pkgs/by-name/ll/lla/package.nix @@ -8,7 +8,7 @@ nix-update-script, }: let - version = "0.3.10"; + version = "0.3.11"; in rustPlatform.buildRustPackage { pname = "lla"; @@ -18,7 +18,7 @@ rustPlatform.buildRustPackage { owner = "chaqchase"; repo = "lla"; tag = "v${version}"; - hash = "sha256-/6p23JW3ZaSuDf34IWcTggR92/zUTMRerQ32bTsRujo="; + hash = "sha256-HxHUpFTAeK3/pE+ozHGmMUj0Jt7iKrbZ1xnFj7828Ng="; }; nativeBuildInputs = [ @@ -27,7 +27,7 @@ rustPlatform.buildRustPackage { ]; useFetchCargoVendor = true; - cargoHash = "sha256-aX8nm/V0ug2g40QeFU9AWxjuFAnW+gYTR8RC5CV7wRQ="; + cargoHash = "sha256-YvxzuOUowr5tcKZaZwgpeskfMJcOKJyHci43CfQWhOY="; cargoBuildFlags = [ "--workspace" ]; From f85a596e6e3a18c9f19731a33b71546de6be10bc Mon Sep 17 00:00:00 2001 From: ZHAO Jin-Xiang Date: Sun, 18 May 2025 17:03:17 +0800 Subject: [PATCH 027/220] gojo: add installCheck --- pkgs/by-name/go/gojo/package.nix | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkgs/by-name/go/gojo/package.nix b/pkgs/by-name/go/gojo/package.nix index c970aba7ff3f..7c6707420dbd 100644 --- a/pkgs/by-name/go/gojo/package.nix +++ b/pkgs/by-name/go/gojo/package.nix @@ -23,6 +23,10 @@ buildGoModule rec { versionCheckHook ]; versionCheckProgramArg = "-v"; + postInstallCheck = '' + $out/bin/gojo -V > /dev/null + seq 1 10 | $out/bin/gojo -a | grep '^\[1,2,3,4,5,6,7,8,9,10\]$' > /dev/null + ''; doInstallCheck = true; passthru.updateScript = nix-update-script { }; From 58886090b72d78135090b4b1da1f7591bfc5665e Mon Sep 17 00:00:00 2001 From: ZHAO Jin-Xiang Date: Sun, 18 May 2025 17:06:23 +0800 Subject: [PATCH 028/220] gojo: use finalAttrs 
style --- pkgs/by-name/go/gojo/package.nix | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkgs/by-name/go/gojo/package.nix b/pkgs/by-name/go/gojo/package.nix index 7c6707420dbd..b675dabd5378 100644 --- a/pkgs/by-name/go/gojo/package.nix +++ b/pkgs/by-name/go/gojo/package.nix @@ -6,14 +6,14 @@ nix-update-script, }: -buildGoModule rec { +buildGoModule (finalAttrs: { pname = "gojo"; version = "0.3.2"; src = fetchFromGitHub { owner = "itchyny"; repo = "gojo"; - tag = "v${version}"; + tag = "v${finalAttrs.version}"; hash = "sha256-DMFTB5CgJTWf+P9ntgBgzdmcF2qjS9t3iUQ1Rer+Ab4="; }; @@ -24,7 +24,7 @@ buildGoModule rec { ]; versionCheckProgramArg = "-v"; postInstallCheck = '' - $out/bin/gojo -V > /dev/null + $out/bin/gojo --help > /dev/null seq 1 10 | $out/bin/gojo -a | grep '^\[1,2,3,4,5,6,7,8,9,10\]$' > /dev/null ''; doInstallCheck = true; @@ -34,9 +34,9 @@ buildGoModule rec { meta = { description = "Yet another Go implementation of jo"; homepage = "https://github.com/itchyny/gojo"; - changelog = "https://github.com/itchyny/gojo/releases/tag/v${version}"; + changelog = "https://github.com/itchyny/gojo/releases/tag/v${finalAttrs.version}"; license = lib.licenses.mit; maintainers = with lib.maintainers; [ xiaoxiangmoe ]; mainProgram = "gojo"; }; -} +}) From 346a4e2f2c92ce76009e2ea075c0e89f9e072031 Mon Sep 17 00:00:00 2001 From: Louis Thevenet Date: Sun, 18 May 2025 12:23:54 +0200 Subject: [PATCH 029/220] vault-tasks: 0.11.0 -> 0.11.1 --- pkgs/by-name/va/vault-tasks/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/va/vault-tasks/package.nix b/pkgs/by-name/va/vault-tasks/package.nix index 423adab5fe02..711c88a01459 100644 --- a/pkgs/by-name/va/vault-tasks/package.nix +++ b/pkgs/by-name/va/vault-tasks/package.nix @@ -5,7 +5,7 @@ nix-update-script, }: let - version = "0.11.0"; + version = "0.11.1"; in rustPlatform.buildRustPackage { pname = "vault-tasks"; @@ -14,10 +14,10 @@ rustPlatform.buildRustPackage { owner = "louis-thevenet"; repo = "vault-tasks"; rev = "v${version}"; - hash = "sha256-3hRn3x86XLVMBtDlMsuqeEWgsgSeapri9MYNLqDxGF4="; + hash = "sha256-7stFa2fLczGyoM/O2S/uKCfjSDyABUw/b3tXp7Olqq8="; }; useFetchCargoVendor = true; - cargoHash = "sha256-mh6LUb1gS/cICyVWCYvmCSeqxaIWI6PyLeQx13dZ0CA="; + cargoHash = "sha256-RSW0N0icKAZbh8KQNkI9TgcKwa6hTKjhaJWCGADtfq8="; postInstall = "install -Dm444 desktop/vault-tasks.desktop -t $out/share/applications"; From c0e914f71422311f7fea8b58c27ab60ad31383ea Mon Sep 17 00:00:00 2001 From: Sebastian Rieger Date: Sun, 18 May 2025 22:58:59 +0200 Subject: [PATCH 030/220] containerlab: 0.67.0 -> 0.68.0 https://github.com/srl-labs/containerlab/releases/tag/v0.68.0 --- pkgs/by-name/co/containerlab/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/co/containerlab/package.nix b/pkgs/by-name/co/containerlab/package.nix index ab1f24407686..74ac889dda8a 100644 --- a/pkgs/by-name/co/containerlab/package.nix +++ b/pkgs/by-name/co/containerlab/package.nix @@ -8,16 +8,16 @@ buildGoModule rec { pname = "containerlab"; - version = "0.67.0"; + version = "0.68.0"; src = fetchFromGitHub { owner = "srl-labs"; repo = "containerlab"; rev = "v${version}"; - hash = "sha256-wTVGvaosHhQleRDytCdA1R4YKlzgGN4nWRZx6Ok+O3U="; + hash = "sha256-x6QDwduAMCD+Trj0awQXW0Tdleb2U6YBi/7mdMB6V/8="; }; - vendorHash = "sha256-Bba2Lt43I9jKg6zWhXWE0yJsVx7SlQ2GmrK++cZ9TrM="; + vendorHash = "sha256-XRgKfRw6VGg+lkbtPWUVNfAk5a7ZdFwVmhjtM7uSwHs="; nativeBuildInputs = [ installShellFiles ]; 
From 4d49b3bc17ff683c3465eed33f05759a07f260e7 Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Mon, 19 May 2025 11:22:59 +0100 Subject: [PATCH 031/220] owncast: 0.2.0 -> 0.2.3 --- pkgs/by-name/ow/owncast/package.nix | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/pkgs/by-name/ow/owncast/package.nix b/pkgs/by-name/ow/owncast/package.nix index 66a2c11e9210..c1b8e0fbbd27 100644 --- a/pkgs/by-name/ow/owncast/package.nix +++ b/pkgs/by-name/ow/owncast/package.nix @@ -5,12 +5,11 @@ nixosTests, bash, which, - ffmpeg, + ffmpeg-full, makeBinaryWrapper, }: - let - version = "0.2.0"; + version = "0.2.3"; in buildGoModule { pname = "owncast"; @@ -19,21 +18,27 @@ buildGoModule { owner = "owncast"; repo = "owncast"; rev = "v${version}"; - hash = "sha256-MdquhDdbOdP1shnKHBlzQrSDe41fp0qnMzgaqL89jTk="; + hash = "sha256-JCIB4G3cOSkEEO/jcsj4mUP+HeQfgn0jX4OL8NX9/C0="; }; - vendorHash = "sha256-ERilQZ8vnhGW1IEcLA4CcmozDooHKbnmASMw87tjYD4="; + vendorHash = "sha256-FuynEBoPS0p1bRgmaeCxn1RPqbYHcltZpQ9SE71xHEE="; - propagatedBuildInputs = [ ffmpeg ]; + propagatedBuildInputs = [ ffmpeg-full ]; nativeBuildInputs = [ makeBinaryWrapper ]; + # lefthook is included as a tool in go.mod for a pre-commit hook, but causes the build to fail + preBuild = '' + # Remove lefthook from tools section in go.mod + sed -i '/tool (/,/)/{ /[[:space:]]*github.com\/evilmartians\/lefthook[[:space:]]*$/d; }' go.mod + ''; + postInstall = '' wrapProgram $out/bin/owncast \ --prefix PATH : ${ lib.makeBinPath [ bash which - ffmpeg + ffmpeg-full ] } ''; @@ -51,8 +56,10 @@ buildGoModule { homepage = "https://owncast.online"; license = licenses.mit; platforms = platforms.unix; - maintainers = with maintainers; [ MayNiklas ]; + maintainers = with maintainers; [ + flexiondotorg + MayNiklas + ]; mainProgram = "owncast"; }; - } From d6db85cb4fa1d5f3aed81b2fede651bc0c3b1793 Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Mon, 19 May 2025 12:57:26 +0100 Subject: [PATCH 032/220] avizo: 1.3 -> 1.3-unstable-2024-11-03 --- pkgs/by-name/av/avizo/package.nix | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pkgs/by-name/av/avizo/package.nix b/pkgs/by-name/av/avizo/package.nix index 47a44f05aa73..cbec1c25b6b2 100644 --- a/pkgs/by-name/av/avizo/package.nix +++ b/pkgs/by-name/av/avizo/package.nix @@ -21,13 +21,13 @@ stdenv.mkDerivation rec { pname = "avizo"; - version = "1.3"; + version = "1.3-unstable-2024-11-03"; src = fetchFromGitHub { owner = "misterdanb"; repo = "avizo"; - rev = version; - sha256 = "sha256-Vj8OrNlAstl0AXTeVAPdEf5JgnAmJwl9s3Jdc0ZiYQc="; + rev = "5efaa22968b2cc1a3c15a304cac3f22ec2727b17"; + sha256 = "sha256-KYQPHVxjvqKt4d7BabplnrXP30FuBQ6jQ1NxzR5U7qI="; }; nativeBuildInputs = [ @@ -59,6 +59,9 @@ stdenv.mkDerivation rec { homepage = "https://github.com/misterdanb/avizo"; license = licenses.gpl3; platforms = platforms.linux; - maintainers = [ maintainers.berbiche ]; + maintainers = [ + maintainers.berbiche + maintainers.flexiondotorg + ]; }; } From bbcf5f763a5a4de569e7ef32de9298ae3e55a5d0 Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Mon, 19 May 2025 15:53:16 +0100 Subject: [PATCH 033/220] obs-studio-plugins.obs-replay-source: remove unnecessary buildInputs --- .../obs-studio/plugins/obs-replay-source.nix | 35 +++++++------------ 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/pkgs/applications/video/obs-studio/plugins/obs-replay-source.nix b/pkgs/applications/video/obs-studio/plugins/obs-replay-source.nix index b589a6639332..8279fee497f9 
100644 --- a/pkgs/applications/video/obs-studio/plugins/obs-replay-source.nix +++ b/pkgs/applications/video/obs-studio/plugins/obs-replay-source.nix @@ -3,44 +3,35 @@ lib, fetchFromGitHub, cmake, - libcaption, obs-studio, - qtbase, }: -stdenv.mkDerivation (finalAttrs: { +stdenv.mkDerivation rec { pname = "obs-replay-source"; version = "1.8.1"; src = fetchFromGitHub { owner = "exeldro"; repo = "obs-replay-source"; - tag = finalAttrs.version; - hash = "sha256-+PSALDqHXPyR0J7YnLn3QgPN6eIoH3yTIm1Bp7Li8c8="; + rev = version; + sha256 = "sha256-+PSALDqHXPyR0J7YnLn3QgPN6eIoH3yTIm1Bp7Li8c8="; }; nativeBuildInputs = [ cmake ]; - - buildInputs = [ - libcaption - obs-studio - qtbase - ]; + buildInputs = [ obs-studio ]; postInstall = '' - mkdir -p $out/lib $out/share - mv $out/obs-plugins/64bit $out/lib/obs-plugins - rm -rf $out/obs-plugins - mv $out/data $out/share/obs + rm -rf $out/obs-plugins $out/data ''; - dontWrapQtApps = true; - - meta = { + meta = with lib; { description = "Replay source for OBS studio"; homepage = "https://github.com/exeldro/obs-replay-source"; - license = lib.licenses.gpl2Only; - platforms = lib.platforms.linux; - maintainers = with lib.maintainers; [ pschmitt ]; + license = licenses.gpl2Only; + platforms = platforms.linux; + maintainers = with maintainers; [ + flexiondotorg + pschmitt + ]; }; -}) +} From 0fb43b602638cb757ee366d447895f4ba7313f07 Mon Sep 17 00:00:00 2001 From: Thomas Gerbet Date: Mon, 19 May 2025 17:39:11 +0200 Subject: [PATCH 034/220] opera: drop The listed maintainer did not touch nixpkgs for at least 6 months. The package is only updated from time to time only thanks to the update bot and drive-by committers. This is not a sustainable way to maintain a security-critical package. --- pkgs/by-name/op/opera/package.nix | 147 ------------------------------ pkgs/top-level/aliases.nix | 1 + 2 files changed, 1 insertion(+), 147 deletions(-) delete mode 100644 pkgs/by-name/op/opera/package.nix diff --git a/pkgs/by-name/op/opera/package.nix b/pkgs/by-name/op/opera/package.nix deleted file mode 100644 index a18655c63acb..000000000000 --- a/pkgs/by-name/op/opera/package.nix +++ /dev/null @@ -1,147 +0,0 @@ -{ - alsa-lib, - atk, - cairo, - cups, - curl, - dbus, - dpkg, - expat, - fetchurl, - fontconfig, - freetype, - gdk-pixbuf, - glib, - gtk3, - gtk4, - lib, - libX11, - libxcb, - libXScrnSaver, - libXcomposite, - libXcursor, - libXdamage, - libXext, - libXfixes, - libXi, - libXrandr, - libXrender, - libXtst, - libdrm, - libnotify, - libpulseaudio, - libuuid, - libxshmfence, - libgbm, - nspr, - nss, - pango, - stdenv, - systemd, - at-spi2-atk, - at-spi2-core, - autoPatchelfHook, - wrapGAppsHook3, - qt6, - proprietaryCodecs ? 
false, - vivaldi-ffmpeg-codecs, -}: - -let - mirror = "https://get.geo.opera.com/pub/opera/desktop"; -in -stdenv.mkDerivation rec { - pname = "opera"; - version = "118.0.5461.60"; - - src = fetchurl { - url = "${mirror}/${version}/linux/${pname}-stable_${version}_amd64.deb"; - hash = "sha256-SApVqrMeOrpw9GDMwBgpxMfSgMXJS1YV2bPx+KXBY/4="; - }; - - nativeBuildInputs = [ - dpkg - autoPatchelfHook - wrapGAppsHook3 - qt6.wrapQtAppsHook - ]; - - buildInputs = [ - alsa-lib - at-spi2-atk - at-spi2-core - atk - cairo - cups - curl - dbus - expat - fontconfig.lib - freetype - gdk-pixbuf - glib - gtk3 - libX11 - libXScrnSaver - libXcomposite - libXcursor - libXdamage - libXext - libXfixes - libXi - libXrandr - libXrender - libXtst - libdrm - libnotify - libuuid - libxcb - libxshmfence - libgbm - nspr - nss - pango - (lib.getLib stdenv.cc.cc) - qt6.qtbase - ]; - - runtimeDependencies = - [ - # Works fine without this except there is no sound. - libpulseaudio.out - - # This is a little tricky. Without it the app starts then crashes. Then it - # brings up the crash report, which also crashes. `strace -f` hints at a - # missing libudev.so.0. - (lib.getLib systemd) - - # Error at startup: - # "Illegal instruction (core dumped)" - gtk3 - gtk4 - ] - ++ lib.optionals proprietaryCodecs [ - vivaldi-ffmpeg-codecs - ]; - - dontWrapQtApps = true; - - installPhase = '' - mkdir -p $out/bin - cp -r usr $out - cp -r usr/share $out/share - - # we already using QT6, autopatchelf wants to patch this as well - rm $out/usr/lib/x86_64-linux-gnu/opera/libqt5_shim.so - ln -s $out/usr/bin/opera $out/bin/opera - ''; - - meta = with lib; { - homepage = "https://www.opera.com"; - description = "Faster, safer and smarter web browser"; - platforms = [ "x86_64-linux" ]; - license = licenses.unfree; - sourceProvenance = with sourceTypes; [ binaryNativeCode ]; - maintainers = with maintainers; [ kindrowboat ]; - }; -} diff --git a/pkgs/top-level/aliases.nix b/pkgs/top-level/aliases.nix index d767c8eb2842..468607cd58a7 100644 --- a/pkgs/top-level/aliases.nix +++ b/pkgs/top-level/aliases.nix @@ -1418,6 +1418,7 @@ mapAliases { opensycl = lib.warnOnInstantiate "'opensycl' has been renamed to 'adaptivecpp'" adaptivecpp; # Added 2024-12-04 opensyclWithRocm = lib.warnOnInstantiate "'opensyclWithRocm' has been renamed to 'adaptivecppWithRocm'" adaptivecppWithRocm; # Added 2024-12-04 openvdb_11 = throw "'openvdb_11' has been removed in favor of the latest version'"; # Added 2025-05-03 + opera = throw "'opera' has been removed due to lack of maintenance in nixpkgs"; # Added 2025-05-19 orchis = throw "'orchis' has been renamed to/replaced by 'orchis-theme'"; # Converted to throw 2024-10-17 omping = throw "'omping' has been removed because its upstream has been archived"; # Added 2025-05-10 onlyoffice-bin = onlyoffice-desktopeditors; # Added 2024-09-20 From 64a34904facd0044766360c6ad75f5bea0c2c021 Mon Sep 17 00:00:00 2001 From: Thomas Gerbet Date: Mon, 19 May 2025 17:40:43 +0200 Subject: [PATCH 035/220] maintainers: remove kindrowboat No recent activities in nixpkgs and no maintained package anymore. 
--- maintainers/maintainer-list.nix | 6 ------ 1 file changed, 6 deletions(-) diff --git a/maintainers/maintainer-list.nix b/maintainers/maintainer-list.nix index 9792a76c2feb..ebc6e256bdb9 100644 --- a/maintainers/maintainer-list.nix +++ b/maintainers/maintainer-list.nix @@ -12982,12 +12982,6 @@ githubId = 843652; name = "Kim Burgess"; }; - kindrowboat = { - email = "hello@kindrobot.ca"; - github = "kindrowboat"; - githubId = 777773; - name = "Stef Dunlap"; - }; kini = { email = "keshav.kini@gmail.com"; github = "kini"; From 355f5ce574e9510e5e8dde0f006d1b845e171ef4 Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Mon, 19 May 2025 17:24:41 +0100 Subject: [PATCH 036/220] obs-studio-plugins.obs-noise: init at 1.0.0 --- .../video/obs-studio/plugins/default.nix | 2 + .../video/obs-studio/plugins/obs-noise.nix | 38 +++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 pkgs/applications/video/obs-studio/plugins/obs-noise.nix diff --git a/pkgs/applications/video/obs-studio/plugins/default.nix b/pkgs/applications/video/obs-studio/plugins/default.nix index 8019aa4a8b3a..30f62b1efbbd 100644 --- a/pkgs/applications/video/obs-studio/plugins/default.nix +++ b/pkgs/applications/video/obs-studio/plugins/default.nix @@ -48,6 +48,8 @@ obs-ndi = qt6Packages.callPackage ./obs-ndi { }; + obs-noise = callPackage ./obs-noise.nix { }; + obs-nvfbc = callPackage ./obs-nvfbc.nix { }; obs-pipewire-audio-capture = callPackage ./obs-pipewire-audio-capture.nix { }; diff --git a/pkgs/applications/video/obs-studio/plugins/obs-noise.nix b/pkgs/applications/video/obs-studio/plugins/obs-noise.nix new file mode 100644 index 000000000000..e40348455940 --- /dev/null +++ b/pkgs/applications/video/obs-studio/plugins/obs-noise.nix @@ -0,0 +1,38 @@ +{ + lib, + stdenv, + fetchFromGitHub, + cmake, + obs-studio, +}: + +stdenv.mkDerivation rec { + pname = "obs-noise"; + version = "1.0.0"; + + src = fetchFromGitHub { + owner = "FiniteSingularity"; + repo = "obs-noise"; + rev = "v${version}"; + sha256 = "sha256-D9vGXCrmQ8IDRmL8qZ1ZBiOz9AjhKm45W37zC16kRCk="; + }; + + nativeBuildInputs = [ cmake ]; + buildInputs = [ obs-studio ]; + + postFixup = '' + mv $out/data/obs-plugins/${pname}/shaders $out/share/obs/obs-plugins/${pname}/ + rm -rf $out/data $out/obs-plugins + ''; + + meta = with lib; { + description = "A plug-in for noise generation and noise effects for OBS."; + homepage = "https://github.com/FiniteSingularity/obs-noise"; + maintainers = with maintainers; [ flexiondotorg ]; + license = licenses.gpl2Only; + platforms = [ + "x86_64-linux" + "i686-linux" + ]; + }; +} From 84f68ad674bfffc237bb007aeafb81430b0a285d Mon Sep 17 00:00:00 2001 From: Troels Henriksen Date: Sun, 18 May 2025 20:24:58 +0200 Subject: [PATCH 037/220] smlfut: 1.6.2 -> 1.6.4 --- pkgs/by-name/sm/smlfut/package.nix | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkgs/by-name/sm/smlfut/package.nix b/pkgs/by-name/sm/smlfut/package.nix index 21938595b301..323f3ca6c3f3 100644 --- a/pkgs/by-name/sm/smlfut/package.nix +++ b/pkgs/by-name/sm/smlfut/package.nix @@ -7,15 +7,15 @@ futhark, }: -stdenv.mkDerivation rec { +stdenv.mkDerivation (finalAttrs: { pname = "smlfut"; - version = "1.6.2"; + version = "1.6.4"; src = fetchFromGitHub { owner = "diku-dk"; repo = "smlfut"; - rev = "v${version}"; - hash = "sha256-0Bqgoyp43Y961BMghJFBUx+1lcM2HHlPDjPyLHquWiE="; + rev = "v${finalAttrs.version}"; + hash = "sha256-xICcobdvSdHZfNxz4WRDOsaL4JGFRK7LmhMzKOZY5FY="; }; enableParallelBuilding = true; @@ -43,4 +43,4 @@ stdenv.mkDerivation rec 
{ maintainers = with maintainers; [ athas ]; mainProgram = "smlfut"; }; -} +}) From 220f9d8f28577277894d82464eee9fb7e5dc5399 Mon Sep 17 00:00:00 2001 From: emaryn Date: Sun, 11 May 2025 21:10:52 +0800 Subject: [PATCH 038/220] hamrs-pro: 2.33.0 -> 2.37.0 --- pkgs/by-name/ha/hamrs-pro/package.nix | 14 +++++++++----- pkgs/by-name/ha/hamrs-pro/update.sh | 24 ++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 5 deletions(-) create mode 100755 pkgs/by-name/ha/hamrs-pro/update.sh diff --git a/pkgs/by-name/ha/hamrs-pro/package.nix b/pkgs/by-name/ha/hamrs-pro/package.nix index e392175bc8de..e64230941558 100644 --- a/pkgs/by-name/ha/hamrs-pro/package.nix +++ b/pkgs/by-name/ha/hamrs-pro/package.nix @@ -8,34 +8,36 @@ let pname = "hamrs-pro"; - version = "2.33.0"; + version = "2.37.0"; throwSystem = throw "Unsupported system: ${stdenvNoCC.hostPlatform.system}"; srcs = { x86_64-linux = fetchurl { url = "https://hamrs-dist.s3.amazonaws.com/hamrs-pro-${version}-linux-x86_64.AppImage"; - hash = "sha256-FUwyyuXtWaHauZyvRvrH7KDC0du02eNR5TfKJyiKb9k="; + hash = "sha256-9aQW50w6bR+2vwQSi01q3KetGWwCIJbwADqx93yv5xw="; }; aarch64-linux = fetchurl { url = "https://hamrs-dist.s3.amazonaws.com/hamrs-pro-${version}-linux-arm64.AppImage"; - hash = "sha256-YQPKxjaNXE1AgEspZRLI1OUFU71rAU8NBcS0Jv94MS8="; + hash = "sha256-6vTi2RRPn4bgBQlv3DXO7oiCH0EOYxZ9NfXGnu00+nU="; }; x86_64-darwin = fetchurl { url = "https://hamrs-dist.s3.amazonaws.com/hamrs-pro-${version}-mac-x64.dmg"; - hash = "sha256-KtrXF47AwVAUXYk1Wu2aKMTXENv7q9JBb86Oy+UHQYY="; + hash = "sha256-frDkojSJE4dvHX/kGwDp3wH5e0d598iHykc3UcrkOpo="; }; aarch64-darwin = fetchurl { url = "https://hamrs-dist.s3.amazonaws.com/hamrs-pro-${version}-mac-arm64.dmg"; - hash = "sha256-H46z4V9lo+n/pZzna7KIiYxQBqTlZULitQrFEEMFDvo="; + hash = "sha256-xyhGrFRD4LVd9ZCM69rQvu+uf1M0ydC/bj++EGK6xpw="; }; }; src = srcs.${stdenvNoCC.hostPlatform.system} or throwSystem; + passthru.updateScript = ./update.sh; + meta = { homepage = "https://hamrs.app/"; description = "Simple, portable logger tailored for activities like Parks on the Air, Field Day, and more"; @@ -58,6 +60,7 @@ let pname version src + passthru meta ; @@ -78,6 +81,7 @@ let pname version src + passthru meta ; diff --git a/pkgs/by-name/ha/hamrs-pro/update.sh b/pkgs/by-name/ha/hamrs-pro/update.sh new file mode 100755 index 000000000000..dfd93fa6788a --- /dev/null +++ b/pkgs/by-name/ha/hamrs-pro/update.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env nix-shell +#!nix-shell -i bash -p bash nix-update common-updater-scripts nix + +set -euo pipefail + +currentVersion=$(nix-instantiate --eval -E "with import ./. {}; hamrs-pro.version or (lib.getVersion hamrs-pro)" | tr -d '"') +# extracting version from download link +latestVersion=$(curl -sL https://hamrs.app | grep -Po '(?<=hamrs-pro-)[0-9]+\.[0-9]+\.[0-9]+(?=-linux-x86_64\.AppImage)') + +if [[ "$currentVersion" == "$latestVersion" ]]; then + echo "package is up-to-date: $currentVersion" + exit 0 +fi + +update-source-version hamrs-pro $latestVersion || true + +for system in \ + x86_64-linux \ + aarch64-linux \ + x86_64-darwin \ + aarch64-darwin; do + hash=$(nix hash convert --to sri --hash-algo sha256 $(nix-prefetch-url $(nix-instantiate --eval -E "with import ./. 
{}; hamrs-pro.src.url" --system "$system" | tr -d '"'))) + update-source-version hamrs-pro $latestVersion $hash --system=$system --ignore-same-version +done From 01c975015cd93742eceda2778749225f15242bf7 Mon Sep 17 00:00:00 2001 From: emaryn Date: Tue, 20 May 2025 03:06:17 +0000 Subject: [PATCH 039/220] hamrs-pro: 2.37.0 -> 2.37.1 --- pkgs/by-name/ha/hamrs-pro/package.nix | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkgs/by-name/ha/hamrs-pro/package.nix b/pkgs/by-name/ha/hamrs-pro/package.nix index e64230941558..ccdff2aefdb8 100644 --- a/pkgs/by-name/ha/hamrs-pro/package.nix +++ b/pkgs/by-name/ha/hamrs-pro/package.nix @@ -8,29 +8,29 @@ let pname = "hamrs-pro"; - version = "2.37.0"; + version = "2.37.1"; throwSystem = throw "Unsupported system: ${stdenvNoCC.hostPlatform.system}"; srcs = { x86_64-linux = fetchurl { url = "https://hamrs-dist.s3.amazonaws.com/hamrs-pro-${version}-linux-x86_64.AppImage"; - hash = "sha256-9aQW50w6bR+2vwQSi01q3KetGWwCIJbwADqx93yv5xw="; + hash = "sha256-kLYgqRH+RpyitUSZVoZFfqVsrJjTXeZp80ILHGQmGTk="; }; aarch64-linux = fetchurl { url = "https://hamrs-dist.s3.amazonaws.com/hamrs-pro-${version}-linux-arm64.AppImage"; - hash = "sha256-6vTi2RRPn4bgBQlv3DXO7oiCH0EOYxZ9NfXGnu00+nU="; + hash = "sha256-BKS7xPzVoIUToqEbtI+8t4Gf7HvZSWhzfXmToghFPEk="; }; x86_64-darwin = fetchurl { url = "https://hamrs-dist.s3.amazonaws.com/hamrs-pro-${version}-mac-x64.dmg"; - hash = "sha256-frDkojSJE4dvHX/kGwDp3wH5e0d598iHykc3UcrkOpo="; + hash = "sha256-gejyYoW7VcR0ILD/PSwFGC2tzLiiR2vjEsErBxbvJ3o="; }; aarch64-darwin = fetchurl { url = "https://hamrs-dist.s3.amazonaws.com/hamrs-pro-${version}-mac-arm64.dmg"; - hash = "sha256-xyhGrFRD4LVd9ZCM69rQvu+uf1M0ydC/bj++EGK6xpw="; + hash = "sha256-Hi/t5ShfhUFw0aEzb2XIhOIppXg04qnq8tl3LKNH3qQ="; }; }; From 703026bbd8ee3773f6ab7302a192814145d309bf Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Tue, 20 May 2025 10:12:46 +0100 Subject: [PATCH 040/220] obs-studio-plugins.obs-stroke-glow-shadow: init at 1.5.2 --- .../video/obs-studio/plugins/default.nix | 2 + .../plugins/obs-stroke-glow-shadow.nix | 38 +++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 pkgs/applications/video/obs-studio/plugins/obs-stroke-glow-shadow.nix diff --git a/pkgs/applications/video/obs-studio/plugins/default.nix b/pkgs/applications/video/obs-studio/plugins/default.nix index 8019aa4a8b3a..7104118b7948 100644 --- a/pkgs/applications/video/obs-studio/plugins/default.nix +++ b/pkgs/applications/video/obs-studio/plugins/default.nix @@ -66,6 +66,8 @@ obs-source-switcher = callPackage ./obs-source-switcher.nix { }; + obs-stroke-glow-shadow = callPackage ./obs-stroke-glow-shadow.nix { }; + obs-teleport = callPackage ./obs-teleport { }; obs-text-pthread = callPackage ./obs-text-pthread.nix { }; diff --git a/pkgs/applications/video/obs-studio/plugins/obs-stroke-glow-shadow.nix b/pkgs/applications/video/obs-studio/plugins/obs-stroke-glow-shadow.nix new file mode 100644 index 000000000000..5865f595a38e --- /dev/null +++ b/pkgs/applications/video/obs-studio/plugins/obs-stroke-glow-shadow.nix @@ -0,0 +1,38 @@ +{ + lib, + stdenv, + fetchFromGitHub, + cmake, + obs-studio, +}: + +stdenv.mkDerivation rec { + pname = "obs-stroke-glow-shadow"; + version = "v1.5.2"; + + src = fetchFromGitHub { + owner = "FiniteSingularity"; + repo = "obs-stroke-glow-shadow"; + rev = version; + sha256 = "sha256-+2hb4u+6UG7IV9pAvPjp4wvDYhYnxe98U5QQjUcdD/k="; + }; + + nativeBuildInputs = [ cmake ]; + buildInputs = [ obs-studio ]; + + postFixup = '' + rm -rf 
$out/obs-plugins
+    rm -rf $out/data
+  '';
+
+  meta = with lib; {
+    description = "OBS plugin to provide efficient Stroke, Glow, and Shadow effects on masked sources";
+    homepage = "https://github.com/FiniteSingularity/obs-stroke-glow-shadow";
+    maintainers = with maintainers; [ flexiondotorg ];
+    license = licenses.gpl2Only;
+    platforms = [
+      "x86_64-linux"
+      "i686-linux"
+    ];
+  };
+}

From 1f2139103db89a4fe8858e70dc6c1ef5d9958567 Mon Sep 17 00:00:00 2001
From: Martin Wimpress
Date: Tue, 20 May 2025 10:44:47 +0100
Subject: [PATCH 041/220] obs-studio-plugins.obs-urlsource: init at 0.3.7

---
 .../video/obs-studio/plugins/default.nix      |  2 +
 .../obs-studio/plugins/obs-urlsource.nix      | 98 +++++++++++++++++++
 2 files changed, 100 insertions(+)
 create mode 100644 pkgs/applications/video/obs-studio/plugins/obs-urlsource.nix

diff --git a/pkgs/applications/video/obs-studio/plugins/default.nix b/pkgs/applications/video/obs-studio/plugins/default.nix
index 8019aa4a8b3a..044f14eafc7b 100644
--- a/pkgs/applications/video/obs-studio/plugins/default.nix
+++ b/pkgs/applications/video/obs-studio/plugins/default.nix
@@ -74,6 +74,8 @@

   obs-tuna = qt6Packages.callPackage ./obs-tuna { };

+  obs-urlsource = qt6Packages.callPackage ./obs-urlsource.nix { };
+
   obs-vaapi = callPackage ./obs-vaapi { };

   obs-vertical-canvas = qt6Packages.callPackage ./obs-vertical-canvas.nix { };

diff --git a/pkgs/applications/video/obs-studio/plugins/obs-urlsource.nix b/pkgs/applications/video/obs-studio/plugins/obs-urlsource.nix
new file mode 100644
index 000000000000..0ced62150e21
--- /dev/null
+++ b/pkgs/applications/video/obs-studio/plugins/obs-urlsource.nix
@@ -0,0 +1,98 @@
+{
+  lib,
+  stdenv,
+  fetchFromGitHub,
+  cmake,
+  curl,
+  git,
+  obs-studio,
+  pugixml,
+  qtbase,
+  writeScript,
+}:
+
+let
+  websocketpp = fetchFromGitHub {
+    owner = "zaphoyd";
+    repo = "websocketpp";
+    rev = "0.8.2";
+    sha256 = "sha256-9fIwouthv2GcmBe/UPvV7Xn9P2o0Kmn2hCI4jCh0hPM=";
+  };
+
+  lexbor = fetchFromGitHub {
+    owner = "lexbor";
+    repo = "lexbor";
+    rev = "v2.3.0";
+    sha256 = "sha256-s5fZWBhXC0fuHIUk1YX19bHagahOtSLlKQugyHCIlgI=";
+  };
+
+  asio = fetchFromGitHub {
+    owner = "chriskohlhoff";
+    repo = "asio";
+    rev = "asio-1-28-0";
+    sha256 = "sha256-dkiUdR8FgDnnqdptaJjE4rvNlgpC5HZl6SQQ5Di2C2s=";
+  };
+in
+stdenv.mkDerivation rec {
+  pname = "obs-urlsource";
+  version = "0.3.7";
+
+  src = fetchFromGitHub {
+    owner = "locaal-ai";
+    repo = "obs-urlsource";
+    rev = version;
+    sha256 = "sha256-ZWwD8jJkL1rAUeanD4iChcgpnJaC5pPo36Ot36XOSx8=";
+    fetchSubmodules = true;
+  };
+
+  nativeBuildInputs = [
+    cmake
+    git
+  ];
+  buildInputs = [
+    curl
+    obs-studio
+    pugixml
+    qtbase
+  ];
+  dontWrapQtApps = true;
+
+  # Update websocketpp and lexbor configurations to use pre-fetched sources
+  postPatch = ''
+    sed -i 's|URL .*|SOURCE_DIR "${websocketpp}"\n  DOWNLOAD_COMMAND ""|' cmake/FetchWebsocketpp.cmake
+    sed -i \
+      -e 's|GIT_REPOSITORY .*|SOURCE_DIR "${lexbor}"|' \
+      -e 's|GIT_TAG .*|DOWNLOAD_COMMAND ""\n  UPDATE_COMMAND ""|' \
+      cmake/BuildLexbor.cmake
+  '';
+
+  postInstall = ''
+    rm -rf $out/lib/cmake
+  '';
+
+  NIX_CFLAGS_COMPILE = [
+    "-I${websocketpp}"
+    "-I${asio}/asio/include"
+  ];
+
+  cmakeFlags = [
+    # Prevent deprecation warnings from failing the build
+    (lib.cmakeOptionType "string" "CMAKE_CXX_FLAGS" "-Wno-error=deprecated-declarations")
+    (lib.cmakeBool "ENABLE_QT" true)
+    (lib.cmakeBool "USE_SYSTEM_CURL" true)
+    (lib.cmakeBool "USE_SYSTEM_PUGIXML" true)
+    (lib.cmakeBool "CMAKE_COMPILE_WARNING_AS_ERROR" false)
+    "-Wno-dev"
+  ];
+
+  meta = with lib; {
+    
description = "OBS plugin to fetch data from a URL or file, connect to an API or AI service, parse responses and display text, image or audio on scene"; + homepage = "https://github.com/locaal-ai/obs-urlsource"; + maintainers = with maintainers; [ flexiondotorg ]; + license = licenses.gpl2Only; + platforms = [ + "x86_64-linux" + "i686-linux" + ]; + }; +} From 4de6ded4453224b0d41a66696b35393ca18224c7 Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Tue, 20 May 2025 11:00:54 +0100 Subject: [PATCH 042/220] obs-studio-plugins.pixel-art: init at 0.0.4 --- .../video/obs-studio/plugins/default.nix | 2 ++ .../video/obs-studio/plugins/pixel-art.nix | 33 +++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 pkgs/applications/video/obs-studio/plugins/pixel-art.nix diff --git a/pkgs/applications/video/obs-studio/plugins/default.nix b/pkgs/applications/video/obs-studio/plugins/default.nix index 8019aa4a8b3a..039cba63183f 100644 --- a/pkgs/applications/video/obs-studio/plugins/default.nix +++ b/pkgs/applications/video/obs-studio/plugins/default.nix @@ -88,6 +88,8 @@ obs-webkitgtk = callPackage ./obs-webkitgtk.nix { }; + pixel-art = callPackage ./pixel-art.nix { }; + wlrobs = callPackage ./wlrobs.nix { }; waveform = callPackage ./waveform { }; diff --git a/pkgs/applications/video/obs-studio/plugins/pixel-art.nix b/pkgs/applications/video/obs-studio/plugins/pixel-art.nix new file mode 100644 index 000000000000..dd01049e6802 --- /dev/null +++ b/pkgs/applications/video/obs-studio/plugins/pixel-art.nix @@ -0,0 +1,33 @@ +{ + lib, + stdenv, + fetchFromGitHub, + cmake, + obs-studio, +}: + +stdenv.mkDerivation rec { + pname = "pixel-art"; + version = "0.0.4"; + + src = fetchFromGitHub { + owner = "dspstanky"; + repo = "pixel-art"; + rev = version; + sha256 = "sha256-7o63e7nK/JsK2SQg0AzUYcc4ZsPx0lt8gtAQm8Zy+9w="; + }; + + nativeBuildInputs = [ cmake ]; + buildInputs = [ obs-studio ]; + + meta = with lib; { + description = "An OBS Plugin that can be used to create retro-inspired pixel art visuals."; + homepage = "https://github.com/dspstanky/pixel-art"; + maintainers = with maintainers; [ flexiondotorg ]; + license = licenses.gpl2Only; + platforms = [ + "x86_64-linux" + "i686-linux" + ]; + }; +} From 90756ed26c178708f8aea224fa79f1c352a3fe1c Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Tue, 20 May 2025 11:15:07 +0100 Subject: [PATCH 043/220] obs-studio-plugins.obs-scene-as-transition: init at 1.1.1 --- .../video/obs-studio/plugins/default.nix | 2 + .../plugins/obs-scene-as-transition.nix | 37 +++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 pkgs/applications/video/obs-studio/plugins/obs-scene-as-transition.nix diff --git a/pkgs/applications/video/obs-studio/plugins/default.nix b/pkgs/applications/video/obs-studio/plugins/default.nix index 8019aa4a8b3a..d06214495523 100644 --- a/pkgs/applications/video/obs-studio/plugins/default.nix +++ b/pkgs/applications/video/obs-studio/plugins/default.nix @@ -58,6 +58,8 @@ obs-scale-to-sound = callPackage ./obs-scale-to-sound.nix { }; + obs-scene-as-transition = callPackage ./obs-scene-as-transition.nix { }; + obs-shaderfilter = qt6Packages.callPackage ./obs-shaderfilter.nix { }; obs-source-clone = callPackage ./obs-source-clone.nix { }; diff --git a/pkgs/applications/video/obs-studio/plugins/obs-scene-as-transition.nix b/pkgs/applications/video/obs-studio/plugins/obs-scene-as-transition.nix new file mode 100644 index 000000000000..cb1e41b6d286 --- /dev/null +++ 
b/pkgs/applications/video/obs-studio/plugins/obs-scene-as-transition.nix @@ -0,0 +1,37 @@ +{ + lib, + stdenv, + fetchFromGitHub, + cmake, + obs-studio, +}: + +stdenv.mkDerivation rec { + pname = "obs-scene-as-transition"; + version = "1.1.1"; + + src = fetchFromGitHub { + owner = "andilippi"; + repo = "obs-scene-as-transition"; + rev = "v${version}"; + sha256 = "sha256-qeiJR68MqvhpzvY7yNnR6w77SvavlZTdbnGBWrd7iZM="; + }; + + nativeBuildInputs = [ cmake ]; + buildInputs = [ obs-studio ]; + + postInstall = '' + rm -rf $out/obs-plugins $out/data + ''; + + meta = with lib; { + description = "An OBS Studio plugin that will allow you to use a Scene as a transition"; + homepage = "https://github.com/andilippi/obs-scene-as-transition"; + maintainers = with maintainers; [ flexiondotorg ]; + license = licenses.gpl2Plus; + platforms = [ + "x86_64-linux" + "i686-linux" + ]; + }; +} From 453f97c7e2b79b90709f0e0618d9f263757b8314 Mon Sep 17 00:00:00 2001 From: Golbinex <2061409-Golbinex@users.noreply.gitlab.com> Date: Tue, 20 May 2025 11:04:19 +0000 Subject: [PATCH 044/220] devede: 4.19.0 -> 4.21.0 --- pkgs/by-name/de/devede/package.nix | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/de/devede/package.nix b/pkgs/by-name/de/devede/package.nix index d9342d690216..54f422ed4a7d 100644 --- a/pkgs/by-name/de/devede/package.nix +++ b/pkgs/by-name/de/devede/package.nix @@ -12,6 +12,7 @@ wrapGAppsHook3, gdk-pixbuf, gobject-introspection, + nix-update-script, }: let @@ -25,14 +26,14 @@ let in buildPythonApplication rec { pname = "devede"; - version = "4.19.0"; + version = "4.21.0"; namePrefix = ""; src = fetchFromGitLab { owner = "rastersoft"; repo = "devedeng"; rev = version; - hash = "sha256-hjt2bXQov4lC6O4VY/eu/PZ2taSKng9gRhFDFhQR9SY="; + hash = "sha256-sLJkIKw0ciX6spugbdO0eZ1dIkoHfuu5e/f2XwA70a0="; }; nativeBuildInputs = [ @@ -66,6 +67,8 @@ buildPythonApplication rec { --replace "/usr/local/share" "$out/share" ''; + passthru.updateScript = nix-update-script { }; + meta = with lib; { description = "DVD Creator for Linux"; homepage = "https://www.rastersoft.com/programas/devede.html"; From d1dc183171de0ba5a2b6c8245b3e89f05883930c Mon Sep 17 00:00:00 2001 From: Golbinex <2061409-Golbinex@users.noreply.gitlab.com> Date: Tue, 20 May 2025 11:05:28 +0000 Subject: [PATCH 045/220] devede: add baksa to maintainer list --- pkgs/by-name/de/devede/package.nix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkgs/by-name/de/devede/package.nix b/pkgs/by-name/de/devede/package.nix index 54f422ed4a7d..ff922648b855 100644 --- a/pkgs/by-name/de/devede/package.nix +++ b/pkgs/by-name/de/devede/package.nix @@ -73,6 +73,9 @@ buildPythonApplication rec { description = "DVD Creator for Linux"; homepage = "https://www.rastersoft.com/programas/devede.html"; license = licenses.gpl3; - maintainers = [ maintainers.bdimcheff ]; + maintainers = [ + maintainers.bdimcheff + maintainers.baksa + ]; }; } From 74d01f685a3e3e82e54dad62f1d10dfb76d87292 Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Tue, 20 May 2025 13:08:04 +0100 Subject: [PATCH 046/220] obs-studio-plugins.obs-rgb-levels: 1.0.0 -> 1.0.2 --- .../video/obs-studio/plugins/default.nix | 2 +- ...gb-levels-filter.nix => obs-rgb-levels.nix} | 18 +++++++----------- 2 files changed, 8 insertions(+), 12 deletions(-) rename pkgs/applications/video/obs-studio/plugins/{obs-rgb-levels-filter.nix => obs-rgb-levels.nix} (50%) diff --git a/pkgs/applications/video/obs-studio/plugins/default.nix 
b/pkgs/applications/video/obs-studio/plugins/default.nix index 8019aa4a8b3a..57436f7bfa07 100644 --- a/pkgs/applications/video/obs-studio/plugins/default.nix +++ b/pkgs/applications/video/obs-studio/plugins/default.nix @@ -54,7 +54,7 @@ obs-replay-source = qt6Packages.callPackage ./obs-replay-source.nix { }; - obs-rgb-levels-filter = callPackage ./obs-rgb-levels-filter.nix { }; + obs-rgb-levels = callPackage ./obs-rgb-levels.nix { }; obs-scale-to-sound = callPackage ./obs-scale-to-sound.nix { }; diff --git a/pkgs/applications/video/obs-studio/plugins/obs-rgb-levels-filter.nix b/pkgs/applications/video/obs-studio/plugins/obs-rgb-levels.nix similarity index 50% rename from pkgs/applications/video/obs-studio/plugins/obs-rgb-levels-filter.nix rename to pkgs/applications/video/obs-studio/plugins/obs-rgb-levels.nix index f51d7963089a..3779e14e4aee 100644 --- a/pkgs/applications/video/obs-studio/plugins/obs-rgb-levels-filter.nix +++ b/pkgs/applications/video/obs-studio/plugins/obs-rgb-levels.nix @@ -7,28 +7,24 @@ }: stdenv.mkDerivation rec { - pname = "obs-rgb-levels-filter"; - version = "1.0.0"; + pname = "obs-rgb-levels"; + version = "1.0.2"; src = fetchFromGitHub { owner = "wimpysworld"; - repo = "obs-rgb-levels-filter"; + repo = "obs-rgb-levels"; rev = version; - sha256 = "sha256-QREwK9nBhjCBFslXUj9bGUGPgfEns8QqlgP5e2O/0oU="; + sha256 = "sha256-W79KUUUodlARlIMg/DaN+fxq/NEkp4k8MuEOHrJTbCk="; }; nativeBuildInputs = [ cmake ]; buildInputs = [ obs-studio ]; - cmakeFlags = [ - "-DOBS_SRC_DIR=${obs-studio.src}" - ]; - meta = with lib; { - description = "Simple OBS Studio filter to adjust RGB levels"; - homepage = "https://github.com/wimpysworld/obs-rgb-levels-filter"; + description = "A simple OBS Studio filter to adjust RGB levels."; + homepage = "https://github.com/wimpysworld/obs-rgb-levels"; maintainers = with maintainers; [ flexiondotorg ]; - license = licenses.gpl2Plus; + license = licenses.gpl2Only; platforms = [ "x86_64-linux" "i686-linux" From 65b0112b39dededeca97f4d3340f70610f0ff890 Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Tue, 20 May 2025 12:48:42 +0100 Subject: [PATCH 047/220] obs-studio-plugins.obs-dvd-screensaver: init at 0.0.2 --- .../video/obs-studio/plugins/default.nix | 2 ++ .../plugins/obs-dvd-screensaver.nix | 30 +++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 pkgs/applications/video/obs-studio/plugins/obs-dvd-screensaver.nix diff --git a/pkgs/applications/video/obs-studio/plugins/default.nix b/pkgs/applications/video/obs-studio/plugins/default.nix index 8019aa4a8b3a..bbe724c2addc 100644 --- a/pkgs/applications/video/obs-studio/plugins/default.nix +++ b/pkgs/applications/video/obs-studio/plugins/default.nix @@ -30,6 +30,8 @@ obs-composite-blur = callPackage ./obs-composite-blur.nix { }; + obs-dvd-screensaver = callPackage ./obs-dvd-screensaver.nix { }; + obs-freeze-filter = qt6Packages.callPackage ./obs-freeze-filter.nix { }; obs-gradient-source = callPackage ./obs-gradient-source.nix { }; diff --git a/pkgs/applications/video/obs-studio/plugins/obs-dvd-screensaver.nix b/pkgs/applications/video/obs-studio/plugins/obs-dvd-screensaver.nix new file mode 100644 index 000000000000..ab7e6891b067 --- /dev/null +++ b/pkgs/applications/video/obs-studio/plugins/obs-dvd-screensaver.nix @@ -0,0 +1,30 @@ +{ + lib, + stdenv, + fetchFromGitHub, + cmake, + obs-studio, +}: + +stdenv.mkDerivation (finalAttrs: { + pname = "obs-dvd-screensaver"; + version = "0.0.2"; + + src = fetchFromGitHub { + owner = "wimpysworld"; + repo = "obs-dvd-screensaver"; + tag = 
"${finalAttrs.version}"; + hash = "sha256-uZdFP3TULECzYNKtwaxFIcFYeFYdEoJ+ZKAqh9y9MEo="; + }; + strictDeps = true; + nativeBuildInputs = [ cmake ]; + buildInputs = [ obs-studio ]; + + meta = { + description = "DVD screen saver for OBS Studio"; + homepage = "https://github.com/wimpysworld/obs-dvd-screensaver"; + maintainers = with lib.maintainers; [ flexiondotorg ]; + license = lib.licenses.gpl2Only; + platforms = lib.platforms.linux; + }; +}) From cdfcbc9b1ad292b15a29f5b93863b3b4fc337f22 Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Tue, 20 May 2025 13:56:13 +0100 Subject: [PATCH 048/220] obs-studio-plugins.obs-media-controls: init at 0.4.1 --- .../video/obs-studio/plugins/default.nix | 2 + .../obs-studio/plugins/obs-media-controls.nix | 40 +++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 pkgs/applications/video/obs-studio/plugins/obs-media-controls.nix diff --git a/pkgs/applications/video/obs-studio/plugins/default.nix b/pkgs/applications/video/obs-studio/plugins/default.nix index 8019aa4a8b3a..72691abbcf39 100644 --- a/pkgs/applications/video/obs-studio/plugins/default.nix +++ b/pkgs/applications/video/obs-studio/plugins/default.nix @@ -40,6 +40,8 @@ obs-livesplit-one = callPackage ./obs-livesplit-one { }; + obs-media-controls = qt6Packages.callPackage ./obs-media-controls.nix { }; + obs-move-transition = callPackage ./obs-move-transition.nix { }; obs-multi-rtmp = qt6Packages.callPackage ./obs-multi-rtmp { }; diff --git a/pkgs/applications/video/obs-studio/plugins/obs-media-controls.nix b/pkgs/applications/video/obs-studio/plugins/obs-media-controls.nix new file mode 100644 index 000000000000..39868bc33002 --- /dev/null +++ b/pkgs/applications/video/obs-studio/plugins/obs-media-controls.nix @@ -0,0 +1,40 @@ +{ + lib, + stdenv, + fetchFromGitHub, + cmake, + obs-studio, + qtbase, +}: + +stdenv.mkDerivation (finalAttrs: { + pname = "obs-media-controls"; + version = "0.4.1"; + + src = fetchFromGitHub { + owner = "exeldro"; + repo = "obs-media-controls"; + tag = "${finalAttrs.version}"; + hash = "sha256-r9fqpg0G9rzGSqq5FUS8ul58rj0796aGZIND8PCJ9jk="; + }; + + nativeBuildInputs = [ cmake ]; + buildInputs = [ + obs-studio + qtbase + ]; + + dontWrapQtApps = true; + + postInstall = '' + rm -rf $out/obs-plugins $out/data + ''; + + meta = { + description = "Plugin for OBS Studio to add a Media Controls dock."; + homepage = "https://github.com/exeldro/obs-media-controls"; + maintainers = with lib.maintainers; [ flexiondotorg ]; + license = lib.licenses.gpl2Only; + platforms = lib.platforms.linux; + }; +}) From c22ad72e7b8d0c20bdb12691e6ea3c7d772df328 Mon Sep 17 00:00:00 2001 From: Martin Wimpress Date: Tue, 20 May 2025 14:10:16 +0100 Subject: [PATCH 049/220] obs-studio-plugins.obs-recursion-effect: init at 0.1.0 --- .../video/obs-studio/plugins/default.nix | 2 + .../plugins/obs-recursion-effect.nix | 43 +++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 pkgs/applications/video/obs-studio/plugins/obs-recursion-effect.nix diff --git a/pkgs/applications/video/obs-studio/plugins/default.nix b/pkgs/applications/video/obs-studio/plugins/default.nix index 8019aa4a8b3a..9a3993dd59ca 100644 --- a/pkgs/applications/video/obs-studio/plugins/default.nix +++ b/pkgs/applications/video/obs-studio/plugins/default.nix @@ -52,6 +52,8 @@ obs-pipewire-audio-capture = callPackage ./obs-pipewire-audio-capture.nix { }; + obs-recursion-effect = callPackage ./obs-recursion-effect.nix { }; + obs-replay-source = qt6Packages.callPackage ./obs-replay-source.nix { }; 
obs-rgb-levels-filter = callPackage ./obs-rgb-levels-filter.nix { };

diff --git a/pkgs/applications/video/obs-studio/plugins/obs-recursion-effect.nix b/pkgs/applications/video/obs-studio/plugins/obs-recursion-effect.nix
new file mode 100644
index 000000000000..2158ce22db16
--- /dev/null
+++ b/pkgs/applications/video/obs-studio/plugins/obs-recursion-effect.nix
@@ -0,0 +1,43 @@
+{
+  lib,
+  stdenv,
+  fetchFromGitHub,
+  fetchpatch,
+  cmake,
+  obs-studio,
+}:
+
+stdenv.mkDerivation (finalAttrs: {
+  pname = "obs-recursion-effect";
+  version = "0.1.0";
+
+  src = fetchFromGitHub {
+    owner = "exeldro";
+    repo = "obs-recursion-effect";
+    tag = "${finalAttrs.version}";
+    hash = "sha256-PeWJy423QbX4NULuS15LJ/IR/W+tXCJD9TjZdJOGk6A=";
+  };
+
+  nativeBuildInputs = [ cmake ];
+  buildInputs = [ obs-studio ];
+
+  # Fix OBS API deprecation warnings
+  patches = [
+    (fetchpatch {
+      url = "https://github.com/exeldro/obs-recursion-effect/commit/889a8484d5c0eb33267b44ccda545a8fadc189a5.diff";
+      hash = "sha256-J2GnsoPUTqvEkuBuAae2TrxXMQg0Sm3dq75ZjGN65IE=";
+    })
+  ];
+
+  postInstall = ''
+    rm -rf $out/obs-plugins $out/data
+  '';
+
+  meta = {
+    description = "Plugin for OBS Studio to add a recursion effect to a source using a filter";
+    homepage = "https://github.com/exeldro/obs-recursion-effect";
+    maintainers = with lib.maintainers; [ flexiondotorg ];
+    license = lib.licenses.gpl2Only;
+    platforms = lib.platforms.linux;
+  };
+})

From 8217eaf0cb2046c8a255a81d60874ef5eff72664 Mon Sep 17 00:00:00 2001
From: Michael Daniels
Date: Tue, 20 May 2025 10:46:06 -0400
Subject: [PATCH 050/220] danger-gitlab: 8.0.0 -> 9.0.0

Resolves some security issues with dependencies (see #351833).
---
 .../danger-gitlab/Gemfile.lock                | 111 ++++---
 .../danger-gitlab/gemset.nix                  | 271 ++++++++----------
 2 files changed, 175 insertions(+), 207 deletions(-)

diff --git a/pkgs/applications/version-management/danger-gitlab/Gemfile.lock b/pkgs/applications/version-management/danger-gitlab/Gemfile.lock
index d68ec3108ca2..230297e9357e 100644
--- a/pkgs/applications/version-management/danger-gitlab/Gemfile.lock
+++ b/pkgs/applications/version-management/danger-gitlab/Gemfile.lock
@@ -1,9 +1,11 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    addressable (2.8.0)
-      public_suffix (>= 2.0.2, < 5.0)
-    claide (1.0.3)
+    addressable (2.8.7)
+      public_suffix (>= 2.0.2, < 7.0)
+    base64 (0.2.0)
+    bigdecimal (3.1.9)
+    claide (1.1.0)
     claide-plugins (0.9.2)
       cork
       nap
@@ -11,82 +13,77 @@ GEM
     colored2 (3.1.2)
     cork (0.3.0)
       colored2 (~> 3.1)
-    danger (8.3.1)
+    csv (3.3.4)
+    danger (9.5.1)
+      base64 (~> 0.2)
       claide (~> 1.0)
       claide-plugins (>= 0.9.2)
       colored2 (~> 3.1)
       cork (~> 0.1)
-      faraday (>= 0.9.0, < 2.0)
+      faraday (>= 0.9.0, < 3.0)
       faraday-http-cache (~> 2.0)
-      git (~> 1.7)
+      git (~> 1.13)
       kramdown (~> 2.3)
       kramdown-parser-gfm (~> 1.0)
-      no_proxy_fix
-      octokit (~> 4.7)
+      octokit (>= 4.0)
+      pstore (~> 0.1)
       terminal-table (>= 1, < 4)
-    danger-gitlab (8.0.0)
+    danger-gitlab (9.0.0)
       danger
-      gitlab (~> 4.2, >= 4.2.0)
-    faraday (1.7.0)
-      faraday-em_http (~> 1.0)
-      faraday-em_synchrony (~> 1.0)
-      faraday-excon (~> 1.1)
-      faraday-httpclient (~> 1.0.1)
-      faraday-net_http (~> 1.0)
-      faraday-net_http_persistent (~> 1.1)
-      faraday-patron (~> 1.0)
-      faraday-rack (~> 1.0)
-      multipart-post (>= 1.2, < 3)
-      ruby2_keywords (>= 0.0.4)
-    faraday-em_http (1.0.0)
-    faraday-em_synchrony (1.0.0)
-    faraday-excon (1.1.0)
-    faraday-http-cache (2.2.0)
+      gitlab (~> 5.0)
+    faraday (2.13.1)
+      faraday-net_http (>= 2.0, < 3.5)
+      json
+      logger
+    faraday-http-cache (2.5.1)
       faraday (>= 0.8)
-    
faraday-httpclient (1.0.1) - faraday-net_http (1.0.1) - faraday-net_http_persistent (1.2.0) - faraday-patron (1.0.0) - faraday-rack (1.0.0) - git (1.9.1) + faraday-net_http (3.4.0) + net-http (>= 0.5.0) + git (1.19.1) + addressable (~> 2.8) rchardet (~> 1.8) - gitlab (4.17.0) - httparty (~> 0.18) - terminal-table (~> 1.5, >= 1.5.1) - httparty (0.18.1) - mime-types (~> 3.0) + gitlab (5.1.0) + base64 (~> 0.2.0) + httparty (~> 0.20) + terminal-table (>= 1.5.1) + httparty (0.23.1) + csv + mini_mime (>= 1.0.0) multi_xml (>= 0.5.2) - kramdown (2.3.1) - rexml + json (2.12.0) + kramdown (2.5.1) + rexml (>= 3.3.9) kramdown-parser-gfm (1.1.0) kramdown (~> 2.0) - mime-types (3.3.1) - mime-types-data (~> 3.2015) - mime-types-data (3.2021.0704) - multi_xml (0.6.0) - multipart-post (2.1.1) + logger (1.7.0) + mini_mime (1.1.5) + multi_xml (0.7.2) + bigdecimal (~> 3.1) nap (1.1.0) - no_proxy_fix (0.1.2) - octokit (4.21.0) - faraday (>= 0.9) - sawyer (~> 0.8.0, >= 0.5.3) + net-http (0.6.0) + uri + octokit (10.0.0) + faraday (>= 1, < 3) + sawyer (~> 0.9) open4 (1.3.4) - public_suffix (4.0.6) - rchardet (1.8.0) - rexml (3.2.5) - ruby2_keywords (0.0.5) - sawyer (0.8.2) + pstore (0.2.0) + public_suffix (6.0.2) + rchardet (1.9.0) + rexml (3.4.1) + sawyer (0.9.2) addressable (>= 2.3.5) - faraday (> 0.8, < 2.0) - terminal-table (1.8.0) - unicode-display_width (~> 1.1, >= 1.1.1) - unicode-display_width (1.7.0) + faraday (>= 0.17.3, < 3) + terminal-table (3.0.2) + unicode-display_width (>= 1.1.1, < 3) + unicode-display_width (2.6.0) + uri (1.0.3) PLATFORMS ruby + x86_64-linux DEPENDENCIES danger-gitlab BUNDLED WITH - 2.1.4 + 2.5.22 diff --git a/pkgs/applications/version-management/danger-gitlab/gemset.nix b/pkgs/applications/version-management/danger-gitlab/gemset.nix index 70b12e0d609f..2f47e820eb89 100644 --- a/pkgs/applications/version-management/danger-gitlab/gemset.nix +++ b/pkgs/applications/version-management/danger-gitlab/gemset.nix @@ -5,20 +5,40 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "022r3m9wdxljpbya69y2i3h9g3dhhfaqzidf95m6qjzms792jvgp"; + sha256 = "0cl2qpvwiffym62z991ynks7imsm87qmgxf0yfsmlwzkgi9qcaa6"; type = "gem"; }; - version = "2.8.0"; + version = "2.8.7"; + }; + base64 = { + groups = [ "default" ]; + platforms = [ ]; + source = { + remotes = [ "https://rubygems.org" ]; + sha256 = "01qml0yilb9basf7is2614skjp8384h2pycfx86cr8023arfj98g"; + type = "gem"; + }; + version = "0.2.0"; + }; + bigdecimal = { + groups = [ "default" ]; + platforms = [ ]; + source = { + remotes = [ "https://rubygems.org" ]; + sha256 = "1k6qzammv9r6b2cw3siasaik18i6wjc5m0gw5nfdc6jj64h79z1g"; + type = "gem"; + }; + version = "3.1.9"; }; claide = { groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0kasxsms24fgcdsq680nz99d5lazl9rmz1qkil2y5gbbssx89g0z"; + sha256 = "0bpqhc0kqjp1bh9b7ffc395l9gfls0337rrhmab4v46ykl45qg3d"; type = "gem"; }; - version = "1.0.3"; + version = "1.1.0"; }; claide-plugins = { dependencies = [ @@ -56,8 +76,19 @@ }; version = "0.3.0"; }; + csv = { + groups = [ "default" ]; + platforms = [ ]; + source = { + remotes = [ "https://rubygems.org" ]; + sha256 = "1kfqg0m6vqs6c67296f10cr07im5mffj90k2b5dsm51liidcsvp9"; + type = "gem"; + }; + version = "3.3.4"; + }; danger = { dependencies = [ + "base64" "claide" "claide-plugins" "colored2" @@ -67,18 +98,18 @@ "git" "kramdown" "kramdown-parser-gfm" - "no_proxy_fix" "octokit" + "pstore" "terminal-table" ]; groups = [ "default" ]; platforms = [ ]; source = { remotes = [ 
"https://rubygems.org" ]; - sha256 = "12nmycrlwr8ca2s0fx76k81gjw12iz15k1n0qanszv5d4l1ykj2l"; + sha256 = "0s6liclz7vn2q1vzraq7gq6n2rfj4p3hn2gixgnx2qvggg2qsai1"; type = "gem"; }; - version = "8.3.1"; + version = "9.5.1"; }; danger-gitlab = { dependencies = [ @@ -89,62 +120,25 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "1a530kx5s5rbx5yx3jqay56lkksqh0yj468hcpg16faiyv8dfza9"; + sha256 = "0bmsyv03n2ravjc0mzq73iairgc1apzc388jalg2c3rag1psgr47"; type = "gem"; }; - version = "8.0.0"; + version = "9.0.0"; }; faraday = { dependencies = [ - "faraday-em_http" - "faraday-em_synchrony" - "faraday-excon" - "faraday-httpclient" "faraday-net_http" - "faraday-net_http_persistent" - "faraday-patron" - "faraday-rack" - "multipart-post" - "ruby2_keywords" + "json" + "logger" ]; groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0r6ik2yvsbx6jj30vck32da2bbvj4m0gf4jhp09vr75i1d6jzfvb"; + sha256 = "0xbv450qj2bx0qz9l2pjrd3kc057y6bglc3na7a78zby8ssiwlyc"; type = "gem"; }; - version = "1.7.0"; - }; - faraday-em_http = { - groups = [ "default" ]; - platforms = [ ]; - source = { - remotes = [ "https://rubygems.org" ]; - sha256 = "12cnqpbak4vhikrh2cdn94assh3yxza8rq2p9w2j34bqg5q4qgbs"; - type = "gem"; - }; - version = "1.0.0"; - }; - faraday-em_synchrony = { - groups = [ "default" ]; - platforms = [ ]; - source = { - remotes = [ "https://rubygems.org" ]; - sha256 = "1vgrbhkp83sngv6k4mii9f2s9v5lmp693hylfxp2ssfc60fas3a6"; - type = "gem"; - }; - version = "1.0.0"; - }; - faraday-excon = { - groups = [ "default" ]; - platforms = [ ]; - source = { - remotes = [ "https://rubygems.org" ]; - sha256 = "0h09wkb0k0bhm6dqsd47ac601qiaah8qdzjh8gvxfd376x1chmdh"; - type = "gem"; - }; - version = "1.1.0"; + version = "2.13.1"; }; faraday-http-cache = { dependencies = [ "faraday" ]; @@ -152,74 +146,39 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0lhfwlk4mhmw9pdlgdsl2bq4x45w7s51jkxjryf18wym8iiw36g7"; + sha256 = "10wld3vk3i8zsr3pa9zyjiyi2zlyyln872812f08bbg1hnd15z6b"; type = "gem"; }; - version = "2.2.0"; - }; - faraday-httpclient = { - groups = [ "default" ]; - platforms = [ ]; - source = { - remotes = [ "https://rubygems.org" ]; - sha256 = "0fyk0jd3ks7fdn8nv3spnwjpzx2lmxmg2gh4inz3by1zjzqg33sc"; - type = "gem"; - }; - version = "1.0.1"; + version = "2.5.1"; }; faraday-net_http = { + dependencies = [ "net-http" ]; groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "1fi8sda5hc54v1w3mqfl5yz09nhx35kglyx72w7b8xxvdr0cwi9j"; + sha256 = "0jp5ci6g40d6i50bsywp35l97nc2fpi9a592r2cibwicdb6y9wd1"; type = "gem"; }; - version = "1.0.1"; - }; - faraday-net_http_persistent = { - groups = [ "default" ]; - platforms = [ ]; - source = { - remotes = [ "https://rubygems.org" ]; - sha256 = "0dc36ih95qw3rlccffcb0vgxjhmipsvxhn6cw71l7ffs0f7vq30b"; - type = "gem"; - }; - version = "1.2.0"; - }; - faraday-patron = { - groups = [ "default" ]; - platforms = [ ]; - source = { - remotes = [ "https://rubygems.org" ]; - sha256 = "19wgsgfq0xkski1g7m96snv39la3zxz6x7nbdgiwhg5v82rxfb6w"; - type = "gem"; - }; - version = "1.0.0"; - }; - faraday-rack = { - groups = [ "default" ]; - platforms = [ ]; - source = { - remotes = [ "https://rubygems.org" ]; - sha256 = "1h184g4vqql5jv9s9im6igy00jp6mrah2h14py6mpf9bkabfqq7g"; - type = "gem"; - }; - version = "1.0.0"; + version = "3.4.0"; }; git = { - dependencies = [ "rchardet" ]; + dependencies = [ + "addressable" + "rchardet" + ]; groups = [ "default" ]; platforms = [ ]; 
source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0s6426k24ph44kbx1qb16ciar170iczs8ivyl29ckin2ygmrrlvm"; + sha256 = "0w3xhay1z7qx9ab04wmy5p4f1fadvqa6239kib256wsiyvcj595h"; type = "gem"; }; - version = "1.9.1"; + version = "1.19.1"; }; gitlab = { dependencies = [ + "base64" "httparty" "terminal-table" ]; @@ -227,24 +186,35 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "00p8z8sxk78zik2dwdhflkvaynp5ximy2xc8cw6bz93gkr1xy8n3"; + sha256 = "1ivj6pq3s3lz8z0islynvdb3fv82ghr5k97drz07kwwqga02f702"; type = "gem"; }; - version = "4.17.0"; + version = "5.1.0"; }; httparty = { dependencies = [ - "mime-types" + "csv" + "mini_mime" "multi_xml" ]; groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "17gpnbf2a7xkvsy20jig3ljvx8hl5520rqm9pffj2jrliq1yi3w7"; + sha256 = "0mbbjr774zxb2wcpbwc93l0i481bxk7ga5hpap76w3q1y9idvh9s"; type = "gem"; }; - version = "0.18.1"; + version = "0.23.1"; + }; + json = { + groups = [ "default" ]; + platforms = [ ]; + source = { + remotes = [ "https://rubygems.org" ]; + sha256 = "0l0av82l1i5703fd5qnxr263zw21xmbpx737av3r9pjn0w0cw3xk"; + type = "gem"; + }; + version = "2.12.0"; }; kramdown = { dependencies = [ "rexml" ]; @@ -252,10 +222,10 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0jdbcjv4v7sj888bv3vc6d1dg4ackkh7ywlmn9ln2g9alk7kisar"; + sha256 = "131nwypz8b4pq1hxs6gsz3k00i9b75y3cgpkq57vxknkv6mvdfw7"; type = "gem"; }; - version = "2.3.1"; + version = "2.5.1"; }; kramdown-parser-gfm = { dependencies = [ "kramdown" ]; @@ -268,46 +238,36 @@ }; version = "1.1.0"; }; - mime-types = { - dependencies = [ "mime-types-data" ]; + logger = { groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "1zj12l9qk62anvk9bjvandpa6vy4xslil15wl6wlivyf51z773vh"; + sha256 = "00q2zznygpbls8asz5knjvvj2brr3ghmqxgr83xnrdj4rk3xwvhr"; type = "gem"; }; - version = "3.3.1"; + version = "1.7.0"; }; - mime-types-data = { + mini_mime = { groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0dlxwc75iy0dj23x824cxpvpa7c8aqcpskksrmb32j6m66h5mkcy"; + sha256 = "1vycif7pjzkr29mfk4dlqv3disc5dn0va04lkwajlpr1wkibg0c6"; type = "gem"; }; - version = "3.2021.0704"; + version = "1.1.5"; }; multi_xml = { + dependencies = [ "bigdecimal" ]; groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0lmd4f401mvravi1i1yq7b2qjjli0yq7dfc4p1nj5nwajp7r6hyj"; + sha256 = "1kl7ax7zcj8czlxs6vn3kdhpnz1dwva4y5zwnavssfv193f9cyih"; type = "gem"; }; - version = "0.6.0"; - }; - multipart-post = { - groups = [ "default" ]; - platforms = [ ]; - source = { - remotes = [ "https://rubygems.org" ]; - sha256 = "1zgw9zlwh2a6i1yvhhc4a84ry1hv824d6g2iw2chs3k5aylpmpfj"; - type = "gem"; - }; - version = "2.1.1"; + version = "0.7.2"; }; nap = { groups = [ "default" ]; @@ -319,15 +279,16 @@ }; version = "1.1.0"; }; - no_proxy_fix = { + net-http = { + dependencies = [ "uri" ]; groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "006dmdb640v1kq0sll3dnlwj1b0kpf3i1p27ygyffv8lpcqlr6sf"; + sha256 = "1ysrwaabhf0sn24jrp0nnp51cdv0jf688mh5i6fsz63q2c6b48cn"; type = "gem"; }; - version = "0.1.2"; + version = "0.6.0"; }; octokit = { dependencies = [ @@ -338,10 +299,10 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0ak64rb48d8z98nw6q70r6i0i3ivv61iqla40ss5l79491qfnn27"; + sha256 = "1s14kbjfm9vdvcrwqdarfdbfsjqs1jxpglp60plvfdvnkd9rmsc2"; 
type = "gem"; }; - version = "4.21.0"; + version = "10.0.0"; }; open4 = { groups = [ "default" ]; @@ -353,45 +314,45 @@ }; version = "1.3.4"; }; + pstore = { + groups = [ "default" ]; + platforms = [ ]; + source = { + remotes = [ "https://rubygems.org" ]; + sha256 = "1a3lrq8k62n8bazhxgdmjykni9wv0mcjks5vi1g274i3wblcgrfn"; + type = "gem"; + }; + version = "0.2.0"; + }; public_suffix = { groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "1xqcgkl7bwws1qrlnmxgh8g4g9m10vg60bhlw40fplninb3ng6d9"; + sha256 = "1543ap9w3ydhx39ljcd675cdz9cr948x9mp00ab8qvq6118wv9xz"; type = "gem"; }; - version = "4.0.6"; + version = "6.0.2"; }; rchardet = { groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "1isj1b3ywgg2m1vdlnr41lpvpm3dbyarf1lla4dfibfmad9csfk9"; + sha256 = "1455yhd1arccrns3ghhvn4dl6gnrf4zn1xxsaa33ffyqrn399216"; type = "gem"; }; - version = "1.8.0"; + version = "1.9.0"; }; rexml = { groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "08ximcyfjy94pm1rhcx04ny1vx2sk0x4y185gzn86yfsbzwkng53"; + sha256 = "1jmbf6lf7pcyacpb939xjjpn1f84c3nw83dy3p1lwjx0l2ljfif7"; type = "gem"; }; - version = "3.2.5"; - }; - ruby2_keywords = { - groups = [ "default" ]; - platforms = [ ]; - source = { - remotes = [ "https://rubygems.org" ]; - sha256 = "1vz322p8n39hz3b4a9gkmz9y7a5jaz41zrm2ywf31dvkqm03glgz"; - type = "gem"; - }; - version = "0.0.5"; + version = "3.4.1"; }; sawyer = { dependencies = [ @@ -402,10 +363,10 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0yrdchs3psh583rjapkv33mljdivggqn99wkydkjdckcjn43j3cz"; + sha256 = "1jks1qjbmqm8f9kvwa81vqj39avaj9wdnzc531xm29a55bb74fps"; type = "gem"; }; - version = "0.8.2"; + version = "0.9.2"; }; terminal-table = { dependencies = [ "unicode-display_width" ]; @@ -413,19 +374,29 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "1512cngw35hsmhvw4c05rscihc59mnj09m249sm9p3pik831ydqk"; + sha256 = "14dfmfjppmng5hwj7c5ka6qdapawm3h6k9lhn8zj001ybypvclgr"; type = "gem"; }; - version = "1.8.0"; + version = "3.0.2"; }; unicode-display_width = { groups = [ "default" ]; platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "06i3id27s60141x6fdnjn5rar1cywdwy64ilc59cz937303q3mna"; + sha256 = "0nkz7fadlrdbkf37m0x7sw8bnz8r355q3vwcfb9f9md6pds9h9qj"; type = "gem"; }; - version = "1.7.0"; + version = "2.6.0"; + }; + uri = { + groups = [ "default" ]; + platforms = [ ]; + source = { + remotes = [ "https://rubygems.org" ]; + sha256 = "04bhfvc25b07jaiaf62yrach7khhr5jlr5bx6nygg8pf11329wp9"; + type = "gem"; + }; + version = "1.0.3"; }; } From 362bea776ebe79664f513cfc74f2f9e8b587ab07 Mon Sep 17 00:00:00 2001 From: Jonathan Stacks Date: Tue, 20 May 2025 14:41:58 -0500 Subject: [PATCH 051/220] ngrok 3.19.1 -> 3.22.1 --- pkgs/by-name/ng/ngrok/versions.json | 36 ++++++++++++++--------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/pkgs/by-name/ng/ngrok/versions.json b/pkgs/by-name/ng/ngrok/versions.json index 534f529f3337..8831f0240f5c 100644 --- a/pkgs/by-name/ng/ngrok/versions.json +++ b/pkgs/by-name/ng/ngrok/versions.json @@ -1,38 +1,38 @@ { "linux-386": { "sys": "linux-386", - "url": "https://bin.equinox.io/a/7rWAoLaoN6E/ngrok-v3-3.19.1-linux-386", - "sha256": "ade3cb371e0420b4d314051f702029661ec051158892ae8de87b0dd3fb8c9ae2", - "version": "3.19.1" + "url": "https://bin.equinox.io/a/fK7zHbUXRW4/ngrok-v3-3.22.1-linux-386", + "sha256": 
"25d1317d5f2014b6ee8e27408256c96efb68e5ae855ae31ec78f39a16c6af2c9", + "version": "3.22.1" }, "linux-amd64": { "sys": "linux-amd64", - "url": "https://bin.equinox.io/a/aNKWdiDQehF/ngrok-v3-3.19.1-linux-amd64", - "sha256": "eea9510a71beab13f50024c23938d00ba9cfe4a8b4840030b8432c8637b4427a", - "version": "3.19.1" + "url": "https://bin.equinox.io/a/jBvFVwnCxra/ngrok-v3-3.22.1-linux-amd64", + "sha256": "edf3b724fd9768c380257aec415ea0636f3e1e8d4f67318e3e1ad71c71fb7c3d", + "version": "3.22.1" }, "linux-arm": { "sys": "linux-arm", - "url": "https://bin.equinox.io/a/fHwvcnrN4W1/ngrok-v3-3.19.1-linux-arm", - "sha256": "ff1260e987641b0b280e5da3004d020093745a7586ecca65e1025bc3738d55d9", - "version": "3.19.1" + "url": "https://bin.equinox.io/a/3nCe4mzMEnu/ngrok-v3-3.22.1-linux-arm", + "sha256": "ef9e6d0796d9e73e3811a6b45f40dc534b2bafdf2c53e9d837d633417916bf0e", + "version": "3.22.1" }, "linux-arm64": { "sys": "linux-arm64", - "url": "https://bin.equinox.io/a/ckBcN6JRV3s/ngrok-v3-3.19.1-linux-arm64", - "sha256": "1f8eec521c00eece4a4a15750927dc492f1243e34598868b15996940ab2bed5b", - "version": "3.19.1" + "url": "https://bin.equinox.io/a/7qiGrRXJ1od/ngrok-v3-3.22.1-linux-arm64", + "sha256": "ef154e04bbc0d48a28f387c20ccf7a57d38485fbdad2c7e46c04749cd79e42b5", + "version": "3.22.1" }, "darwin-amd64": { "sys": "darwin-amd64", - "url": "https://bin.equinox.io/a/yubNbWvsvB/ngrok-v3-3.19.1-darwin-amd64", - "sha256": "4e19fee94598a74164516a8b439742bd8bee8844bfea4e3f41ba33b761323583", - "version": "3.19.1" + "url": "https://bin.equinox.io/a/bCPsqUE6DvJ/ngrok-v3-3.22.1-darwin-amd64", + "sha256": "7b2fb1bb04a4e18756ff59903bc5dc06a99a3426713058259f5359965a699b70", + "version": "3.22.1" }, "darwin-arm64": { "sys": "darwin-arm64", - "url": "https://bin.equinox.io/a/iv6WKkDK2i5/ngrok-v3-3.19.1-darwin-arm64", - "sha256": "1da4acdf28b7c64ded056d29a2f3bb452481b4112a04f520f33fcead8794e2a1", - "version": "3.19.1" + "url": "https://bin.equinox.io/a/4XSXTVcG6uw/ngrok-v3-3.22.1-darwin-arm64", + "sha256": "7911865275673426fc8bc24afbbe079a3047e61300ffe622b6e6214d4da85786", + "version": "3.22.1" } } From c82dedcfb0a62a600625caf5b03dcbc0b182ae54 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 20 May 2025 20:36:28 +0000 Subject: [PATCH 052/220] dnscrypt-proxy: 2.1.8 -> 2.1.9 --- pkgs/by-name/dn/dnscrypt-proxy/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/dn/dnscrypt-proxy/package.nix b/pkgs/by-name/dn/dnscrypt-proxy/package.nix index 0084aa0c5120..b1dcc7900ddc 100644 --- a/pkgs/by-name/dn/dnscrypt-proxy/package.nix +++ b/pkgs/by-name/dn/dnscrypt-proxy/package.nix @@ -7,7 +7,7 @@ buildGoModule rec { pname = "dnscrypt-proxy"; - version = "2.1.8"; + version = "2.1.9"; vendorHash = null; @@ -17,7 +17,7 @@ buildGoModule rec { owner = "DNSCrypt"; repo = "dnscrypt-proxy"; rev = version; - sha256 = "sha256-/D5RE8AbI9i9TVdFQCYW8OLPU4TgIIDRsZfWEyXo92g="; + sha256 = "sha256-8KnanJw9eBFm/zdy6f4OFCMStzic/n4Alnm5Y/pbDCA="; }; passthru.tests = { inherit (nixosTests) dnscrypt-proxy2; }; From a371ed99e11d8afe5c144a5182af04120ed65a61 Mon Sep 17 00:00:00 2001 From: Matt Sturgeon Date: Fri, 16 May 2025 18:38:29 +0100 Subject: [PATCH 053/220] nixfmt-tree: remove `tree-root-file` default This isn't needed since treefmt 2.3.1, as it can now determine the tree root by asking `git`. 
--- pkgs/by-name/ni/nixfmt-tree/package.nix | 98 ++++++++++++++----------- 1 file changed, 54 insertions(+), 44 deletions(-) diff --git a/pkgs/by-name/ni/nixfmt-tree/package.nix b/pkgs/by-name/ni/nixfmt-tree/package.nix index f9a7991544ef..a36a449b3449 100644 --- a/pkgs/by-name/ni/nixfmt-tree/package.nix +++ b/pkgs/by-name/ni/nixfmt-tree/package.nix @@ -4,6 +4,8 @@ treefmt, nixfmt-rfc-style, nixfmt-tree, + git, + writableTmpDirAsHomeHook, settings ? { }, runtimeInputs ? [ ], @@ -30,9 +32,6 @@ let # The default is warn, which would be too annoying for people who just care about Nix on-unmatched = lib.mkOptionDefault "info"; - # Assume the tree is a Git repository, will fail if it's not - tree-root-file = lib.mkOptionDefault ".git/index"; - # NOTE: The `mkIf` condition should not be needed once `runtimePackages` is removed. formatter.nixfmt = lib.mkIf (lib.any isNixfmt allRuntimeInputs) { command = "nixfmt"; @@ -102,9 +101,6 @@ treefmtWithConfig.overrideAttrs { # Log level for files treefmt won't format on-unmatched = "info"; - # Assume the tree is a Git repository, will fail if it's not - tree-root-file = ".git/index"; - # Configure nixfmt for .nix files formatter.nixfmt = { command = "nixfmt"; @@ -122,49 +118,63 @@ treefmtWithConfig.overrideAttrs { platforms = lib.platforms.all; }; - passthru.tests.simple = runCommand "nixfmt-tree-test-simple" { } '' - export XDG_CACHE_HOME=$(mktemp -d) - cat > unformatted.nix < formatted.nix < unformatted.nix < formatted.nix < Date: Wed, 21 May 2025 21:59:09 +0200 Subject: [PATCH 054/220] cerca: 0-unstable-2025-05-10 -> 0-unstable-2025-05-21 --- pkgs/by-name/ce/cerca/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ce/cerca/package.nix b/pkgs/by-name/ce/cerca/package.nix index b8787a7a47df..1a9134458728 100644 --- a/pkgs/by-name/ce/cerca/package.nix +++ b/pkgs/by-name/ce/cerca/package.nix @@ -6,13 +6,13 @@ buildGoModule rec { pname = "cerca"; - version = "0-unstable-2025-05-06"; + version = "0-unstable-2025-05-21"; src = fetchFromGitHub { owner = "cblgh"; repo = "cerca"; - rev = "a2706a35e3efc8b816b4374e24493548429041db"; - hash = "sha256-FDlASFjI+D/iOH0r2Yd638aS0na19TxkN7Z1kD/o/fY"; + rev = "722c38d96160ccf69dd7a8122b62660102b64a59"; + hash = "sha256-M5INnik/TIzH0Afi8/6/PnhwsAhd+kFaDHejfsmuhn0="; }; vendorHash = "sha256-yfsI0nKfzyzmtbS9bSHRaD2pEgxN6gOKAA/FRDxJx40="; From 32e4540bc039a49717fd0663ac2727c37ff8f9ca Mon Sep 17 00:00:00 2001 From: Amadej Kastelic Date: Wed, 21 May 2025 22:06:31 +0200 Subject: [PATCH 055/220] xrdp: 0.10.1 -> 0.10.3, xorgxrdp: 0.10.2 -> 0.10.4 --- pkgs/by-name/xr/xrdp/package.nix | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkgs/by-name/xr/xrdp/package.nix b/pkgs/by-name/xr/xrdp/package.nix index 69e8592aa7dd..b48f43dbb2b7 100644 --- a/pkgs/by-name/xr/xrdp/package.nix +++ b/pkgs/by-name/xr/xrdp/package.nix @@ -12,7 +12,7 @@ openssl, systemd, pam, - fuse, + fuse3, libdrm, libjpeg, libopus, @@ -29,13 +29,13 @@ let xorgxrdp = stdenv.mkDerivation rec { pname = "xorgxrdp"; - version = "0.10.2"; + version = "0.10.4"; src = fetchFromGitHub { owner = "neutrinolabs"; repo = "xorgxrdp"; rev = "v${version}"; - hash = "sha256-xwkGY9dD747kyTvoXrYAIoiFBzQe5ngskUYQhDawnbU="; + hash = "sha256-TuzUerfOn8+3YfueG00IBP9sMpvy2deyL16mWQ8cRHg="; }; nativeBuildInputs = [ @@ -74,7 +74,7 @@ let xrdp = stdenv.mkDerivation rec { pname = "xrdp"; - version = "0.10.1"; + version = "0.10.3"; src = applyPatches { inherit version; @@ -85,7 +85,7 @@ let repo = "xrdp"; rev = 
"v${version}"; fetchSubmodules = true; - hash = "sha256-lqifQJ/JX+0304arVctsEBEDFPhEPn2OWLyjAQW1who="; + hash = "sha256-6QSz0a0ed1UxfYYibehPgGUzU/xf1HmqEvVE4xU5hRg="; }; }; @@ -100,7 +100,7 @@ let ]; buildInputs = [ - fuse + fuse3 lame libjpeg libjpeg_turbo From 54c9bcabb40dc309e76370209f6e21bc79d47cd3 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Wed, 21 May 2025 20:24:33 +0000 Subject: [PATCH 056/220] texturepacker: 7.6.3 -> 7.7.0 --- pkgs/by-name/te/texturepacker/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/te/texturepacker/package.nix b/pkgs/by-name/te/texturepacker/package.nix index 61dc23ea5ee6..389c685de710 100644 --- a/pkgs/by-name/te/texturepacker/package.nix +++ b/pkgs/by-name/te/texturepacker/package.nix @@ -9,11 +9,11 @@ stdenv.mkDerivation (finalAttrs: { pname = "texturepacker"; - version = "7.6.3"; + version = "7.7.0"; src = fetchurl { url = "https://www.codeandweb.com/download/texturepacker/${finalAttrs.version}/TexturePacker-${finalAttrs.version}.deb"; - hash = "sha256-A1YNy6Y5EdOnV0dY0VN/k7nX26L/uaHqDHmdC5N1Otk="; + hash = "sha256-0HSKushYQXt13z619ZmZt0ADpQA20jmRrext1w6/Ghc="; }; nativeBuildInputs = [ From 6800fbd4fe1dd76885335868fa64d94ee5732919 Mon Sep 17 00:00:00 2001 From: Mynacol Date: Fri, 16 May 2025 16:33:00 +0000 Subject: [PATCH 057/220] leanify: unstable-2023-12-17 -> unstable-2025-05-20 --- pkgs/by-name/le/leanify/package.nix | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/le/leanify/package.nix b/pkgs/by-name/le/leanify/package.nix index e54f4bc74686..11193f2dbf53 100644 --- a/pkgs/by-name/le/leanify/package.nix +++ b/pkgs/by-name/le/leanify/package.nix @@ -7,13 +7,13 @@ stdenv.mkDerivation { pname = "leanify"; - version = "unstable-2023-12-17"; + version = "unstable-2025-05-15"; src = fetchFromGitHub { owner = "JayXon"; repo = "Leanify"; - rev = "9daa4303cdc03f6b90b72c369e6377c6beb75c39"; - hash = "sha256-fLazKCQnOT3bN3Kz25Q80RLk54EU5U6HCf6kPLcXn9c="; + rev = "42770e600b32962e7110c24b5fcaa8c7c2144b17"; + hash = "sha256-njfMR2DSKeVh+ZUewall7837E9JY3nhrTxO4LOY1pEo="; }; postPatch = lib.optionalString stdenv.hostPlatform.isDarwin '' @@ -29,7 +29,9 @@ stdenv.mkDerivation { checkPhase = '' runHook preCheck + ./leanify /dev/null + runHook postCheck ''; From 697e57a9ee3bac161d3f90a31baf0ea181c7e4f4 Mon Sep 17 00:00:00 2001 From: Negate This Date: Wed, 21 May 2025 16:42:15 -0400 Subject: [PATCH 058/220] rescrobbled: 0.7.1 -> 0.7.2 --- pkgs/by-name/re/rescrobbled/package.nix | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkgs/by-name/re/rescrobbled/package.nix b/pkgs/by-name/re/rescrobbled/package.nix index 59914fbb785c..eef97daf1511 100644 --- a/pkgs/by-name/re/rescrobbled/package.nix +++ b/pkgs/by-name/re/rescrobbled/package.nix @@ -1,6 +1,6 @@ { lib, - bash, + dash, fetchFromGitHub, rustPlatform, pkg-config, @@ -11,17 +11,17 @@ rustPlatform.buildRustPackage rec { pname = "rescrobbled"; - version = "0.7.1"; + version = "0.7.2"; src = fetchFromGitHub { owner = "InputUsername"; repo = "rescrobbled"; rev = "v${version}"; - hash = "sha256-1E+SeKjHCah+IFn2QLAyyv7jgEcZ1gtkh8iHgiVBuz4="; + hash = "sha256-HWv0r0eqzY4q+Q604ZIkdhnjmCGX+L6HHXa6iCtH2KE="; }; useFetchCargoVendor = true; - cargoHash = "sha256-oXj3pMT7lBcj/cNa6FY8ehr9TVSRUwqW3B4g5VeyH2w="; + cargoHash = "sha256-zZqDbXIXuNX914EmeSv3hZFnpjYzYdYZk7av3W60YuM="; nativeBuildInputs = [ pkg-config ]; @@ -32,11 +32,11 @@ rustPlatform.buildRustPackage rec { postPatch = '' # Required for tests - 
substituteInPlace src/filter.rs --replace '#!/usr/bin/bash' '#!${bash}/bin/bash' + substituteInPlace src/filter.rs --replace-fail '#!/usr/bin/env sh' '#!${dash}/bin/dash' ''; postInstall = '' - substituteInPlace rescrobbled.service --replace '%h/.cargo/bin/rescrobbled' "$out/bin/rescrobbled" + substituteInPlace rescrobbled.service --replace-fail '%h/.cargo/bin/rescrobbled' "$out/bin/rescrobbled" install -Dm644 rescrobbled.service -t "$out/share/systemd/user" ''; From 9d80569f4671b4e2dcd199983fe1a98d34ac6eb6 Mon Sep 17 00:00:00 2001 From: Thomas Gerbet Date: Thu, 15 May 2025 12:57:32 +0200 Subject: [PATCH 059/220] open-vm-tools: 12.5.0 -> 12.5.2 Fixes CVE-2025-22247. https://github.com/vmware/open-vm-tools/releases/tag/stable-12.5.2 --- pkgs/by-name/op/open-vm-tools/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/op/open-vm-tools/package.nix b/pkgs/by-name/op/open-vm-tools/package.nix index aa5290520a8c..b24a8459b35b 100644 --- a/pkgs/by-name/op/open-vm-tools/package.nix +++ b/pkgs/by-name/op/open-vm-tools/package.nix @@ -51,13 +51,13 @@ in stdenv.mkDerivation (finalAttrs: { pname = "open-vm-tools"; - version = "12.5.0"; + version = "12.5.2"; src = fetchFromGitHub { owner = "vmware"; repo = "open-vm-tools"; - rev = "stable-${finalAttrs.version}"; - hash = "sha256-pjMXhVN4xdmPCk1Aeb83VZjDJ1t1mb9wryC6h3O+Qvc="; + tag = "stable-${finalAttrs.version}"; + hash = "sha256-gKtPyLsmTrbA3aG/Jiod/oAl5aMpVm3enuCe+b7jsY4="; }; sourceRoot = "${finalAttrs.src.name}/open-vm-tools"; From d1465787ab2c1667d9ae4738e11f81c6a788fc25 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Thu, 22 May 2025 09:53:12 +0000 Subject: [PATCH 060/220] leo-editor: 6.8.3 -> 6.8.4 --- pkgs/applications/editors/leo-editor/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/applications/editors/leo-editor/default.nix b/pkgs/applications/editors/leo-editor/default.nix index b4d32534e284..3802ea035558 100644 --- a/pkgs/applications/editors/leo-editor/default.nix +++ b/pkgs/applications/editors/leo-editor/default.nix @@ -10,13 +10,13 @@ mkDerivation rec { pname = "leo-editor"; - version = "6.8.3"; + version = "6.8.4"; src = fetchFromGitHub { owner = "leo-editor"; repo = "leo-editor"; rev = version; - sha256 = "sha256-nK6JMR4XrxZxvLOAsYjuyHQo/sob+OLSk/8U3zZ/Iyo="; + sha256 = "sha256-CSugdfkAMy6VFdNdSGR+iCrK/XhwseoiMQ4mfgu4F/E="; }; dontBuild = true; From 19b0dd56a138b62be1c406ad7031ce8852af40eb Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Thu, 22 May 2025 12:36:30 +0000 Subject: [PATCH 061/220] libplacebo: 7.349.0 -> 7.351.0 --- pkgs/by-name/li/libplacebo/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/li/libplacebo/package.nix b/pkgs/by-name/li/libplacebo/package.nix index d50c722327ac..b959f62574b9 100644 --- a/pkgs/by-name/li/libplacebo/package.nix +++ b/pkgs/by-name/li/libplacebo/package.nix @@ -21,14 +21,14 @@ stdenv.mkDerivation rec { pname = "libplacebo"; - version = "7.349.0"; + version = "7.351.0"; src = fetchFromGitLab { domain = "code.videolan.org"; owner = "videolan"; repo = "libplacebo"; rev = "v${version}"; - hash = "sha256-mIjQvc7SRjE1Orb2BkHK+K1TcRQvzj2oUOCUT4DzIuA="; + hash = "sha256-ccoEFpp6tOFdrfMyE0JNKKMAdN4Q95tP7j7vzUj+lSQ="; }; nativeBuildInputs = [ From 1978b03a41fdaf7696c93a7dc34d11050ba69908 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Thu, 22 May 2025 14:17:45 +0000 Subject: [PATCH 062/220] vcpkg-tool-unwrapped: 2025-04-16 -> 2025-05-19 --- pkgs/by-name/vc/vcpkg-tool/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/vc/vcpkg-tool/package.nix b/pkgs/by-name/vc/vcpkg-tool/package.nix index b497e3d30fad..5817c9b49f86 100644 --- a/pkgs/by-name/vc/vcpkg-tool/package.nix +++ b/pkgs/by-name/vc/vcpkg-tool/package.nix @@ -24,13 +24,13 @@ }: stdenv.mkDerivation (finalAttrs: { pname = "vcpkg-tool"; - version = "2025-04-16"; + version = "2025-05-19"; src = fetchFromGitHub { owner = "microsoft"; repo = "vcpkg-tool"; rev = finalAttrs.version; - hash = "sha256-4XqpYEbE7TJyfdDPomcghII3iqcoX99I2GDuSHX5q2g="; + hash = "sha256-st9VLiuvKHKkokUToxw4KQ4aekGMqx8rfVBmmeddgVk="; }; nativeBuildInputs = [ From e520e12ffdf6433fedb0501acce368487a8cfd7b Mon Sep 17 00:00:00 2001 From: luftmensch-luftmensch Date: Thu, 22 May 2025 18:19:49 +0200 Subject: [PATCH 063/220] mpvScripts.modernx-zydezu: 0.4.1 -> 0.4.2 --- pkgs/applications/video/mpv/scripts/modernx-zydezu.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/applications/video/mpv/scripts/modernx-zydezu.nix b/pkgs/applications/video/mpv/scripts/modernx-zydezu.nix index e59fce3b9cec..618d40b2006c 100644 --- a/pkgs/applications/video/mpv/scripts/modernx-zydezu.nix +++ b/pkgs/applications/video/mpv/scripts/modernx-zydezu.nix @@ -7,14 +7,14 @@ }: buildLua (finalAttrs: { pname = "modernx-zydezu"; - version = "0.4.1"; + version = "0.4.2"; scriptPath = "modernx.lua"; src = fetchFromGitHub { owner = "zydezu"; repo = "ModernX"; rev = finalAttrs.version; - hash = "sha256-tm1vsHEFX2YnQ1w3DcLd/zHASetkqQ4wYcYT9w8HVok="; + hash = "sha256-7DkW3b0YEkV4VPURcg4kkUy8pSTFFb8jaJOuEtzTDow="; }; postInstall = '' From 001f99b21ff5139e64b6c378981046106010eb00 Mon Sep 17 00:00:00 2001 From: Guy Chronister Date: Thu, 22 May 2025 19:57:19 +0000 Subject: [PATCH 064/220] zsteg: Migrate to by-name --- pkgs/{tools/security => by-name/zs}/zsteg/Gemfile | 0 pkgs/{tools/security => by-name/zs}/zsteg/Gemfile.lock | 0 pkgs/{tools/security => by-name/zs}/zsteg/gemset.nix | 0 .../security/zsteg/default.nix => by-name/zs/zsteg/package.nix} | 0 pkgs/top-level/all-packages.nix | 2 -- 5 files changed, 2 deletions(-) rename pkgs/{tools/security => by-name/zs}/zsteg/Gemfile (100%) rename pkgs/{tools/security => by-name/zs}/zsteg/Gemfile.lock (100%) rename pkgs/{tools/security => by-name/zs}/zsteg/gemset.nix (100%) rename pkgs/{tools/security/zsteg/default.nix => by-name/zs/zsteg/package.nix} (100%) diff --git a/pkgs/tools/security/zsteg/Gemfile b/pkgs/by-name/zs/zsteg/Gemfile similarity index 100% rename from pkgs/tools/security/zsteg/Gemfile rename to pkgs/by-name/zs/zsteg/Gemfile diff --git a/pkgs/tools/security/zsteg/Gemfile.lock b/pkgs/by-name/zs/zsteg/Gemfile.lock similarity index 100% rename from pkgs/tools/security/zsteg/Gemfile.lock rename to pkgs/by-name/zs/zsteg/Gemfile.lock diff --git a/pkgs/tools/security/zsteg/gemset.nix b/pkgs/by-name/zs/zsteg/gemset.nix similarity index 100% rename from pkgs/tools/security/zsteg/gemset.nix rename to pkgs/by-name/zs/zsteg/gemset.nix diff --git a/pkgs/tools/security/zsteg/default.nix b/pkgs/by-name/zs/zsteg/package.nix similarity index 100% rename from pkgs/tools/security/zsteg/default.nix rename to pkgs/by-name/zs/zsteg/package.nix diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix index d8ed42c1930a..6dd81b012e7c 100644 --- a/pkgs/top-level/all-packages.nix +++ 
b/pkgs/top-level/all-packages.nix @@ -14662,8 +14662,6 @@ with pkgs; zotero_7 = pkgs.zotero; - zsteg = callPackage ../tools/security/zsteg { }; - zynaddsubfx = callPackage ../applications/audio/zynaddsubfx { guiModule = "zest"; fftw = fftwSinglePrec; From c322f8d7c87a81d98accd2a461e814be3852d632 Mon Sep 17 00:00:00 2001 From: Guy Chronister Date: Thu, 22 May 2025 20:00:34 +0000 Subject: [PATCH 065/220] zsteg: Update gems and add missing bundler updater. --- pkgs/by-name/zs/zsteg/Gemfile.lock | 8 ++++---- pkgs/by-name/zs/zsteg/gemset.nix | 12 ++++++------ pkgs/by-name/zs/zsteg/package.nix | 10 ++++++++-- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/pkgs/by-name/zs/zsteg/Gemfile.lock b/pkgs/by-name/zs/zsteg/Gemfile.lock index 0cd593f60a0f..ef7578657924 100644 --- a/pkgs/by-name/zs/zsteg/Gemfile.lock +++ b/pkgs/by-name/zs/zsteg/Gemfile.lock @@ -2,12 +2,12 @@ GEM remote: https://rubygems.org/ specs: forwardable (1.3.3) - iostruct (0.0.5) - prime (0.1.2) + iostruct (0.5.0) + prime (0.1.3) forwardable singleton rainbow (3.1.1) - singleton (0.2.0) + singleton (0.3.0) zpng (0.4.5) rainbow (~> 3.1.1) zsteg (0.2.13) @@ -22,4 +22,4 @@ DEPENDENCIES zsteg BUNDLED WITH - 2.4.13 + 2.6.6 diff --git a/pkgs/by-name/zs/zsteg/gemset.nix b/pkgs/by-name/zs/zsteg/gemset.nix index 07dfcf016f01..37fa75cb8113 100644 --- a/pkgs/by-name/zs/zsteg/gemset.nix +++ b/pkgs/by-name/zs/zsteg/gemset.nix @@ -14,10 +14,10 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "1z3vnb8mhzns3ybf78vlj5cy6lq4pyfm8n40kqba2s33xccs3kl0"; + sha256 = "0pswyhjz9d90bympsz6s0rgv24b8nrd4lk5y16kz67vdw6vbaqbp"; type = "gem"; }; - version = "0.0.5"; + version = "0.5.0"; }; prime = { dependencies = [ @@ -28,10 +28,10 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "1973kz8lbck6ga5v42f55jk8b8pnbgwp9p67dl1xw15gvz55dsfl"; + sha256 = "1qsk9q2n4yb80f5mwslxzfzm2ckar25grghk95cj7sbc1p2k3w5s"; type = "gem"; }; - version = "0.1.2"; + version = "0.1.3"; }; rainbow = { groups = [ "default" ]; @@ -48,10 +48,10 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0qq54imvbksnckzf9hrq9bjzcdb0n8wfv6l5jc0di10n88277jx6"; + sha256 = "0y2pc7lr979pab5n5lvk3jhsi99fhskl5f2s6004v8sabz51psl3"; type = "gem"; }; - version = "0.2.0"; + version = "0.3.0"; }; zpng = { dependencies = [ "rainbow" ]; diff --git a/pkgs/by-name/zs/zsteg/package.nix b/pkgs/by-name/zs/zsteg/package.nix index c078189f4f29..d2f58193729a 100644 --- a/pkgs/by-name/zs/zsteg/package.nix +++ b/pkgs/by-name/zs/zsteg/package.nix @@ -1,12 +1,18 @@ -{ lib, bundlerApp }: +{ + lib, + bundlerApp, + bundlerUpdateScript, +}: -bundlerApp { +bundlerApp rec { pname = "zsteg"; gemdir = ./.; exes = [ "zsteg" ]; + passthru.updateScript = bundlerUpdateScript pname; + meta = with lib; { description = "Detect stegano-hidden data in PNG & BMP"; homepage = "http://zed.0xff.me/"; From 6cf1923dad62aad0df4def53c80b007d93f394c8 Mon Sep 17 00:00:00 2001 From: Guy Chronister Date: Thu, 22 May 2025 20:44:29 +0000 Subject: [PATCH 066/220] sublime_syntax_convertor: Add bundler updater to easily update gems. 
--- pkgs/by-name/su/sublime_syntax_convertor/Gemfile.lock | 5 ++--- pkgs/by-name/su/sublime_syntax_convertor/gemset.nix | 4 ++-- pkgs/by-name/su/sublime_syntax_convertor/package.nix | 11 +++++++++-- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/pkgs/by-name/su/sublime_syntax_convertor/Gemfile.lock b/pkgs/by-name/su/sublime_syntax_convertor/Gemfile.lock index 82c833feb066..1f079d3c6c56 100644 --- a/pkgs/by-name/su/sublime_syntax_convertor/Gemfile.lock +++ b/pkgs/by-name/su/sublime_syntax_convertor/Gemfile.lock @@ -4,16 +4,15 @@ GEM GEM remote: https://rubygems.org/ specs: - plist (3.7.1) + plist (3.7.2) sublime_syntax_convertor (0.1.0) plist PLATFORMS ruby - x86_64-linux DEPENDENCIES sublime_syntax_convertor! BUNDLED WITH - 2.5.5 + 2.6.6 diff --git a/pkgs/by-name/su/sublime_syntax_convertor/gemset.nix b/pkgs/by-name/su/sublime_syntax_convertor/gemset.nix index 246a8f8e5297..86122b31f069 100644 --- a/pkgs/by-name/su/sublime_syntax_convertor/gemset.nix +++ b/pkgs/by-name/su/sublime_syntax_convertor/gemset.nix @@ -4,10 +4,10 @@ platforms = [ ]; source = { remotes = [ "https://rubygems.org" ]; - sha256 = "0b643i5b7b7galvlb2fc414ifmb78b5lsq47gnvhzl8m27dl559z"; + sha256 = "0hlaf4b3d8grxm9fqbnam5gwd55wvghl0jyzjd1hc5hirhklaynk"; type = "gem"; }; - version = "3.7.1"; + version = "3.7.2"; }; sublime_syntax_convertor = { dependencies = [ "plist" ]; diff --git a/pkgs/by-name/su/sublime_syntax_convertor/package.nix b/pkgs/by-name/su/sublime_syntax_convertor/package.nix index 869498bfb4ed..2a654168bccb 100644 --- a/pkgs/by-name/su/sublime_syntax_convertor/package.nix +++ b/pkgs/by-name/su/sublime_syntax_convertor/package.nix @@ -1,9 +1,16 @@ -{ lib, bundlerApp }: -bundlerApp { +{ + lib, + bundlerApp, + bundlerUpdateScript, +}: + +bundlerApp rec { pname = "sublime_syntax_convertor"; gemdir = ./.; exes = [ "sublime_syntax_convertor" ]; + passthru.updateScript = bundlerUpdateScript pname; + meta = { description = "Converts tmLanguage to sublime-syntax"; homepage = "https://github.com/aziz/SublimeSyntaxConvertor/"; From 3455352762ce4896ce76b6bc1dc561e33d4ed009 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Fri, 23 May 2025 01:06:04 +0000 Subject: [PATCH 067/220] gh-gei: 1.15.0 -> 1.15.1 --- pkgs/by-name/gh/gh-gei/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/gh/gh-gei/package.nix b/pkgs/by-name/gh/gh-gei/package.nix index 68f9040a6036..73f360632092 100644 --- a/pkgs/by-name/gh/gh-gei/package.nix +++ b/pkgs/by-name/gh/gh-gei/package.nix @@ -7,13 +7,13 @@ buildDotnetModule rec { pname = "gh-gei"; - version = "1.15.0"; + version = "1.15.1"; src = fetchFromGitHub { owner = "github"; repo = "gh-gei"; rev = "v${version}"; - hash = "sha256-33Npwf4C6IFrrsIRq4+udphfovaCXQ8JfN0yzfxIRq0="; + hash = "sha256-Iuhz/kaamgMWNxilNvCRnjdTFrhSPhHpFKYllQ8OuGU="; }; dotnet-sdk = dotnetCorePackages.sdk_8_0_4xx; From 1d7de97be77fd14b18630c554926a5965f45977f Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Fri, 23 May 2025 05:07:46 +0000 Subject: [PATCH 068/220] python3Packages.sensorpro-ble: 0.7.0 -> 0.7.1 --- pkgs/development/python-modules/sensorpro-ble/default.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/development/python-modules/sensorpro-ble/default.nix b/pkgs/development/python-modules/sensorpro-ble/default.nix index 7fb91f04c097..dec2a5b5a5ef 100644 --- a/pkgs/development/python-modules/sensorpro-ble/default.nix +++ b/pkgs/development/python-modules/sensorpro-ble/default.nix @@ -13,7 +13,7 @@ buildPythonPackage rec { pname = "sensorpro-ble"; - version = "0.7.0"; + version = "0.7.1"; pyproject = true; disabled = pythonOlder "3.9"; @@ -22,7 +22,7 @@ buildPythonPackage rec { owner = "Bluetooth-Devices"; repo = "sensorpro-ble"; tag = "v${version}"; - hash = "sha256-YMcpe4daM4X23nOMubYNcmqlW8PttwDGC4WL9g4P+4I="; + hash = "sha256-/brgy3B/Hqgu1M4xmjciXJx25btN/iFgjT0TgTdij2o="; }; build-system = [ poetry-core ]; @@ -43,7 +43,7 @@ buildPythonPackage rec { meta = with lib; { description = "Library for Sensorpro BLE devices"; homepage = "https://github.com/Bluetooth-Devices/sensorpro-ble"; - changelog = "https://github.com/Bluetooth-Devices/sensorpro-ble/blob/v${version}/CHANGELOG.md"; + changelog = "https://github.com/Bluetooth-Devices/sensorpro-ble/blob/${src.tag}/CHANGELOG.md"; license = licenses.mit; maintainers = with maintainers; [ fab ]; }; From 6e809040cc7a7e17357eb98ca7a98a57807b9f7d Mon Sep 17 00:00:00 2001 From: eymeric Date: Fri, 23 May 2025 09:05:56 +0200 Subject: [PATCH 069/220] melos: 6.2.0 -> 6.3.2 --- pkgs/by-name/me/melos/package.nix | 4 +-- pkgs/by-name/me/melos/pubspec.lock.json | 40 +++++++++---------------- 2 files changed, 16 insertions(+), 28 deletions(-) diff --git a/pkgs/by-name/me/melos/package.nix b/pkgs/by-name/me/melos/package.nix index 4d9654af977c..a821d74fdb66 100644 --- a/pkgs/by-name/me/melos/package.nix +++ b/pkgs/by-name/me/melos/package.nix @@ -5,12 +5,12 @@ }: let pname = "melos"; - version = "6.2.0"; + version = "6.3.2"; src = fetchFromGitHub { owner = "invertase"; repo = "melos"; rev = "melos-v${version}"; - hash = "sha256-00K/LwrwjvO4LnXM2PDooQMJ6sXcJy9FBErtEwoMZlM="; + hash = "sha256-hD4UlQPFugRqtOZecyT/6wV3vFocoQ6OO5w+SZsYdO0="; }; in buildDartApplication { diff --git a/pkgs/by-name/me/melos/pubspec.lock.json b/pkgs/by-name/me/melos/pubspec.lock.json index 959553a3e607..3b04d9d91464 100644 --- a/pkgs/by-name/me/melos/pubspec.lock.json +++ b/pkgs/by-name/me/melos/pubspec.lock.json @@ -100,6 +100,16 @@ "source": "hosted", "version": "1.3.1" }, + "checked_yaml": { + "dependency": "transitive", + "description": { + "name": "checked_yaml", + "sha256": "feb6bed21949061731a7a75fc5d2aa727cf160b91af9a3e464c5e3a32e28b5ff", + "url": "https://pub.dev" + }, + "source": "hosted", + "version": "2.0.3" + }, "cli_launcher": { "dependency": "direct main", "description": { @@ -470,25 +480,15 @@ "source": "hosted", "version": "0.4.0" }, - "pubspec": { + "pubspec_parse": { "dependency": "direct main", "description": { - "name": "pubspec", - "sha256": "f534a50a2b4d48dc3bc0ec147c8bd7c304280fff23b153f3f11803c4d49d927e", + "name": "pubspec_parse", + "sha256": "81876843eb50dc2e1e5b151792c9a985c5ed2536914115ed04e9c8528f6647b0", "url": "https://pub.dev" }, "source": "hosted", - "version": "2.3.0" - }, - "quiver": { - "dependency": "transitive", - "description": { - "name": "quiver", - "sha256": "b1c1ac5ce6688d77f65f3375a9abb9319b3cb32486bdc7a1e0fdf004d7ba4e47", - "url": "https://pub.dev" - }, - "source": "hosted", - "version": 
"3.2.1" + "version": "1.4.0" }, "shelf": { "dependency": "transitive", @@ -650,16 +650,6 @@ "source": "hosted", "version": "1.3.2" }, - "uri": { - "dependency": "transitive", - "description": { - "name": "uri", - "sha256": "889eea21e953187c6099802b7b4cf5219ba8f3518f604a1033064d45b1b8268a", - "url": "https://pub.dev" - }, - "source": "hosted", - "version": "1.0.0" - }, "vm_service": { "dependency": "transitive", "description": { @@ -745,5 +735,3 @@ "dart": ">=3.3.0 <4.0.0" } } - - From bf45803a123e7967f12911ee63748b34cdd90dd6 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Fri, 23 May 2025 10:51:13 +0000 Subject: [PATCH 070/220] python3Packages.ultralytics: 8.3.130 -> 8.3.143 --- pkgs/development/python-modules/ultralytics/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/ultralytics/default.nix b/pkgs/development/python-modules/ultralytics/default.nix index 9f16d7775fcc..33c89f940659 100644 --- a/pkgs/development/python-modules/ultralytics/default.nix +++ b/pkgs/development/python-modules/ultralytics/default.nix @@ -32,14 +32,14 @@ buildPythonPackage rec { pname = "ultralytics"; - version = "8.3.130"; + version = "8.3.143"; pyproject = true; src = fetchFromGitHub { owner = "ultralytics"; repo = "ultralytics"; tag = "v${version}"; - hash = "sha256-lB4Q1LK3hbn67mHcVn2qCh9YjVPDBl4DM3LXDL7lsvQ="; + hash = "sha256-qpFQcGLTEQS7Bt9CvdXgv2JyNfOONS0Cf71dckCrlPw="; }; build-system = [ setuptools ]; From 19069456f778dcdd9b60e6f8833ac48b055084b5 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Fri, 23 May 2025 11:03:30 +0000 Subject: [PATCH 071/220] r2modman: 3.1.58 -> 3.2.0 --- pkgs/by-name/r2/r2modman/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/r2/r2modman/package.nix b/pkgs/by-name/r2/r2modman/package.nix index 8ff252ef6fdf..cef3abad2691 100644 --- a/pkgs/by-name/r2/r2modman/package.nix +++ b/pkgs/by-name/r2/r2modman/package.nix @@ -15,18 +15,18 @@ stdenv.mkDerivation (finalAttrs: { pname = "r2modman"; - version = "3.1.58"; + version = "3.2.0"; src = fetchFromGitHub { owner = "ebkr"; repo = "r2modmanPlus"; rev = "v${finalAttrs.version}"; - hash = "sha256-ICLKkhgEi0ThWHLgm9fr0QXXtWMCAOJ6nkD66JR8XMo="; + hash = "sha256-RCMb9NaGzFRV2sXBxeb9G9pHmKf66/wwlNrpKIUE2iQ="; }; offlineCache = fetchYarnDeps { yarnLock = "${finalAttrs.src}/yarn.lock"; - hash = "sha256-3SMvUx+TwUmOur/50HDLWt0EayY5tst4YANWIlXdiPQ="; + hash = "sha256-F+TYP8F/2YAubeBIFMXlhNjjAPVTPMkZ+oH6UVt/gbs="; }; patches = [ From 3498fc21c881cd871e59078dac275ad7845d77ad Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Fri, 23 May 2025 13:49:18 +0000 Subject: [PATCH 072/220] plex-desktop: 1.108.1 -> 1.109.0 --- pkgs/by-name/pl/plex-desktop/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/pl/plex-desktop/package.nix b/pkgs/by-name/pl/plex-desktop/package.nix index 8d114601d70f..0d69c75c4052 100644 --- a/pkgs/by-name/pl/plex-desktop/package.nix +++ b/pkgs/by-name/pl/plex-desktop/package.nix @@ -23,8 +23,8 @@ }: let pname = "plex-desktop"; - version = "1.108.1"; - rev = "84"; + version = "1.109.0"; + rev = "85"; meta = { homepage = "https://plex.tv/"; description = "Streaming media player for Plex"; @@ -44,7 +44,7 @@ let src = fetchurl { url = "https://api.snapcraft.io/api/v1/snaps/download/qc6MFRM433ZhI1XjVzErdHivhSOhlpf0_${rev}.snap"; - hash = "sha512-ZcP84maap5Dskf9yECd76gn5x+tWxyVcIo+c0P2VJiQ4VwN2KCgWmwH2JkHzafFCcCFm9EqFBrFlNXWEvnUieQ=="; + hash = "sha512-BSnA84purHv6qIVELp+AJI2m6erTngnupbuoCZTaje6LCd2+5+U+7gqWdahmO1mxJEGvuBwzetdDrp1Ibz5a6A=="; }; nativeBuildInputs = [ From a4c6b536a1c04d01dc9f6c4fe232a1ed8f2a4884 Mon Sep 17 00:00:00 2001 From: 1adept <69433209+1adept@users.noreply.github.com> Date: Fri, 23 May 2025 16:56:04 +0200 Subject: [PATCH 073/220] carapace: 1.3.1 -> 1.3.2 --- pkgs/shells/carapace/default.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/shells/carapace/default.nix b/pkgs/shells/carapace/default.nix index f639bd38ef1f..e95c6a36b45d 100644 --- a/pkgs/shells/carapace/default.nix +++ b/pkgs/shells/carapace/default.nix @@ -9,16 +9,16 @@ buildGoModule (finalAttrs: { pname = "carapace"; - version = "1.3.1"; + version = "1.3.2"; src = fetchFromGitHub { owner = "carapace-sh"; repo = "carapace-bin"; tag = "v${finalAttrs.version}"; - hash = "sha256-VKc4JnezPdbgUIiSOnHIkUCLas//4TMIKiYd71EMamk="; + hash = "sha256-DgWC3IsuHncJzVfWxIGWDxknTAdHJEijvjhO7q14EYQ="; }; - vendorHash = "sha256-APJBCUdicKb81gY3ukhMHVgapDl+4tsMdNHEwZbarKE="; + vendorHash = "sha256-oq1hZ2P093zsI+UAGHi5XfRXqGGxWpR5j7x7N7ng3xE="; ldflags = [ "-s" From 630eb498515088736cb0a5e9f7318a75952ca076 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Fri, 23 May 2025 15:15:26 +0000 Subject: [PATCH 074/220] vgmstream: 1980 -> 2023 --- pkgs/by-name/vg/vgmstream/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/vg/vgmstream/package.nix b/pkgs/by-name/vg/vgmstream/package.nix index f6901368baae..fdcfcdfe91db 100644 --- a/pkgs/by-name/vg/vgmstream/package.nix +++ b/pkgs/by-name/vg/vgmstream/package.nix @@ -37,13 +37,13 @@ in stdenv.mkDerivation rec { pname = "vgmstream"; - version = "1980"; + version = "2023"; src = fetchFromGitHub { owner = "vgmstream"; repo = "vgmstream"; tag = "r${version}"; - hash = "sha256-TmaWC04XbtFfBYhmTO4ouh3NoByio1BCpDJGJy3r0NY="; + hash = "sha256-RyVh9twBZqFs4bKRZKmat0JB25R+rQtnAARo0dvCS+8="; }; passthru.updateScript = nix-update-script { From 6bf8ab3dedccab2fb98f32f55a0003f8c0ef5ab2 Mon Sep 17 00:00:00 2001 From: Marcin Serwin Date: Fri, 23 May 2025 18:06:41 +0200 Subject: [PATCH 075/220] rebels-in-the-sky: 1.0.29 -> 1.0.30 Signed-off-by: Marcin Serwin --- pkgs/by-name/re/rebels-in-the-sky/package.nix | 21 ++++++------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/pkgs/by-name/re/rebels-in-the-sky/package.nix b/pkgs/by-name/re/rebels-in-the-sky/package.nix index 7f6cbe16c746..7a8cf044293f 100644 --- a/pkgs/by-name/re/rebels-in-the-sky/package.nix +++ b/pkgs/by-name/re/rebels-in-the-sky/package.nix @@ -3,7 +3,6 @@ lib, fetchFromGitHub, rustPlatform, - fetchpatch, cmake, pkg-config, alsa-lib, @@ -15,28 +14,20 @@ rustPlatform.buildRustPackage (finalAttrs: { pname = "rebels-in-the-sky"; - version = "1.0.29"; + version = "1.0.30"; src = fetchFromGitHub { owner = "ricott1"; repo = "rebels-in-the-sky"; tag = "v${finalAttrs.version}"; - hash = "sha256-rWBaD4nxSmr1RZRbc51Sz9Xl2Te2yv4HNuFqWj8KayM="; + hash = "sha256-eC8n9g2kFErTRWWNo6jwAMGBX3+xGjtzq23+r3w0n0I="; }; useFetchCargoVendor = true; - cargoHash = "sha256-ZRxq6/mgXZ33o1AEHnSOt4HJAI1y+F+ysVNvvbb9M28="; + cargoHash = "sha256-dGD0RpelENEWe9W/3CXUS2GhOXRaWhCoD8AI2n4mUfs="; - patches = - lib.optionals (!withRadio) [ - ./disable-radio.patch - ] - ++ [ - # https://github.com/ricott1/rebels-in-the-sky/pull/25 - (fetchpatch { - url = "https://github.com/ricott1/rebels-in-the-sky/commit/31778fee783637fe8af09f71754f35c5d15b800a.patch"; - hash = "sha256-PO/aY+fB72gQpxE5eaIP/s4xevfQ/Ac1TH5ZEKwpw1I="; - }) - ]; + patches = lib.optionals (!withRadio) [ + ./disable-radio.patch + ]; nativeBuildInputs = [ From 7a51e0487adb1ae5596fff853917154e5c51108d Mon Sep 17 00:00:00 2001 From: Sarah Clark Date: Fri, 16 May 2025 19:37:19 -0700 Subject: [PATCH 076/220] python3Packages.google-photos-library-api: fix failing tests on Darwin Needs _darwinEnableLocalNetworking --- .../python-modules/google-photos-library-api/default.nix | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkgs/development/python-modules/google-photos-library-api/default.nix b/pkgs/development/python-modules/google-photos-library-api/default.nix index bc2965b3eeb1..87a7c8a5f626 100644 --- a/pkgs/development/python-modules/google-photos-library-api/default.nix +++ b/pkgs/development/python-modules/google-photos-library-api/default.nix @@ -39,6 +39,8 @@ buildPythonPackage rec { pytestCheckHook ]; + __darwinAllowLocalNetworking = true; + meta = { changelog = "https://github.com/allenporter/python-google-photos-library-api/releases/tag/${version}"; description = "Python client library for Google Photos Library API"; From 6a3bfcd042cbce0f909136bcd35f09cd4909d6a0 Mon Sep 17 00:00:00 2001 From: Sarah Clark Date: Thu, 22 May 2025 16:04:41 -0700 
Subject: [PATCH 077/220] python3Packages.google-cloud-netapp: build from GitHub Build from github repo, modernize, enable tests, and add maintainer --- .../google-cloud-netapp/default.nix | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/pkgs/development/python-modules/google-cloud-netapp/default.nix b/pkgs/development/python-modules/google-cloud-netapp/default.nix index 2f4a1b850fc8..5e8cec8ea9e3 100644 --- a/pkgs/development/python-modules/google-cloud-netapp/default.nix +++ b/pkgs/development/python-modules/google-cloud-netapp/default.nix @@ -1,15 +1,15 @@ { lib, buildPythonPackage, - fetchPypi, + fetchFromGitHub, google-api-core, google-auth, mock, + nix-update-script, proto-plus, protobuf, pytest-asyncio, pytestCheckHook, - pythonOlder, setuptools, }: @@ -18,14 +18,15 @@ buildPythonPackage rec { version = "0.3.23"; pyproject = true; - disabled = pythonOlder "3.8"; - - src = fetchPypi { - pname = "google_cloud_netapp"; - inherit version; - hash = "sha256-PP4o+qHCa3Ok6y9Ehyevmq1ac9Wb2zZoEDQgIpm0sr0="; + src = fetchFromGitHub { + owner = "googleapis"; + repo = "google-cloud-python"; + rev = "google-cloud-netapp-v${version}"; + hash = "sha256-ietiyPCghGUD1jlGdZMhVgVozAlyfdvYgkV6NNlzLQg="; }; + sourceRoot = "${src.name}/packages/google-cloud-netapp"; + build-system = [ setuptools ]; dependencies = [ @@ -46,11 +47,18 @@ buildPythonPackage rec { "google.cloud.netapp_v1" ]; - meta = with lib; { + passthru.updateScript = nix-update-script { + extraArgs = [ + "--version-regex" + "google-cloud-netapp-v([0-9.]+)" + ]; + }; + + meta = { description = "Python Client for NetApp API"; homepage = "https://github.com/googleapis/google-cloud-python/tree/main/packages/google-cloud-netapp"; changelog = "https://github.com/googleapis/google-cloud-python/blob/google-cloud-netapp-v${version}/packages/google-cloud-netapp/CHANGELOG.md"; - license = licenses.asl20; - maintainers = [ ]; + license = lib.licenses.asl20; + maintainers = [ lib.maintainers.sarahec ]; }; } From 81d495bf47d23206a4015df4d292150f903b30cc Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Fri, 23 May 2025 23:04:17 +0000 Subject: [PATCH 078/220] jmol: 16.3.17 -> 16.3.23 --- pkgs/applications/science/chemistry/jmol/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/applications/science/chemistry/jmol/default.nix b/pkgs/applications/science/chemistry/jmol/default.nix index 55e3dd1f6e2e..5b6854a1d354 100644 --- a/pkgs/applications/science/chemistry/jmol/default.nix +++ b/pkgs/applications/science/chemistry/jmol/default.nix @@ -31,7 +31,7 @@ let }; in stdenv.mkDerivation rec { - version = "16.3.17"; + version = "16.3.23"; pname = "jmol"; src = @@ -40,7 +40,7 @@ stdenv.mkDerivation rec { in fetchurl { url = "mirror://sourceforge/jmol/Jmol/Version%20${baseVersion}/Jmol%20${version}/Jmol-${version}-binary.tar.gz"; - hash = "sha256-5L9+JGVHKwgW9bOX/Xm/fQUOisO7aivWhhBdQaNpzyk="; + hash = "sha256-qZDkEsdl1kxwyavtBv9N8E33WSGJoe82w4v8x8v9SPs="; }; patchPhase = '' From 63ffc1e5833b382d4e1556df15f5a1e60e8aa722 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Sat, 24 May 2025 03:58:02 +0000 Subject: [PATCH 079/220] intel-compute-runtime: 25.13.33276.16 -> 25.18.33578.6 --- pkgs/by-name/in/intel-compute-runtime/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/in/intel-compute-runtime/package.nix b/pkgs/by-name/in/intel-compute-runtime/package.nix index 16b529f39ecc..071715a4b08f 100644 --- a/pkgs/by-name/in/intel-compute-runtime/package.nix +++ b/pkgs/by-name/in/intel-compute-runtime/package.nix @@ -12,13 +12,13 @@ stdenv.mkDerivation rec { pname = "intel-compute-runtime"; - version = "25.13.33276.16"; + version = "25.18.33578.6"; src = fetchFromGitHub { owner = "intel"; repo = "compute-runtime"; tag = version; - hash = "sha256-dGOFWmgPOcSQtpfmYTTPLYeHfwba6gp9nJRF999hybw="; + hash = "sha256-6HJUwoMzd8T9o0dohLiXz2xwtqnUmkFuftIUPqKpy5s="; }; nativeBuildInputs = [ From 1c26c86248131e168c10b9a55ab279cc5c1d0333 Mon Sep 17 00:00:00 2001 From: Jiajie Chen Date: Sat, 24 May 2025 12:58:36 +0800 Subject: [PATCH 080/220] cbmc: 6.4.1 -> 6.6.0 --- pkgs/by-name/cb/cbmc/package.nix | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pkgs/by-name/cb/cbmc/package.nix b/pkgs/by-name/cb/cbmc/package.nix index 9f43b14131de..b154d6cc9a82 100644 --- a/pkgs/by-name/cb/cbmc/package.nix +++ b/pkgs/by-name/cb/cbmc/package.nix @@ -17,13 +17,13 @@ stdenv.mkDerivation (finalAttrs: { pname = "cbmc"; - version = "6.4.1"; + version = "6.6.0"; src = fetchFromGitHub { owner = "diffblue"; repo = "cbmc"; tag = "cbmc-${finalAttrs.version}"; - hash = "sha256-O8aZTW+Eylshl9bmm9GzbljWB0+cj2liZHs2uScERkM="; + hash = "sha256-ot0vVBgiSVru/RE7KeyTsXzDfs0CSa5vaFsON+PCZZo="; }; srcglucose = fetchFromGitHub { @@ -51,12 +51,6 @@ stdenv.mkDerivation (finalAttrs: { cudd = cudd.src; }) ./0002-Do-not-download-sources-in-cmake.patch - # Fixes build with libc++ >= 19 due to the removal of std::char_traits. - # Remove for versions > 6.4.1. 
- (fetchpatch { - url = "https://github.com/diffblue/cbmc/commit/684bf4221c8737952e6469304f5a360dc3d5439d.patch"; - hash = "sha256-3hHu6FcyHjfeFjNxhyhxxk7I/SK98BXT+xy7NgtEt50="; - }) ]; postPatch = From ec885ec960f4c9550439e6671dd608635d044c75 Mon Sep 17 00:00:00 2001 From: Andrew Bruce Date: Sat, 24 May 2025 12:08:24 +0100 Subject: [PATCH 081/220] s3proxy: 2.1.0 -> 2.6.0 --- pkgs/by-name/s3/s3proxy/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/s3/s3proxy/package.nix b/pkgs/by-name/s3/s3proxy/package.nix index b2281b7f2ad9..011db530cd00 100644 --- a/pkgs/by-name/s3/s3proxy/package.nix +++ b/pkgs/by-name/s3/s3proxy/package.nix @@ -9,17 +9,17 @@ let pname = "s3proxy"; - version = "2.1.0"; + version = "2.6.0"; in maven.buildMavenPackage { inherit pname version; - mvnHash = "sha256-85mE/pZ0DXkzOKvTAqBXGatAt8gc4VPRCxmEyIlyVGI="; + mvnHash = "sha256-OCFs1Q4NL5heP8AVvkQ+ZdhmPD2SNZMCF2gxjXpbfW4="; src = fetchFromGitHub { owner = "gaul"; repo = pname; rev = "s3proxy-${version}"; - hash = "sha256-GhZPvo8wlXInHwg8rSmpwMMkZVw5SMpnZyKqFUYLbrE="; + hash = "sha256-wd3GdSAcoJvlyFqnccdhM83IY2Q7KJQHoyV+sQGEwo4="; }; doCheck = !stdenv.hostPlatform.isDarwin; From d290f7530628518363d84882c84a514cca288631 Mon Sep 17 00:00:00 2001 From: John Garcia Date: Sat, 24 May 2025 13:30:28 +0100 Subject: [PATCH 082/220] decent-sampler: 1.12.5 -> 1.12.14 --- pkgs/by-name/de/decent-sampler/package.nix | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/de/decent-sampler/package.nix b/pkgs/by-name/de/decent-sampler/package.nix index c388cef5aa75..5742b06a596c 100644 --- a/pkgs/by-name/de/decent-sampler/package.nix +++ b/pkgs/by-name/de/decent-sampler/package.nix @@ -14,7 +14,7 @@ let pname = "decent-sampler"; - version = "1.12.5"; + version = "1.12.14"; icon = fetchurl { url = "https://www.decentsamples.com/wp-content/uploads/2018/09/cropped-Favicon_512x512.png"; @@ -26,8 +26,8 @@ let src = fetchzip { # dropbox links: https://www.dropbox.com/sh/dwyry6xpy5uut07/AABBJ84bjTTSQWzXGG5TOQpfa\ - url = "https://www.dropbox.com/scl/fo/a0i0udw7ggfwnjoi05hh3/APOyrCpI3CaO46Gq1IFUv-A/Decent_Sampler-1.12.5-Linux-Static-x86_64.tar.gz?rlkey=orvjprslmwn0dkfs0ncx6nxnm&dl=0"; - hash = "sha256-jr2bl8nQhfWdpZZGQU6T6TDKSW6SZpweJ2GiQz7n9Ug="; + url = "https://www.dropbox.com/scl/fo/a0i0udw7ggfwnjoi05hh3/AFAQQGWSQ-kxJv5JggeMTrE/Decent_Sampler-1.12.14-Linux-Static-x86_64.tar.gz?rlkey=orvjprslmwn0dkfs0ncx6nxnm&dl=0"; + hash = "sha256-n9WTR11chK9oCz84uYhymov1axTVRr4OLo6W0cRpdWc="; }; nativeBuildInputs = [ copyDesktopItems ]; @@ -91,6 +91,7 @@ buildFHSEnv { # It claims to be free but we currently cannot find any license # that it is released under. 
license = licenses.unfree; + sourceProvenance = with sourceTypes; [ binaryNativeCode ]; platforms = [ "x86_64-linux" ]; maintainers = with maintainers; [ adam248 From 4a3054643c136123715e0214031e2a7e610ec818 Mon Sep 17 00:00:00 2001 From: DontEatOreo <57304299+DontEatOreo@users.noreply.github.com> Date: Sat, 24 May 2025 17:10:04 +0300 Subject: [PATCH 083/220] gallery-dl: 1.29.6 -> 1.29.7 Chnagelog: https://github.com/mikf/gallery-dl/releases/tag/v1.29.7 Diff: https://github.com/mikf/gallery-dl/compare/v1.29.6...v1.29.7 --- pkgs/by-name/ga/gallery-dl/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/ga/gallery-dl/package.nix b/pkgs/by-name/ga/gallery-dl/package.nix index d0fbd3fd6571..22470932709d 100644 --- a/pkgs/by-name/ga/gallery-dl/package.nix +++ b/pkgs/by-name/ga/gallery-dl/package.nix @@ -8,7 +8,7 @@ let pname = "gallery-dl"; - version = "1.29.6"; + version = "1.29.7"; in python3Packages.buildPythonApplication { inherit pname version; @@ -18,7 +18,7 @@ python3Packages.buildPythonApplication { owner = "mikf"; repo = "gallery-dl"; tag = "v${version}"; - hash = "sha256-D/HPAnIwCAfwzBrteGkZSMHFvXDPQLF4bHKDwppdkzc="; + hash = "sha256-OngtJ6E7Gvr+/5Vjv1vepPVVksNDRlXZkU9yMYRvh2k="; }; build-system = [ python3Packages.setuptools ]; From 528e669ff3e8db2dd28c65e996603678367b3b67 Mon Sep 17 00:00:00 2001 From: codgician <15964984+codgician@users.noreply.github.com> Date: Sun, 25 May 2025 00:30:44 +0800 Subject: [PATCH 084/220] nixos/open-webui: allow service to access gpu --- nixos/modules/services/misc/open-webui.nix | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/nixos/modules/services/misc/open-webui.nix b/nixos/modules/services/misc/open-webui.nix index a31c6b42d696..521aacd39939 100644 --- a/nixos/modules/services/misc/open-webui.nix +++ b/nixos/modules/services/misc/open-webui.nix @@ -132,6 +132,21 @@ in "@system-service" "~@privileged" ]; + SupplementaryGroups = [ "render" ]; # for rocm to access /dev/dri/renderD* devices + DeviceAllow = [ + # CUDA + # https://docs.nvidia.com/dgx/pdf/dgx-os-5-user-guide.pdf + "char-nvidiactl" + "char-nvidia-caps" + "char-nvidia-frontend" + "char-nvidia-uvm" + # ROCm + "char-drm" + "char-fb" + "char-kfd" + # WSL (Windows Subsystem for Linux) + "/dev/dxg" + ]; }; }; From 43f70b942d7cdc3219ba4273f3acb77ce1877cff Mon Sep 17 00:00:00 2001 From: Kalle Fagerberg Date: Sat, 24 May 2025 18:41:16 +0200 Subject: [PATCH 085/220] kubectl-klock: 0.7.2 -> 0.8.0 --- pkgs/by-name/ku/kubectl-klock/package.nix | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/pkgs/by-name/ku/kubectl-klock/package.nix b/pkgs/by-name/ku/kubectl-klock/package.nix index aae2d47da28d..ba056902714d 100644 --- a/pkgs/by-name/ku/kubectl-klock/package.nix +++ b/pkgs/by-name/ku/kubectl-klock/package.nix @@ -7,18 +7,24 @@ buildGoModule rec { pname = "kubectl-klock"; - version = "0.7.2"; + version = "0.8.0"; nativeBuildInputs = [ makeWrapper ]; src = fetchFromGitHub { owner = "applejag"; - repo = pname; + repo = "kubectl-klock"; rev = "v${version}"; - hash = "sha256-S7cpVRVboLkU+GgvwozJmfFAO29tKpPlk+r9mbVLxF8="; + hash = "sha256-1t/DJ6cTikAl2edJFfDzXAB8OgdZSjk1C7vOGXyTu0U="; }; - vendorHash = "sha256-xz1I79FklKNpWdoQdzpXYAnKM+7FJcGn04lKH2E9A50="; + ldflags = [ + "-s" + "-w" + "-X main.version=${version}" + ]; + + vendorHash = "sha256-FWfAn3ZWScIXbdv3zwwZxFyMkpzJHZJuhxe22qvv1ac="; postInstall = '' makeWrapper $out/bin/kubectl-klock $out/bin/kubectl_complete-klock --add-flags __complete From 
1865bce126f9c921fa2e33243d43a66839d5660a Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Sat, 24 May 2025 18:54:34 +0000 Subject: [PATCH 086/220] runitor: 1.3.0 -> 1.4.0 --- pkgs/by-name/ru/runitor/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ru/runitor/package.nix b/pkgs/by-name/ru/runitor/package.nix index b5e157aca970..afcc7c661565 100644 --- a/pkgs/by-name/ru/runitor/package.nix +++ b/pkgs/by-name/ru/runitor/package.nix @@ -8,14 +8,14 @@ buildGoModule rec { pname = "runitor"; - version = "1.3.0"; - vendorHash = null; + version = "1.4.0"; + vendorHash = "sha256-SYYAAtuWt/mTmZPBilYxf2uZ6OcgeTnobYiye47i8mI="; src = fetchFromGitHub { owner = "bdd"; repo = "runitor"; rev = "v${version}"; - sha256 = "sha256-9sg+ku3Qh/X/EZ2VCrvIc0pq5iyn4O8RZrO4KpkciAI="; + sha256 = "sha256-eD8bJ34ZfTPToQrZ8kZGcSBdMmmCwRtuXgwZmz15O3s="; }; ldflags = [ From eb9e26b2dcf8ed273452663ce8f7c83873a6df8a Mon Sep 17 00:00:00 2001 From: Sarah Clark Date: Sat, 24 May 2025 12:29:59 -0700 Subject: [PATCH 087/220] python3Packages.typedunits: Disable fractional rounding test on aarch64 see https://github.com/quantumlib/TypedUnits/issues/14 --- pkgs/development/python-modules/typedunits/default.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkgs/development/python-modules/typedunits/default.nix b/pkgs/development/python-modules/typedunits/default.nix index 3f0ed6e1d4ac..941db335c3e0 100644 --- a/pkgs/development/python-modules/typedunits/default.nix +++ b/pkgs/development/python-modules/typedunits/default.nix @@ -42,7 +42,7 @@ buildPythonPackage { pytestCheckHook ]; - disabledTests = lib.optionals stdenv.hostPlatform.isDarwin [ + disabledTests = lib.optionals stdenv.hostPlatform.isAarch [ # Rounding differences "test_float_to_twelths_frac" ]; From 0d249125d13f3c4cf07a79a9a06790d7f4d73327 Mon Sep 17 00:00:00 2001 From: backfire-monism-net Date: Sat, 24 May 2025 14:05:00 -0700 Subject: [PATCH 088/220] majima: 0.5.0 -> 0.5.1 --- pkgs/by-name/ma/majima/package.nix | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkgs/by-name/ma/majima/package.nix b/pkgs/by-name/ma/majima/package.nix index 69b3d37a5e5a..67ef20d5c259 100644 --- a/pkgs/by-name/ma/majima/package.nix +++ b/pkgs/by-name/ma/majima/package.nix @@ -5,17 +5,17 @@ }: rustPlatform.buildRustPackage { pname = "majima"; - version = "0.5.0"; + version = "0.5.1"; src = fetchFromSourcehut { - owner = "~cucumber-zoom"; + owner = "~wq"; repo = "majima"; - rev = "0f32dceeaf09c082cf33ab31b40d3bfc45aaa6f8"; - hash = "sha256-P5E0Wiy3mNPRCQ/bsIW4fG7LnPSPRXmW7pnbgl0/lBQ="; + rev = "630427fcd158ccbaafe8bc3f7368fa8577b03548"; + hash = "sha256-znlJY/U7H+BvBM7n4IqE5x9ek1/QVxYkptsAnODz/Q0="; }; useFetchCargoVendor = true; - cargoHash = "sha256-bb3rg7vWRBSOPf0LM7avQQNlMjLiLFRtrGIfJbFWtHI="; + cargoHash = "sha256-I0txA41rmTZ3AHllRVsJzmZXbrm5+GSdd08EatxKCzk="; meta = { description = "Generate random usernames quickly and in various formats"; From 0e1d8e94b0ce934457d515bfec3666cf486efb8d Mon Sep 17 00:00:00 2001 From: Raf Gemmail Date: Sun, 25 May 2025 13:14:50 +1200 Subject: [PATCH 089/220] matrix-brandy: 1.23.3 -> 1.23.5, add updateScript --- pkgs/by-name/ma/matrix-brandy/package.nix | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/ma/matrix-brandy/package.nix b/pkgs/by-name/ma/matrix-brandy/package.nix index bf9d518783c3..de9247c2fa20 100644 --- a/pkgs/by-name/ma/matrix-brandy/package.nix +++ b/pkgs/by-name/ma/matrix-brandy/package.nix @@ -3,18 +3,19 @@ 
stdenv, fetchFromGitHub, libX11, + nix-update-script, SDL, }: stdenv.mkDerivation rec { pname = "matrix-brandy"; - version = "1.23.3"; + version = "1.23.5"; src = fetchFromGitHub { owner = "stardot"; repo = "MatrixBrandy"; rev = "V${version}"; - hash = "sha256-jw5SxCQ2flvCjO/JO3BHpnpt31wBsBxDkVH7uwVxTS0="; + hash = "sha256-sMgYgV4/vV1x5xSICXRpW6K8uCdVlJrS7iEg6XzQRo8="; }; buildInputs = [ @@ -27,6 +28,8 @@ stdenv.mkDerivation rec { cp brandy $out/bin ''; + passthru.updateScript = nix-update-script { }; + meta = with lib; { homepage = "http://brandy.matrixnetwork.co.uk/"; description = "Matrix Brandy BASIC VI for Linux, Windows, MacOSX"; From 0b4d19ccdded19dbde2a8a36251b184157d1e37c Mon Sep 17 00:00:00 2001 From: Raf Gemmail Date: Sun, 25 May 2025 13:17:20 +1200 Subject: [PATCH 090/220] matrix-brandy: add darwin support --- pkgs/by-name/ma/matrix-brandy/no-lrt.patch | 13 +++++++++++++ pkgs/by-name/ma/matrix-brandy/package.nix | 9 ++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 pkgs/by-name/ma/matrix-brandy/no-lrt.patch diff --git a/pkgs/by-name/ma/matrix-brandy/no-lrt.patch b/pkgs/by-name/ma/matrix-brandy/no-lrt.patch new file mode 100644 index 000000000000..7f42553852a9 --- /dev/null +++ b/pkgs/by-name/ma/matrix-brandy/no-lrt.patch @@ -0,0 +1,13 @@ +diff --git a/makefile b/makefile +index d89cee1..37c1ac5 100644 +--- a/makefile ++++ b/makefile +@@ -13,7 +13,7 @@ CFLAGS = -O3 -fPIE $(shell sdl-config --cflags) -DUSE_SDL -DDEFAULT_IGNORE -Wall + + LDFLAGS += + +-LIBS = -lm $(shell sdl-config --libs) -ldl -pthread -lrt -lX11 ++LIBS = -lm $(shell sdl-config --libs) -ldl -pthread -lX11 + + SRCDIR = src + diff --git a/pkgs/by-name/ma/matrix-brandy/package.nix b/pkgs/by-name/ma/matrix-brandy/package.nix index de9247c2fa20..2adc72a2e897 100644 --- a/pkgs/by-name/ma/matrix-brandy/package.nix +++ b/pkgs/by-name/ma/matrix-brandy/package.nix @@ -18,6 +18,13 @@ stdenv.mkDerivation rec { hash = "sha256-sMgYgV4/vV1x5xSICXRpW6K8uCdVlJrS7iEg6XzQRo8="; }; + patches = lib.optionals stdenv.isDarwin [ ./no-lrt.patch ]; + + makeFlags = lib.optionals stdenv.isDarwin [ + "CC=cc" + "LD=clang" + ]; + buildInputs = [ libX11 SDL @@ -35,7 +42,7 @@ stdenv.mkDerivation rec { description = "Matrix Brandy BASIC VI for Linux, Windows, MacOSX"; mainProgram = "brandy"; license = licenses.gpl2Plus; - platforms = platforms.linux; + platforms = platforms.unix; maintainers = with maintainers; [ fiq ]; }; } From 147e08f8a1f5acd56d425e9684bee8e6bfb87279 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20M=C3=A9meint?= Date: Sun, 25 May 2025 07:44:07 +0200 Subject: [PATCH 091/220] authelia: 4.39.3 -> 4.39.4 --- pkgs/servers/authelia/sources.nix | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkgs/servers/authelia/sources.nix b/pkgs/servers/authelia/sources.nix index 61a553705af0..a12804d8abfc 100644 --- a/pkgs/servers/authelia/sources.nix +++ b/pkgs/servers/authelia/sources.nix @@ -1,14 +1,14 @@ { fetchFromGitHub }: rec { pname = "authelia"; - version = "4.39.3"; + version = "4.39.4"; src = fetchFromGitHub { owner = "authelia"; repo = "authelia"; rev = "v${version}"; - hash = "sha256-HBkHN7c8O07b2ZI6R7KFvdBF5GWuYU6rmisxLMSH5EQ="; + hash = "sha256-OIf7Q84uWk2q+lTBQNHHO11QEl7FBGv2uNx+g2GNHE0="; }; - vendorHash = "sha256-2wJvX6jAjU9iaFMIcC5Qm1agRMPv4fFfsCeTkvXSpYs="; - pnpmDepsHash = "sha256-uy6uKfZpsFEl2n6zOriRsKwlw3av1f0xBF/CwhWLJMU="; + vendorHash = "sha256-Vndkts5e3NSdtTk3rVZSjfuGuafQ3eswoSLLFspXTIw="; + pnpmDepsHash = "sha256-hA9STLJbFw5pFHx2Wi3X6JFsTvHzCMFVS3HEJApQ9zM="; } 
From 51d7608330282b3bb92d68eeb8e9354833921782 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Sun, 25 May 2025 08:02:47 +0000 Subject: [PATCH 092/220] svdtools: 0.4.6 -> 0.5.0 --- pkgs/by-name/sv/svdtools/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/sv/svdtools/package.nix b/pkgs/by-name/sv/svdtools/package.nix index 86f0059cca56..313d6bbdd3c2 100644 --- a/pkgs/by-name/sv/svdtools/package.nix +++ b/pkgs/by-name/sv/svdtools/package.nix @@ -6,15 +6,15 @@ rustPlatform.buildRustPackage rec { pname = "svdtools"; - version = "0.4.6"; + version = "0.5.0"; src = fetchCrate { inherit version pname; - hash = "sha256-AfRFtybGEpArLGKp4AkGlokfNFMK8Ez5VA5Fu5GUhRI="; + hash = "sha256-2GemBVTRvYC5bvlYgJKmDJM78ZoE63B1QwV8cfSHYPg="; }; useFetchCargoVendor = true; - cargoHash = "sha256-0GR9pbrevb0USu8de1oFHePJH1hGTvcVh3Gc9WKP0uA="; + cargoHash = "sha256-sn+Z3/p4Ek/wxwTj6uwDBFP1hFNGDb2EZ7MO0zvPjPk="; meta = with lib; { description = "Tools to handle vendor-supplied, often buggy SVD files"; From 99887950106c637aeb8197d2295409d1932248de Mon Sep 17 00:00:00 2001 From: Weijia Wang <9713184+wegank@users.noreply.github.com> Date: Tue, 29 Apr 2025 02:18:21 +0200 Subject: [PATCH 093/220] lib.systems.loongarch64-multiplatform: init --- lib/systems/platforms.nix | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/systems/platforms.nix b/lib/systems/platforms.nix index 8e3f3621698b..9c9010b9fb46 100644 --- a/lib/systems/platforms.nix +++ b/lib/systems/platforms.nix @@ -583,6 +583,14 @@ rec { # https://github.com/llvm/llvm-project/pull/132173 cmodel = "medium"; }; + linux-kernel = { + name = "loongarch-multiplatform"; + target = "vmlinuz.efi"; + autoModules = true; + preferBuiltin = true; + baseConfig = "defconfig"; + DTB = true; + }; }; # This function takes a minimally-valid "platform" and returns an @@ -611,6 +619,9 @@ rec { else if platform.isAarch64 then if platform.isDarwin then apple-m1 else aarch64-multiplatform + else if platform.isLoongArch64 then + loongarch64-multiplatform + else if platform.isRiscV then riscv-multiplatform From 76e2bfa1dc649ee41637a17cafe1bbbf5f0fb9e6 Mon Sep 17 00:00:00 2001 From: Ryan Horiguchi Date: Sun, 25 May 2025 16:33:01 +0200 Subject: [PATCH 094/220] prowlarr: 1.35.1.5034 -> 1.36.3.5071 --- pkgs/by-name/pr/prowlarr/package.nix | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkgs/by-name/pr/prowlarr/package.nix b/pkgs/by-name/pr/prowlarr/package.nix index 85180da593bf..2368d0d7eea7 100644 --- a/pkgs/by-name/pr/prowlarr/package.nix +++ b/pkgs/by-name/pr/prowlarr/package.nix @@ -37,16 +37,16 @@ let hash = { - aarch64-darwin = "sha256-zHaYgR9UXKnyikt5z3UUmh6oTNoNv5mqtnAId9kCzgE="; - aarch64-linux = "sha256-xChYbzs4zTWe71xwn3TfUZHGTOHHjzDwbkzPgQEfQZk="; - x86_64-darwin = "sha256-mual72UuFh9h7Hc/hHqSUeFInwm09f+UptX8jqknXlg="; - x86_64-linux = "sha256-UCiHeg46ncRQA6rDW4uxoFRqC7cuwMdr33OrN7yj51o="; + aarch64-darwin = "sha256-IkFkQoEPVaV+eVp2DkZECXTkzJyyNYTUBsCBdXCBZC8="; + aarch64-linux = "sha256-uwg5Ec9MC6jLwNdauF1tj2gSkhWdyhvWnUTLt8P1OZw="; + x86_64-darwin = "sha256-mdDZvKyhKXnHEKvZRH8Di6dZP80AEktnkMOnIZW+Gik="; + x86_64-linux = "sha256-N0KDb6MsGAJKSh5GSm7aiamjflHRXb06fL1KM2T1+bg="; } .${stdenv.hostPlatform.system} or unsupported; in stdenv.mkDerivation rec { inherit pname; - version = "1.35.1.5034"; + version = "1.36.3.5071"; src = fetchurl { url = "https://github.com/Prowlarr/Prowlarr/releases/download/v${version}/Prowlarr.master.${version}.${os}-core-${arch}.tar.gz"; From 
dc0d03c468acd956fe76f4d7999e89da56efc71f Mon Sep 17 00:00:00 2001 From: OPNA2608 Date: Thu, 15 May 2025 13:21:44 +0200 Subject: [PATCH 095/220] lomiri.lomiri-music-app: 3.2.2 -> 3.3.0 --- .../applications/lomiri-music-app/default.nix | 27 +++++-------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/pkgs/desktops/lomiri/applications/lomiri-music-app/default.nix b/pkgs/desktops/lomiri/applications/lomiri-music-app/default.nix index 3a1dcf319dff..c80eebf96503 100644 --- a/pkgs/desktops/lomiri/applications/lomiri-music-app/default.nix +++ b/pkgs/desktops/lomiri/applications/lomiri-music-app/default.nix @@ -2,7 +2,6 @@ stdenv, lib, fetchFromGitLab, - fetchpatch, gitUpdater, nixosTests, cmake, @@ -22,31 +21,15 @@ stdenv.mkDerivation (finalAttrs: { pname = "lomiri-music-app"; - version = "3.2.2"; + version = "3.3.0"; src = fetchFromGitLab { owner = "ubports"; repo = "development/apps/lomiri-music-app"; - rev = "refs/tags/v${finalAttrs.version}"; - hash = "sha256-tHCbZF+7i/gYs8WqM5jDBhhKmM4ZeUbG3DYBdQAiUT8="; + tag = "v${finalAttrs.version}"; + hash = "sha256-lCpRt0SeNszlCsmJOZvnzoDmHV7xCGKdmIZBJTlBQDo="; }; - patches = [ - # Remove when version > 3.2.2 - (fetchpatch { - name = "0001-lomiri-music-app-Fix-GNUInstallDirs-usage.patch"; - url = "https://gitlab.com/ubports/development/apps/lomiri-music-app/-/commit/32591f2332aa204b9ee2857992e50594db0e6ff2.patch"; - hash = "sha256-SXn+7jItzi1Q0xK0iK57+W3SpEdSCx1dKSfeghOCePg="; - }) - - # Remove when version > 3.2.2 - (fetchpatch { - name = "0002-lomiri-music-app-bindtextdomain.patch"; - url = "https://gitlab.com/ubports/development/apps/lomiri-music-app/-/commit/4e950521a67e201f3d02b3b71c6bb1ddce8ef2b2.patch"; - hash = "sha256-HgGKk44FU+IXRVx2NK3iGSo/wPJce1T2k/vP8nZtewQ="; - }) - ]; - postPatch = '' # We don't want absolute paths in desktop files substituteInPlace CMakeLists.txt \ @@ -113,7 +96,9 @@ stdenv.mkDerivation (finalAttrs: { meta = { description = "Default Music application for Ubuntu devices"; homepage = "https://gitlab.com/ubports/development/apps/lomiri-music-app"; - changelog = "https://gitlab.com/ubports/development/apps/lomiri-music-app/-/blob/v${finalAttrs.version}/ChangeLog"; + changelog = "https://gitlab.com/ubports/development/apps/lomiri-music-app/-/blob/${ + if (!builtins.isNull finalAttrs.src.tag) then finalAttrs.src.tag else finalAttrs.src.rev + }/ChangeLog"; license = with lib.licenses; [ gpl3Only ]; From 4e3b940de08bf4bcac71a1b8aa0d80c236379452 Mon Sep 17 00:00:00 2001 From: OPNA2608 Date: Sun, 25 May 2025 17:28:59 +0200 Subject: [PATCH 096/220] nixosTests.lomiri-music-app: Optimise OCR --- nixos/tests/lomiri-music-app.nix | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/nixos/tests/lomiri-music-app.nix b/nixos/tests/lomiri-music-app.nix index 87722db239d7..9efff85fee20 100644 --- a/nixos/tests/lomiri-music-app.nix +++ b/nixos/tests/lomiri-music-app.nix @@ -1,7 +1,8 @@ { lib, ... 
}: let ocrContent = "Music Test"; - musicFile = "test.mp3"; + musicFileName = "Example"; + musicFile = "${musicFileName}.mp3"; ocrPauseColor = "#FF00FF"; ocrStartColor = "#00FFFF"; @@ -136,6 +137,7 @@ in with subtest("lomiri music launches"): machine.succeed("lomiri-music-app >&2 &") + machine.wait_for_console_text("Queue is empty") machine.sleep(10) machine.send_key("alt-f10") machine.sleep(2) @@ -144,11 +146,14 @@ in with subtest("lomiri music plays music"): machine.succeed("xdotool mousemove 30 720 click 1") # Skip intro + machine.sleep(2) machine.wait_for_text("Albums") machine.succeed("xdotool mousemove 25 45 click 1") # Open categories + machine.sleep(2) machine.wait_for_text("Tracks") machine.succeed("xdotool mousemove 25 240 click 1") # Switch to Tracks category - machine.wait_for_text("test") # the test file + machine.sleep(2) + machine.wait_for_text("${musicFileName}") # the test file machine.screenshot("lomiri-music_listing") # Ensure pause colours isn't present already @@ -185,6 +190,7 @@ in with subtest("lomiri music localisation works"): machine.succeed("env LANG=de_DE.UTF-8 lomiri-music-app .mp4 >&2 &") + machine.wait_for_console_text("Restoring library queue") machine.sleep(10) machine.send_key("alt-f10") machine.sleep(2) From b05fd9403e3cbc45817ddac8d0d452e686d388e9 Mon Sep 17 00:00:00 2001 From: OPNA2608 Date: Sun, 25 May 2025 17:31:59 +0200 Subject: [PATCH 097/220] lomiri.lomiri-music-app: Satisfy MediaScanner2 QML dependency Instead of relying on Lomiri to have installed it & put it on QML2_IMPORT_PATH. --- pkgs/desktops/lomiri/applications/lomiri-music-app/default.nix | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkgs/desktops/lomiri/applications/lomiri-music-app/default.nix b/pkgs/desktops/lomiri/applications/lomiri-music-app/default.nix index c80eebf96503..34654e838795 100644 --- a/pkgs/desktops/lomiri/applications/lomiri-music-app/default.nix +++ b/pkgs/desktops/lomiri/applications/lomiri-music-app/default.nix @@ -11,6 +11,7 @@ lomiri-content-hub, lomiri-thumbnailer, lomiri-ui-toolkit, + mediascanner2, qtbase, qtdeclarative, qtmultimedia, @@ -56,6 +57,7 @@ stdenv.mkDerivation (finalAttrs: { lomiri-content-hub lomiri-thumbnailer lomiri-ui-toolkit + mediascanner2 qtmultimedia qtsystems ] From f62576650efce1b435048ed8ab0ea5a86e80ee15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Schr=C3=B6ter?= Date: Sun, 25 May 2025 19:05:39 +0200 Subject: [PATCH 098/220] protoc-gen-es: 2.2.5 -> 2.5.0 https://github.com/bufbuild/protobuf-es/releases/tag/v2.5.0 --- pkgs/by-name/pr/protoc-gen-es/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/pr/protoc-gen-es/package.nix b/pkgs/by-name/pr/protoc-gen-es/package.nix index bffd37a9b187..ea38a71b109b 100644 --- a/pkgs/by-name/pr/protoc-gen-es/package.nix +++ b/pkgs/by-name/pr/protoc-gen-es/package.nix @@ -7,20 +7,20 @@ buildNpmPackage rec { pname = "protoc-gen-es"; - version = "2.2.5"; + version = "2.5.0"; src = fetchFromGitHub { owner = "bufbuild"; repo = "protobuf-es"; tag = "v${version}"; - hash = "sha256-7g7DZSSFyidgpWJQNuKQRpyDuCDQT6gGgIKNk1JsbEk="; + hash = "sha256-wllLeX7veCbpb/mTAIs0cf/hcVItEmw2HW/UR4k0Epc="; postFetch = '' ${lib.getExe npm-lockfile-fix} $out/package-lock.json ''; }; - npmDepsHash = "sha256-NclrKsBBHlcYIgWf0bEq7xI3pUq2RvZ+0Ebj77ICars="; + npmDepsHash = "sha256-PGieFyPgb2ERTdQc3HH5mg/uh5xj7nkUa0qwmgxofVY="; npmWorkspace = "packages/protoc-gen-es"; From 2bc665802e0c7fc35c182844987931a4dca81fec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=BCtz?= 
Date: Sun, 25 May 2025 19:07:55 -0700 Subject: [PATCH 099/220] python3Packages.caldav: 1.4.0 -> 1.5.0 Diff: https://github.com/python-caldav/caldav/compare/refs/tags/v1.4.0...refs/tags/v1.5.0 Changelog: https://github.com/python-caldav/caldav/blob/v1.5.0/CHANGELOG.md --- pkgs/development/python-modules/caldav/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/caldav/default.nix b/pkgs/development/python-modules/caldav/default.nix index 76ffc259e48f..1343bae47695 100644 --- a/pkgs/development/python-modules/caldav/default.nix +++ b/pkgs/development/python-modules/caldav/default.nix @@ -19,14 +19,14 @@ buildPythonPackage rec { pname = "caldav"; - version = "1.4.0"; + version = "1.5.0"; pyproject = true; src = fetchFromGitHub { owner = "python-caldav"; repo = "caldav"; tag = "v${version}"; - hash = "sha256-rixhEIcl37ZIiYFOnJY0Ww75xZy3o/436JcgLmoOGi0="; + hash = "sha256-SYjfQG4muuBcnVeu9cl00Zb2fGUhw157LLxA5/N5EJ0="; }; build-system = [ From 3b9b6f3487470397f0db9d48c30f3891867e963f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=BCtz?= Date: Sun, 25 May 2025 19:27:33 -0700 Subject: [PATCH 100/220] python3Packages.pytibber: 0.31.2 -> 0.31.4 Diff: https://github.com/Danielhiversen/pyTibber/compare/refs/tags/0.31.2...refs/tags/0.31.4 Changelog: https://github.com/Danielhiversen/pyTibber/releases/tag/0.31.3 --- pkgs/development/python-modules/pytibber/default.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/development/python-modules/pytibber/default.nix b/pkgs/development/python-modules/pytibber/default.nix index 398840a3fea2..813b8ff67a6e 100644 --- a/pkgs/development/python-modules/pytibber/default.nix +++ b/pkgs/development/python-modules/pytibber/default.nix @@ -13,7 +13,7 @@ buildPythonPackage rec { pname = "pytibber"; - version = "0.31.2"; + version = "0.31.4"; pyproject = true; disabled = pythonOlder "3.11"; @@ -22,7 +22,7 @@ buildPythonPackage rec { owner = "Danielhiversen"; repo = "pyTibber"; tag = version; - hash = "sha256-/k9XnRgfaE59+Fi/5AhCJt4/jFLdYeDCNaoGCDYGWso="; + hash = "sha256-VaVSFBylLKHmgmjl6riI7d+Ddgg/4F7Caei9xZIDS/Y="; }; build-system = [ setuptools ]; @@ -47,7 +47,7 @@ buildPythonPackage rec { description = "Python library to communicate with Tibber"; homepage = "https://github.com/Danielhiversen/pyTibber"; changelog = "https://github.com/Danielhiversen/pyTibber/releases/tag/${src.tag}"; - license = lib.licenses.mit; + license = lib.licenses.gpl3Plus; maintainers = with lib.maintainers; [ dotlambda ]; }; } From 3f70c06787a1e6d9685921d37000b982d4318be2 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Mon, 26 May 2025 05:41:53 +0000 Subject: [PATCH 101/220] python3Packages.globus-sdk: 3.56.0 -> 3.56.1 --- pkgs/development/python-modules/globus-sdk/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/globus-sdk/default.nix b/pkgs/development/python-modules/globus-sdk/default.nix index 35d0ee7a959c..2f3eab1ca574 100644 --- a/pkgs/development/python-modules/globus-sdk/default.nix +++ b/pkgs/development/python-modules/globus-sdk/default.nix @@ -15,7 +15,7 @@ buildPythonPackage rec { pname = "globus-sdk"; - version = "3.56.0"; + version = "3.56.1"; pyproject = true; disabled = pythonOlder "3.7"; @@ -24,7 +24,7 @@ buildPythonPackage rec { owner = "globus"; repo = "globus-sdk-python"; tag = version; - hash = "sha256-/Ft4vyZEZpvmpBefBGdTXNr+bZFzF9WJso/kKFST1IY="; + hash = "sha256-M7ZOtj8zekKrouiipOafKBQP/EhPY4hGODXAovBF5ew="; }; build-system = [ setuptools ]; From 7e55141d371a4fdd945871d710b742dc2c12286b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=BCtz?= Date: Sun, 25 May 2025 23:32:08 -0700 Subject: [PATCH 102/220] python3Packages.azure-eventhub: add updateScript --- pkgs/development/python-modules/azure-eventhub/default.nix | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkgs/development/python-modules/azure-eventhub/default.nix b/pkgs/development/python-modules/azure-eventhub/default.nix index 5f4c8615ddf8..1cfda2439a9e 100644 --- a/pkgs/development/python-modules/azure-eventhub/default.nix +++ b/pkgs/development/python-modules/azure-eventhub/default.nix @@ -3,6 +3,7 @@ azure-core, buildPythonPackage, fetchFromGitHub, + gitUpdater, setuptools, typing-extensions, }: @@ -36,6 +37,10 @@ buildPythonPackage rec { "azure.eventhub.aio" ]; + passthru = { + updateScript = gitUpdater { rev-prefix = "azure.eventhub."; }; + }; + meta = with lib; { description = "Microsoft Azure Event Hubs Client Library for Python"; homepage = "https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhub"; From 7f9ecacfe688b261152377c64dd9fa31332d5bb1 Mon Sep 17 00:00:00 2001 From: Grimmauld Date: Mon, 26 May 2025 10:17:51 +0200 Subject: [PATCH 103/220] libsForQt5.kjs: remove pcre dependency pcre is actually still in use by kjs. Removing it runs the risk of subtle breakages, see also the upstream notice [1]. However, `pcre` 1.x is insecure and unmaintained and should be removed. With plasma 5 being scheduled for removal, the breakages caused by removing pcre from kjs should be non-critical. [1] https://invent.kde.org/frameworks/kjs/-/blob/3c663ad8ac16f8982784a5ebd5d9200e7aa07936/CMakeLists.txt#L36-46 --- pkgs/development/libraries/kde-frameworks/kjs.nix | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pkgs/development/libraries/kde-frameworks/kjs.nix b/pkgs/development/libraries/kde-frameworks/kjs.nix index 83ed383345e6..681748d86e70 100644 --- a/pkgs/development/libraries/kde-frameworks/kjs.nix +++ b/pkgs/development/libraries/kde-frameworks/kjs.nix @@ -1,8 +1,8 @@ { + lib, mkDerivation, extra-cmake-modules, kdoctools, - pcre, qtbase, }: @@ -13,7 +13,12 @@ mkDerivation { kdoctools ]; buildInputs = [ - pcre qtbase ]; + cmakeFlags = [ + # this can break stuff, see: + # https://invent.kde.org/frameworks/kjs/-/blob/3c663ad8ac16f8982784a5ebd5d9200e7aa07936/CMakeLists.txt#L36-46 + # However: It shouldn't break much considering plasma 5 is planned to be removed. 
+ (lib.cmakeBool "KJS_FORCE_DISABLE_PCRE" true) + ]; } From 1d1e72f7e93947e0bdd7b564448772a04f3d19a9 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Mon, 26 May 2025 10:37:32 +0000 Subject: [PATCH 104/220] ed-odyssey-materials-helper: 2.173 -> 2.178 --- pkgs/by-name/ed/ed-odyssey-materials-helper/deps.json | 8 ++++---- pkgs/by-name/ed/ed-odyssey-materials-helper/package.nix | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkgs/by-name/ed/ed-odyssey-materials-helper/deps.json b/pkgs/by-name/ed/ed-odyssey-materials-helper/deps.json index cc345d6abf3f..8f96bfec3d6f 100644 --- a/pkgs/by-name/ed/ed-odyssey-materials-helper/deps.json +++ b/pkgs/by-name/ed/ed-odyssey-materials-helper/deps.json @@ -493,10 +493,10 @@ "module": "sha256-rwV/vBEyR6Pp/cYOWU+dh2xPW8oZy4sb2myBGP9ixpU=", "pom": "sha256-EeldzI+ywwumAH/f9GxW+HF2/lwwLFGEQThZEk1Tq60=" }, - "io/sentry#sentry/8.11.1": { - "jar": "sha256-0EmSqkQXOQazcYAmpRyUMXDc663czsRTtszYAdGuZkg=", - "module": "sha256-x4i43VQ1Avv5hy7X11gvLfBPZwEzEoWb0fgun5sqgRM=", - "pom": "sha256-Fcd/SfMLh3uTBDq5O05T5KlFDlXxgWz+++/2fd47X2c=" + "io/sentry#sentry/8.12.0": { + "jar": "sha256-LkfktB4/El/cgKUS1fUaA5DRD9haWRHR+A5im7p3AAQ=", + "module": "sha256-LrKRmE4DJppwx0nCTSBwZHF9Rw8ex1lAD96birtXim4=", + "pom": "sha256-pwlifFbJHCooUNMFXPWWFTk+TCEMcMojMEpifX9SAlg=" }, "jakarta/json/bind#jakarta.json.bind-api/2.0.0": { "jar": "sha256-peYGtYiLQStIkHrWiLNN/k4wroGJxvJ8wEkbjzwDYoc=", diff --git a/pkgs/by-name/ed/ed-odyssey-materials-helper/package.nix b/pkgs/by-name/ed/ed-odyssey-materials-helper/package.nix index 0605d65747e1..f24b30fc7263 100644 --- a/pkgs/by-name/ed/ed-odyssey-materials-helper/package.nix +++ b/pkgs/by-name/ed/ed-odyssey-materials-helper/package.nix @@ -16,13 +16,13 @@ }: stdenv.mkDerivation rec { pname = "ed-odyssey-materials-helper"; - version = "2.173"; + version = "2.178"; src = fetchFromGitHub { owner = "jixxed"; repo = "ed-odyssey-materials-helper"; tag = version; - hash = "sha256-PW5AnplciFenupASEqXA7NqQrH14Wfz1SSm1c/LWA7A="; + hash = "sha256-a/nrRw5FjUZBJE0CmSevGAw4LBI/A3jPAEJfg7GY5+U="; }; nativeBuildInputs = [ From fe03f72e318805a65d1365b55ab956d9636a2f6a Mon Sep 17 00:00:00 2001 From: Markus Kowalewski Date: Mon, 26 May 2025 13:29:23 +0200 Subject: [PATCH 105/220] gromacs: 2025.1 -> 2025.2 --- .../science/molecular-dynamics/gromacs/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/applications/science/molecular-dynamics/gromacs/default.nix b/pkgs/applications/science/molecular-dynamics/gromacs/default.nix index b1423b30a846..d8bc2a855598 100644 --- a/pkgs/applications/science/molecular-dynamics/gromacs/default.nix +++ b/pkgs/applications/science/molecular-dynamics/gromacs/default.nix @@ -53,8 +53,8 @@ let } else { - version = "2025.1"; - hash = "sha256-Ct9iGoD9gEP43v7ITOAoEcDN9CoFIjKJCTLYHyXE0oo="; + version = "2025.2"; + hash = "sha256-DfCfnUWpnvAOZrm6qUk6J+kGgTdjo7bHZyIXxmtD6hE="; }; in From f34483be5ee2418a563545a56743b7b59c549935 Mon Sep 17 00:00:00 2001 From: Sizhe Zhao Date: Sat, 24 May 2025 23:40:23 +0800 Subject: [PATCH 106/220] nixosTests: handleTest -> runTest, batch 1 Reference: https://github.com/NixOS/nixpkgs/issues/386873 --- nixos/tests/all-tests.nix | 1080 ++++++++--------- nixos/tests/ax25.nix | 225 ++-- nixos/tests/benchexec.nix | 118 +- nixos/tests/bitcoind.nix | 100 +- nixos/tests/bittorrent.nix | 336 +++-- nixos/tests/blockbook-frontend.nix | 52 +- nixos/tests/boot-stage1.nix | 348 +++--- nixos/tests/boot-stage2.nix | 128 +- nixos/tests/borgbackup.nix | 488 ++++---- 
nixos/tests/borgmatic.nix | 48 +- nixos/tests/bpftune.nix | 38 +- nixos/tests/breitbandmessung.nix | 70 +- nixos/tests/brscan5.nix | 80 +- nixos/tests/btrbk-doas.nix | 214 ++-- nixos/tests/btrbk-no-timer.nix | 66 +- nixos/tests/btrbk-section-order.nix | 90 +- nixos/tests/btrbk.nix | 200 ++- nixos/tests/budgie.nix | 174 ++- nixos/tests/buildkite-agents.nix | 50 +- nixos/tests/c2fmzq.nix | 148 ++- nixos/tests/cage.nix | 70 +- nixos/tests/cagebreak.nix | 130 +- nixos/tests/canaille.nix | 108 +- nixos/tests/castopod.nix | 436 ++++--- nixos/tests/charliecloud.nix | 82 +- nixos/tests/cinnamon-wayland.nix | 136 +-- nixos/tests/cinnamon.nix | 194 ++- nixos/tests/cjdns.nix | 168 ++- nixos/tests/clickhouse.nix | 62 +- nixos/tests/cloudlog.nix | 38 +- nixos/tests/cockpit.nix | 298 +++-- nixos/tests/code-server.nix | 42 +- nixos/tests/coder.nix | 40 +- nixos/tests/collectd.nix | 68 +- nixos/tests/commafeed.nix | 32 +- nixos/tests/connman.nix | 150 ++- nixos/tests/consul-template.nix | 72 +- nixos/tests/consul.nix | 438 ++++--- nixos/tests/containers-bridge.nix | 186 ++- nixos/tests/containers-custom-pkgs.nix | 86 +- nixos/tests/containers-ephemeral.nix | 90 +- nixos/tests/containers-extra_veth.nix | 196 ++- nixos/tests/containers-hosts.nix | 92 +- nixos/tests/containers-imperative.nix | 386 +++--- nixos/tests/containers-ip.nix | 108 +- nixos/tests/containers-macvlans.nix | 156 ++- nixos/tests/containers-names.nix | 80 +- nixos/tests/containers-nested.nix | 58 +- .../tests/containers-physical_interfaces.nix | 248 ++-- nixos/tests/containers-portforward.nix | 108 +- nixos/tests/containers-reloadable.nix | 100 +- .../tests/containers-require-bind-mounts.nix | 64 +- nixos/tests/containers-restart_networking.nix | 216 ++-- nixos/tests/containers-tmpfs.nix | 152 ++- nixos/tests/containers-unified-hierarchy.nix | 40 +- nixos/tests/convos.nix | 46 +- nixos/tests/coturn.nix | 62 +- nixos/tests/couchdb.nix | 88 +- nixos/tests/crabfit.nix | 48 +- nixos/tests/croc.nix | 100 +- nixos/tests/curl-impersonate.nix | 328 +++-- nixos/tests/dae.nix | 60 +- nixos/tests/db-rest.nix | 224 ++-- nixos/tests/dconf.nix | 78 +- nixos/tests/ddns-updater.nix | 44 +- nixos/tests/deconz.nix | 64 +- nixos/tests/deepin.nix | 90 +- nixos/tests/deluge.nix | 116 +- nixos/tests/dependency-track.nix | 126 +- nixos/tests/devpi-server.nix | 74 +- nixos/tests/dex-oidc.nix | 146 ++- nixos/tests/disable-installer-tools.nix | 58 +- nixos/tests/discourse.nix | 400 +++--- nixos/tests/documize.nix | 114 +- nixos/tests/doh-proxy-rust.nix | 84 +- nixos/tests/domination.nix | 58 +- nixos/tests/drbd-driver.nix | 40 +- nixos/tests/drbd.nix | 152 ++- nixos/tests/dublin-traceroute.nix | 130 +- nixos/tests/ecryptfs.nix | 134 +- nixos/tests/endlessh-go.nix | 118 +- nixos/tests/endlessh.nix | 86 +- nixos/tests/engelsystem.nix | 84 +- nixos/tests/enlightenment.nix | 172 ++- nixos/tests/env.nix | 88 +- nixos/tests/envfs.nix | 74 +- nixos/tests/ergo.nix | 40 +- nixos/tests/ergochat.nix | 174 ++- nixos/tests/eris-server.nix | 50 +- nixos/tests/esphome.nix | 72 +- nixos/tests/etebase-server.nix | 86 +- nixos/tests/etesync-dav.nix | 48 +- nixos/tests/fakeroute.nix | 48 +- nixos/tests/fanout.nix | 54 +- nixos/tests/fenics.nix | 90 +- nixos/tests/ferm.nix | 178 ++- nixos/tests/filesender.nix | 274 +++-- nixos/tests/firefoxpwa.nix | 64 +- nixos/tests/firejail.nix | 164 ++- nixos/tests/firezone/firezone.nix | 604 +++++---- nixos/tests/flaresolverr.nix | 36 +- nixos/tests/flood.nix | 56 +- nixos/tests/fluentd.nix | 98 +- nixos/tests/fluidd.nix | 36 +- 
nixos/tests/fontconfig-default-fonts.nix | 62 +- nixos/tests/freeswitch.nix | 64 +- nixos/tests/frp.nix | 158 ++- nixos/tests/frr.nix | 204 ++-- nixos/tests/fscrypt.nix | 84 +- nixos/tests/ft2-clone.nix | 54 +- nixos/tests/gancio.nix | 144 ++- nixos/tests/geth.nix | 122 +- nixos/tests/ghostunnel.nix | 194 ++- nixos/tests/gitdaemon.nix | 132 +- nixos/tests/gitolite-fcgiwrap.nix | 154 ++- nixos/tests/gitolite.nix | 242 ++-- nixos/tests/glusterfs.nix | 126 +- nixos/tests/gnome-extensions.nix | 280 +++-- nixos/tests/gnome-flashback.nix | 104 +- nixos/tests/gnome-xorg.nix | 194 ++- nixos/tests/gns3-server.nix | 104 +- nixos/tests/gnupg.nix | 206 ++-- nixos/tests/goatcounter.nix | 48 +- nixos/tests/gobgpd.nix | 162 ++- nixos/tests/gocd-agent.nix | 76 +- nixos/tests/gocd-server.nix | 44 +- nixos/tests/gollum.nix | 36 +- nixos/tests/gonic.nix | 52 +- nixos/tests/gopro-tool.nix | 64 +- nixos/tests/goss.nix | 80 +- nixos/tests/gotenberg.nix | 36 +- nixos/tests/gotify-server.nix | 88 +- nixos/tests/graphite.nix | 70 +- nixos/tests/graylog.nix | 262 ++-- nixos/tests/greetd-no-shadow.nix | 90 +- nixos/tests/grub.nix | 106 +- nixos/tests/guacamole-server.nix | 40 +- nixos/tests/gvisor.nix | 80 +- nixos/tests/hardened.nix | 176 ++- nixos/tests/haste-server.nix | 44 +- nixos/tests/headscale.nix | 170 ++- nixos/tests/hedgedoc.nix | 178 ++- nixos/tests/herbstluftwm.nix | 74 +- nixos/tests/hledger-web.nix | 100 +- nixos/tests/hockeypuck.nix | 120 +- nixos/tests/homebox.nix | 76 +- nixos/tests/hound.nix | 108 +- nixos/tests/i3wm.nix | 100 +- nixos/tests/ifm.nix | 62 +- nixos/tests/iftop.nix | 54 +- nixos/tests/incron.nix | 82 +- nixos/tests/influxdb.nix | 68 +- nixos/tests/influxdb2.nix | 410 ++++--- nixos/tests/initrd-luks-empty-passphrase.nix | 186 ++- nixos/tests/initrd-network.nix | 62 +- nixos/tests/input-remapper.nix | 116 +- nixos/tests/inspircd.nix | 166 ++- nixos/tests/intune.nix | 118 +- nixos/tests/invidious.nix | 258 ++-- nixos/tests/iodine.nix | 114 +- nixos/tests/iosched.nix | 122 +- nixos/tests/ipv6.nix | 224 ++-- nixos/tests/iscsi-multipath-root.nix | 526 ++++---- nixos/tests/iscsi-root.nix | 332 +++-- nixos/tests/isolate.nix | 68 +- nixos/tests/isso.nix | 56 +- nixos/tests/jackett.nix | 42 +- nixos/tests/jellyfin.nix | 244 ++-- nixos/tests/jenkins-cli.nix | 52 +- nixos/tests/jenkins.nix | 230 ++-- nixos/tests/jibri.nix | 130 +- nixos/tests/jirafeau.nix | 38 +- nixos/tests/jitsi-meet.nix | 128 +- nixos/tests/jotta-cli.nix | 52 +- nixos/tests/kanidm-provisioning.nix | 866 +++++++------ nixos/tests/kanidm.nix | 298 +++-- nixos/tests/karma.nix | 148 ++- nixos/tests/kavita.nix | 78 +- nixos/tests/kbd-setfont-decompress.nix | 38 +- nixos/tests/kbd-update-search-paths-patch.nix | 40 +- nixos/tests/keepalived.nix | 90 +- nixos/tests/keepassxc.nix | 150 ++- nixos/tests/kernel-latest-ath-user-regd.nix | 34 +- nixos/tests/keter.nix | 76 +- nixos/tests/komga.nix | 36 +- nixos/tests/ksm.nix | 42 +- nixos/tests/kthxbye.nix | 192 ++- nixos/tests/ladybird.nix | 56 +- nixos/tests/languagetool.nix | 46 +- nixos/tests/lanraragi.nix | 72 +- nixos/tests/leaps.nix | 50 +- nixos/tests/legit.nix | 100 +- nixos/tests/lemmy.nix | 168 ++- nixos/tests/libinput.nix | 64 +- nixos/tests/libresprite.nix | 60 +- nixos/tests/libuiohook.nix | 64 +- nixos/tests/libvirtd.nix | 140 ++- nixos/tests/lidarr.nix | 34 +- nixos/tests/lightdm.nix | 64 +- nixos/tests/limesurvey.nix | 54 +- nixos/tests/litestream.nix | 204 ++-- nixos/tests/livebook-service.nix | 86 +- nixos/tests/lldap.nix | 48 +- nixos/tests/localsend.nix | 38 +- 
nixos/tests/locate.nix | 110 +- nixos/tests/login.nix | 120 +- nixos/tests/loki.nix | 112 +- nixos/tests/luks.nix | 138 ++- nixos/tests/lxd-image-server.nix | 164 ++- nixos/tests/lxqt.nix | 124 +- nixos/tests/ly.nix | 76 +- nixos/tests/maestral.nix | 156 ++- nixos/tests/magnetico.nix | 80 +- nixos/tests/marytts.nix | 148 ++- nixos/tests/mate-wayland.nix | 108 +- nixos/tests/mate.nix | 150 ++- nixos/tests/matrix/conduit.nix | 176 ++- nixos/tests/matrix/dendrite.nix | 158 ++- nixos/tests/matrix/mautrix-meta-postgres.nix | 416 ++++--- nixos/tests/matrix/mautrix-meta-sqlite.nix | 428 ++++--- nixos/tests/matrix/mjolnir.nix | 338 +++--- nixos/tests/matrix/pantalaimon.nix | 178 ++- nixos/tests/matrix/synapse-workers.nix | 96 +- nixos/tests/matrix/synapse.nix | 444 ++++--- nixos/tests/matter-server.nix | 80 +- nixos/tests/mealie.nix | 76 +- nixos/tests/mediamtx.nix | 96 +- nixos/tests/meilisearch.nix | 118 +- nixos/tests/merecat.nix | 60 +- nixos/tests/metabase.nix | 42 +- nixos/tests/mihomo.nix | 84 +- nixos/tests/mimir.nix | 92 +- nixos/tests/mindustry.nix | 56 +- nixos/tests/minecraft-server.nix | 72 +- nixos/tests/minecraft.nix | 76 +- nixos/tests/minidlna.nix | 76 +- nixos/tests/miniflux.nix | 230 ++-- nixos/tests/minio.nix | 202 ++- nixos/tests/misc.nix | 286 +++-- nixos/tests/misskey.nix | 46 +- nixos/tests/mod_perl.nix | 116 +- nixos/tests/molly-brown.nix | 132 +- nixos/tests/mollysocket.nix | 46 +- nixos/tests/monado.nix | 84 +- nixos/tests/monetdb.nix | 158 ++- nixos/tests/moonraker.nix | 86 +- nixos/tests/moosefs.nix | 178 ++- nixos/tests/mopidy.nix | 28 +- nixos/tests/morty.nix | 54 +- nixos/tests/mtp.nix | 228 ++-- nixos/tests/multipass.nix | 68 +- nixos/tests/munin.nix | 78 +- nixos/tests/mutable-users.nix | 124 +- nixos/tests/mympd.nix | 46 +- nixos/tests/nar-serve.nix | 92 +- nixos/tests/nats.nix | 124 +- nixos/tests/navidrome.nix | 28 +- nixos/tests/nbd.nix | 212 ++-- nixos/tests/ncdns.nix | 170 ++- nixos/tests/ndppd.nix | 134 +- nixos/tests/nebula.nix | 786 ++++++------ nixos/tests/netbird.nix | 102 +- nixos/tests/netdata.nix | 90 +- nixos/tests/networking-proxy.nix | 210 ++-- nixos/tests/nexus.nix | 46 +- nixos/tests/nghttpx.nix | 110 +- nixos/tests/nginx-njs.nix | 66 +- nixos/tests/nimdow.nix | 62 +- nixos/tests/nitter.nix | 60 +- nixos/tests/nix-config.nix | 40 +- nixos/tests/nix-serve-ssh.nix | 88 +- nixos/tests/nixos-generate-config.nix | 100 +- .../extra-python-packages.nix | 22 +- nixos/tests/nixseparatedebuginfod.nix | 148 ++- .../noto-fonts-cjk-qt-default-weight.nix | 62 +- nixos/tests/npmrc.nix | 42 +- nixos/tests/nscd.nix | 254 ++-- nixos/tests/nsd.nix | 222 ++-- nixos/tests/ntpd-rs.nix | 126 +- nixos/tests/ntpd.nix | 44 +- nixos/tests/nvmetcfg.nix | 92 +- nixos/tests/nzbget.nix | 96 +- nixos/tests/nzbhydra2.nix | 34 +- nixos/tests/ocis.nix | 388 +++--- nixos/tests/octoprint.nix | 112 +- nixos/tests/odoo.nix | 70 +- nixos/tests/oh-my-zsh.nix | 34 +- nixos/tests/ombi.nix | 32 +- nixos/tests/openarena.nix | 116 +- nixos/tests/openldap.nix | 312 +++-- nixos/tests/openresty-lua.nix | 182 ++- nixos/tests/opensnitch.nix | 152 ++- nixos/tests/openssh.nix | 656 +++++----- nixos/tests/opentabletdriver.nix | 62 +- nixos/tests/opentelemetry-collector.nix | 158 ++- nixos/tests/openvscode-server.nix | 42 +- nixos/tests/orangefs.nix | 154 ++- nixos/tests/osrm-backend.nix | 114 +- nixos/tests/outline.nix | 98 +- nixos/tests/overlayfs.nix | 94 +- nixos/tests/owncast.nix | 134 +- nixos/tests/pacemaker.nix | 228 ++-- nixos/tests/packagekit.nix | 50 +- 
nixos/tests/pam/pam-file-contents.nix | 40 +- nixos/tests/pam/pam-oath-login.nix | 180 ++- nixos/tests/pam/pam-u2f.nix | 50 +- nixos/tests/pam/pam-ussh.nix | 150 ++- nixos/tests/pam/zfs-key.nix | 144 ++- nixos/tests/pantheon-wayland.nix | 174 ++- nixos/tests/pantheon.nix | 186 ++- nixos/tests/paperless.nix | 218 ++-- nixos/tests/pass-secret-service.nix | 130 +- .../password-option-override-ordering.nix | 298 +++-- nixos/tests/pds.nix | 44 +- nixos/tests/peerflix.nix | 40 +- nixos/tests/peroxide.nix | 34 +- nixos/tests/pgbouncer.nix | 100 +- nixos/tests/pgmanage.nix | 80 +- nixos/tests/phosh.nix | 150 ++- nixos/tests/photonvision.nix | 38 +- nixos/tests/photoprism.nix | 48 +- nixos/tests/pict-rs.nix | 40 +- nixos/tests/pingvin-share.nix | 42 +- nixos/tests/plantuml-server.nix | 40 +- nixos/tests/plasma-bigscreen.nix | 72 +- nixos/tests/plasma5-systemd-start.nix | 80 +- nixos/tests/plasma5.nix | 118 +- nixos/tests/plasma6.nix | 116 +- nixos/tests/plausible.nix | 56 +- nixos/tests/playwright-python.nix | 112 +- nixos/tests/please.nix | 114 +- nixos/tests/plikd.nix | 54 +- nixos/tests/plotinus.nix | 66 +- nixos/tests/pocket-id.nix | 78 +- nixos/tests/podgrab.nix | 60 +- nixos/tests/polaris.nix | 54 +- nixos/tests/portunus.nix | 30 +- nixos/tests/postfixadmin.nix | 64 +- nixos/tests/power-profiles-daemon.nix | 102 +- nixos/tests/powerdns.nix | 114 +- nixos/tests/pppd.nix | 132 +- nixos/tests/private-gpt.nix | 50 +- nixos/tests/privoxy.nix | 274 +++-- nixos/tests/proxy.nix | 162 ++- nixos/tests/pt2-clone.nix | 62 +- nixos/tests/public-inbox.nix | 444 ++++--- nixos/tests/pufferpanel.nix | 134 +- nixos/tests/pykms.nix | 40 +- nixos/tests/pyload.nix | 56 +- nixos/tests/qownnotes.nix | 136 +-- nixos/tests/quake3.nix | 152 ++- nixos/tests/quicktun.nix | 36 +- nixos/tests/quickwit.nix | 180 ++- nixos/tests/quorum.nix | 190 ++- nixos/tests/rabbitmq.nix | 112 +- nixos/tests/radarr.nix | 32 +- nixos/tests/radicale.nix | 174 ++- nixos/tests/rasdaemon.nix | 70 +- nixos/tests/readarr.nix | 32 +- nixos/tests/realm.nix | 68 +- nixos/tests/redlib.nix | 50 +- nixos/tests/renovate.nix | 118 +- nixos/tests/restart-by-activation-script.nix | 140 ++- nixos/tests/restic-rest-server.nix | 218 ++-- nixos/tests/restic.nix | 440 ++++--- nixos/tests/retroarch.nix | 90 +- nixos/tests/robustirc-bridge.nix | 46 +- nixos/tests/rosenpass.nix | 388 +++--- nixos/tests/roundcube.nix | 72 +- nixos/tests/rspamd-trainer.nix | 314 +++-- nixos/tests/rstudio-server.nix | 76 +- nixos/tests/rsyncd.nix | 76 +- nixos/tests/rtorrent.nix | 52 +- nixos/tests/rustls-libssl.nix | 168 ++- nixos/tests/rxe.nix | 86 +- nixos/tests/sabnzbd.nix | 52 +- nixos/tests/samba-wsdd.nix | 78 +- nixos/tests/sane.nix | 172 ++- nixos/tests/sanoid.nix | 266 ++-- nixos/tests/saunafs.nix | 222 ++-- nixos/tests/sdl3.nix | 40 +- nixos/tests/seafile.nix | 208 ++-- nixos/tests/seatd.nix | 110 +- nixos/tests/service-runner.nix | 76 +- nixos/tests/sfxr-qt.nix | 58 +- nixos/tests/sgt-puzzles.nix | 64 +- nixos/tests/shadow.nix | 316 +++-- nixos/tests/shattered-pixel-dungeon.nix | 52 +- nixos/tests/shiori.nix | 152 ++- nixos/tests/silverbullet.nix | 108 +- nixos/tests/simple.nix | 36 +- nixos/tests/sing-box.nix | 976 ++++++++------- nixos/tests/slimserver.nix | 84 +- nixos/tests/slurm.nix | 332 +++-- nixos/tests/smokeping.nix | 80 +- nixos/tests/snapper.nix | 84 +- nixos/tests/snmpd.nix | 40 +- nixos/tests/soapui.nix | 52 +- nixos/tests/soft-serve.nix | 194 ++- nixos/tests/sogo.nix | 120 +- nixos/tests/soju.nix | 56 +- nixos/tests/solanum.nix | 174 ++- 
nixos/tests/sonarr.nix | 32 +- nixos/tests/sonic-server.nix | 42 +- nixos/tests/spacecookie.nix | 84 +- nixos/tests/sqlite3-to-mysql.nix | 120 +- nixos/tests/squid.nix | 332 +++-- nixos/tests/ssh-agent-auth.nix | 114 +- nixos/tests/ssh-audit.nix | 182 ++- nixos/tests/stalwart-mail.nix | 214 ++-- nixos/tests/strongswan-swanctl.nix | 250 ++-- nixos/tests/sudo.nix | 312 +++-- nixos/tests/sunshine.nix | 136 +-- nixos/tests/suricata.nix | 136 +-- nixos/tests/swap-file-btrfs.nix | 82 +- nixos/tests/swap-partition.nix | 90 +- nixos/tests/swap-random-encryption.nix | 140 ++- nixos/tests/sway.nix | 386 +++--- nixos/tests/swayfx.nix | 342 +++--- nixos/tests/sx.nix | 104 +- nixos/tests/sympa.nix | 60 +- nixos/tests/syncthing-init.nix | 54 +- nixos/tests/syncthing-many-devices.nix | 426 ++++--- nixos/tests/syncthing-no-settings.nix | 44 +- nixos/tests/syncthing-relay.nix | 48 +- nixos/tests/syncthing.nix | 120 +- nixos/tests/systemd-analyze.nix | 84 +- nixos/tests/systemd-bpf.nix | 96 +- nixos/tests/systemd-coredump.nix | 80 +- nixos/tests/systemd-credentials-tpm2.nix | 114 +- nixos/tests/systemd-cryptenroll.nix | 76 +- nixos/tests/systemd-escaping.nix | 118 +- nixos/tests/systemd-homed.nix | 186 ++- nixos/tests/systemd-initrd-bridge.nix | 128 +- nixos/tests/systemd-initrd-btrfs-raid.nix | 96 +- nixos/tests/systemd-initrd-luks-fido2.nix | 98 +- nixos/tests/systemd-initrd-luks-keyfile.nix | 106 +- nixos/tests/systemd-initrd-luks-password.nix | 112 +- nixos/tests/systemd-initrd-luks-tpm2.nix | 94 +- nixos/tests/systemd-initrd-luks-unl0kr.nix | 182 ++- nixos/tests/systemd-initrd-modprobe.nix | 50 +- nixos/tests/systemd-initrd-networkd-ssh.nix | 102 +- nixos/tests/systemd-initrd-swraid.nix | 132 +- nixos/tests/systemd-initrd-vconsole.nix | 90 +- nixos/tests/systemd-initrd-vlan.nix | 112 +- nixos/tests/systemd-journal-gateway.nix | 176 ++- nixos/tests/systemd-journal-upload.nix | 200 ++- nixos/tests/systemd-journal.nix | 128 +- nixos/tests/systemd-machinectl.nix | 344 +++--- nixos/tests/systemd-misc.nix | 110 +- nixos/tests/systemd-networkd-bridge.nix | 98 +- nixos/tests/systemd-networkd-dhcpserver.nix | 212 ++-- nixos/tests/systemd-networkd-vrf.nix | 348 +++--- nixos/tests/systemd-networkd.nix | 150 ++- nixos/tests/systemd-no-tainted.nix | 28 +- nixos/tests/systemd-nspawn-configfile.nix | 230 ++-- nixos/tests/systemd-nspawn.nix | 118 +- nixos/tests/systemd-oomd.nix | 98 +- nixos/tests/systemd-portabled.nix | 104 +- nixos/tests/systemd-resolved.nix | 180 ++- nixos/tests/systemd-shutdown.nix | 80 +- nixos/tests/systemd-timesyncd-nscd-dnssec.nix | 98 +- nixos/tests/systemd-timesyncd.nix | 122 +- nixos/tests/systemd-user-linger.nix | 62 +- nixos/tests/systemd-user-tmpfiles-rules.nix | 70 +- nixos/tests/systemd-userdbd.nix | 64 +- nixos/tests/systemd.nix | 470 ++++--- nixos/tests/tandoor-recipes-script-name.nix | 146 ++- nixos/tests/tandoor-recipes.nix | 80 +- nixos/tests/tang.nix | 168 ++- nixos/tests/taskchampion-sync-server.nix | 86 +- nixos/tests/taskserver.nix | 476 ++++---- nixos/tests/tayga.nix | 458 ++++--- nixos/tests/technitium-dns-server.nix | 50 +- nixos/tests/teeworlds.nix | 86 +- nixos/tests/tiddlywiki.nix | 118 +- nixos/tests/timezone.nix | 98 +- nixos/tests/tinydns.nix | 86 +- nixos/tests/tinyproxy.nix | 38 +- nixos/tests/tinywl.nix | 122 +- nixos/tests/tmate-ssh-server.nix | 150 ++- nixos/tests/tomcat.nix | 56 +- nixos/tests/tor.nix | 56 +- nixos/tests/trafficserver.nix | 338 +++--- nixos/tests/transfer-sh.nix | 42 +- nixos/tests/trezord.nix | 50 +- nixos/tests/trickster.nix | 82 +- 
nixos/tests/tsm-client-gui.nix | 86 +- nixos/tests/tuptime.nix | 56 +- nixos/tests/turbovnc-headless-server.nix | 270 ++--- nixos/tests/turn-rs.nix | 104 +- nixos/tests/txredisapi.nix | 68 +- nixos/tests/typesense.nix | 50 +- nixos/tests/ucarp.nix | 130 +- nixos/tests/udisks2.nix | 122 +- nixos/tests/ulogd/ulogd.nix | 100 +- nixos/tests/umurmur.nix | 168 ++- nixos/tests/unbound.nix | 738 ++++++----- nixos/tests/uptermd.nix | 114 +- nixos/tests/uptime-kuma.nix | 34 +- nixos/tests/urn-timer.nix | 52 +- nixos/tests/usbguard.nix | 112 +- nixos/tests/user-activation-scripts.nix | 72 +- nixos/tests/user-home-mode.nix | 72 +- nixos/tests/ustreamer.nix | 136 +-- nixos/tests/uwsgi.nix | 152 ++- nixos/tests/v2ray.nix | 184 ++- nixos/tests/vault-agent.nix | 108 +- nixos/tests/vault-dev.nix | 76 +- nixos/tests/vault-postgresql.nix | 124 +- nixos/tests/vault.nix | 48 +- nixos/tests/vdirsyncer.nix | 516 ++++---- nixos/tests/vengi-tools.nix | 54 +- nixos/tests/vikunja.nix | 136 +-- nixos/tests/vsftpd.nix | 76 +- nixos/tests/waagent.nix | 122 +- nixos/tests/warzone2100.nix | 52 +- nixos/tests/wasabibackend.nix | 76 +- nixos/tests/watchdogd.nix | 44 +- nixos/tests/web-apps/healthchecks.nix | 72 +- nixos/tests/web-apps/immich-public-proxy.nix | 180 ++- nixos/tests/web-apps/immich.nix | 100 +- nixos/tests/web-apps/netbox-upgrade.nix | 184 ++- nixos/tests/web-apps/open-web-calendar.nix | 86 +- nixos/tests/web-apps/peering-manager.nix | 84 +- nixos/tests/web-apps/phylactery.nix | 40 +- nixos/tests/web-apps/tt-rss.nix | 80 +- nixos/tests/web-apps/weblate.nix | 176 ++- nixos/tests/web-servers/ttyd.nix | 60 +- nixos/tests/web-servers/unit-perl.nix | 78 +- nixos/tests/wg-access-server.nix | 58 +- nixos/tests/whisparr.nix | 32 +- nixos/tests/whoogle-search.nix | 40 +- nixos/tests/without-nix.nix | 66 +- nixos/tests/wmderland.nix | 122 +- nixos/tests/workout-tracker.nix | 44 +- nixos/tests/wrappers.nix | 220 ++-- nixos/tests/xmpp/ejabberd.nix | 568 +++++---- 538 files changed, 35525 insertions(+), 36600 deletions(-) diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix index c01da895fbbc..2ba9260afff2 100644 --- a/nixos/tests/all-tests.nix +++ b/nixos/tests/all-tests.nix @@ -146,7 +146,7 @@ in # Testing the test driver nixos-test-driver = { - extra-python-packages = handleTest ./nixos-test-driver/extra-python-packages.nix { }; + extra-python-packages = runTest ./nixos-test-driver/extra-python-packages.nix; lib-extend = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./nixos-test-driver/lib-extend.nix { }; node-name = runTest ./nixos-test-driver/node-name.nix; busybox = runTest ./nixos-test-driver/busybox.nix; @@ -213,7 +213,7 @@ in atop = import ./atop.nix { inherit pkgs runTest; }; atticd = runTest ./atticd.nix; atuin = runTest ./atuin.nix; - ax25 = handleTest ./ax25.nix { }; + ax25 = runTest ./ax25.nix; audiobookshelf = runTest ./audiobookshelf.nix; auth-mysql = runTest ./auth-mysql.nix; authelia = runTest ./authelia.nix; @@ -233,7 +233,7 @@ in bcachefs = runTestOn [ "x86_64-linux" "aarch64-linux" ] ./bcachefs.nix; beanstalkd = runTest ./beanstalkd.nix; bees = runTest ./bees.nix; - benchexec = handleTest ./benchexec.nix { }; + benchexec = runTest ./benchexec.nix; binary-cache = runTest { imports = [ ./binary-cache.nix ]; _module.args.compression = "zstd"; @@ -250,37 +250,37 @@ in bird = handleTest ./bird.nix { }; birdwatcher = handleTest ./birdwatcher.nix { }; bitbox-bridge = runTest ./bitbox-bridge.nix; - bitcoind = handleTest ./bitcoind.nix { }; - bittorrent = handleTest ./bittorrent.nix { }; - 
blockbook-frontend = handleTest ./blockbook-frontend.nix { }; + bitcoind = runTest ./bitcoind.nix; + bittorrent = runTest ./bittorrent.nix; + blockbook-frontend = runTest ./blockbook-frontend.nix; blocky = handleTest ./blocky.nix { }; boot = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./boot.nix { }; bootspec = handleTestOn [ "x86_64-linux" ] ./bootspec.nix { }; - boot-stage1 = handleTest ./boot-stage1.nix { }; - boot-stage2 = handleTest ./boot-stage2.nix { }; - borgbackup = handleTest ./borgbackup.nix { }; - borgmatic = handleTest ./borgmatic.nix { }; + boot-stage1 = runTest ./boot-stage1.nix; + boot-stage2 = runTest ./boot-stage2.nix; + borgbackup = runTest ./borgbackup.nix; + borgmatic = runTest ./borgmatic.nix; botamusique = runTest ./botamusique.nix; bpf = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./bpf.nix { }; - bpftune = handleTest ./bpftune.nix { }; - breitbandmessung = handleTest ./breitbandmessung.nix { }; - brscan5 = handleTest ./brscan5.nix { }; - btrbk = handleTest ./btrbk.nix { }; - btrbk-doas = handleTest ./btrbk-doas.nix { }; - btrbk-no-timer = handleTest ./btrbk-no-timer.nix { }; - btrbk-section-order = handleTest ./btrbk-section-order.nix { }; - budgie = handleTest ./budgie.nix { }; + bpftune = runTest ./bpftune.nix; + breitbandmessung = runTest ./breitbandmessung.nix; + brscan5 = runTest ./brscan5.nix; + btrbk = runTest ./btrbk.nix; + btrbk-doas = runTest ./btrbk-doas.nix; + btrbk-no-timer = runTest ./btrbk-no-timer.nix; + btrbk-section-order = runTest ./btrbk-section-order.nix; + budgie = runTest ./budgie.nix; buildbot = runTest ./buildbot.nix; - buildkite-agents = handleTest ./buildkite-agents.nix { }; - c2fmzq = handleTest ./c2fmzq.nix { }; + buildkite-agents = runTest ./buildkite-agents.nix; + c2fmzq = runTest ./c2fmzq.nix; caddy = runTest ./caddy.nix; cadvisor = handleTestOn [ "x86_64-linux" ] ./cadvisor.nix { }; - cage = handleTest ./cage.nix { }; - cagebreak = handleTest ./cagebreak.nix { }; + cage = runTest ./cage.nix; + cagebreak = runTest ./cagebreak.nix; calibre-web = runTest ./calibre-web.nix; calibre-server = import ./calibre-server.nix { inherit pkgs runTest; }; - canaille = handleTest ./canaille.nix { }; - castopod = handleTest ./castopod.nix { }; + canaille = runTest ./canaille.nix; + castopod = runTest ./castopod.nix; cassandra_4 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_4; }; centrifugo = runTest ./centrifugo.nix; ceph-multi-node = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-multi-node.nix { }; @@ -296,47 +296,47 @@ in certmgr = handleTest ./certmgr.nix { }; cfssl = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./cfssl.nix { }; cgit = runTest ./cgit.nix; - charliecloud = handleTest ./charliecloud.nix { }; + charliecloud = runTest ./charliecloud.nix; chromadb = runTest ./chromadb.nix; chromium = (handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./chromium.nix { }).stable or { }; chrony = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./chrony.nix { }; chrony-ptp = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./chrony-ptp.nix { }; - cinnamon = handleTest ./cinnamon.nix { }; - cinnamon-wayland = handleTest ./cinnamon-wayland.nix { }; - cjdns = handleTest ./cjdns.nix { }; + cinnamon = runTest ./cinnamon.nix; + cinnamon-wayland = runTest ./cinnamon-wayland.nix; + cjdns = runTest ./cjdns.nix; clatd = runTest ./clatd.nix; - clickhouse = handleTest ./clickhouse.nix { }; + clickhouse = runTest ./clickhouse.nix; cloud-init = handleTest ./cloud-init.nix { }; cloud-init-hostname = handleTest ./cloud-init-hostname.nix { 
}; - cloudlog = handleTest ./cloudlog.nix { }; + cloudlog = runTest ./cloudlog.nix; cntr = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./cntr.nix { }; - cockpit = handleTest ./cockpit.nix { }; + cockpit = runTest ./cockpit.nix; cockroachdb = handleTestOn [ "x86_64-linux" ] ./cockroachdb.nix { }; - code-server = handleTest ./code-server.nix { }; - coder = handleTest ./coder.nix { }; - collectd = handleTest ./collectd.nix { }; - commafeed = handleTest ./commafeed.nix { }; - connman = handleTest ./connman.nix { }; - consul = handleTest ./consul.nix { }; - consul-template = handleTest ./consul-template.nix { }; - containers-bridge = handleTest ./containers-bridge.nix { }; - containers-custom-pkgs.nix = handleTest ./containers-custom-pkgs.nix { }; - containers-ephemeral = handleTest ./containers-ephemeral.nix { }; - containers-extra_veth = handleTest ./containers-extra_veth.nix { }; - containers-hosts = handleTest ./containers-hosts.nix { }; - containers-imperative = handleTest ./containers-imperative.nix { }; - containers-ip = handleTest ./containers-ip.nix { }; - containers-macvlans = handleTest ./containers-macvlans.nix { }; - containers-names = handleTest ./containers-names.nix { }; - containers-nested = handleTest ./containers-nested.nix { }; - containers-physical_interfaces = handleTest ./containers-physical_interfaces.nix { }; - containers-portforward = handleTest ./containers-portforward.nix { }; - containers-reloadable = handleTest ./containers-reloadable.nix { }; - containers-require-bind-mounts = handleTest ./containers-require-bind-mounts.nix { }; - containers-restart_networking = handleTest ./containers-restart_networking.nix { }; - containers-tmpfs = handleTest ./containers-tmpfs.nix { }; - containers-unified-hierarchy = handleTest ./containers-unified-hierarchy.nix { }; - convos = handleTest ./convos.nix { }; + code-server = runTest ./code-server.nix; + coder = runTest ./coder.nix; + collectd = runTest ./collectd.nix; + commafeed = runTest ./commafeed.nix; + connman = runTest ./connman.nix; + consul = runTest ./consul.nix; + consul-template = runTest ./consul-template.nix; + containers-bridge = runTest ./containers-bridge.nix; + containers-custom-pkgs.nix = runTest ./containers-custom-pkgs.nix; + containers-ephemeral = runTest ./containers-ephemeral.nix; + containers-extra_veth = runTest ./containers-extra_veth.nix; + containers-hosts = runTest ./containers-hosts.nix; + containers-imperative = runTest ./containers-imperative.nix; + containers-ip = runTest ./containers-ip.nix; + containers-macvlans = runTest ./containers-macvlans.nix; + containers-names = runTest ./containers-names.nix; + containers-nested = runTest ./containers-nested.nix; + containers-physical_interfaces = runTest ./containers-physical_interfaces.nix; + containers-portforward = runTest ./containers-portforward.nix; + containers-reloadable = runTest ./containers-reloadable.nix; + containers-require-bind-mounts = runTest ./containers-require-bind-mounts.nix; + containers-restart_networking = runTest ./containers-restart_networking.nix; + containers-tmpfs = runTest ./containers-tmpfs.nix; + containers-unified-hierarchy = runTest ./containers-unified-hierarchy.nix; + convos = runTest ./convos.nix; corerad = handleTest ./corerad.nix { }; cosmic = runTest { imports = [ ./cosmic.nix ]; @@ -362,33 +362,33 @@ in _module.args.enableAutologin = true; _module.args.enableXWayland = false; }; - coturn = handleTest ./coturn.nix { }; - couchdb = handleTest ./couchdb.nix { }; - crabfit = handleTest ./crabfit.nix { }; + 
coturn = runTest ./coturn.nix; + couchdb = runTest ./couchdb.nix; + crabfit = runTest ./crabfit.nix; cri-o = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./cri-o.nix { }; cryptpad = runTest ./cryptpad.nix; cups-pdf = runTest ./cups-pdf.nix; - curl-impersonate = handleTest ./curl-impersonate.nix { }; + curl-impersonate = runTest ./curl-impersonate.nix; custom-ca = handleTest ./custom-ca.nix { }; - croc = handleTest ./croc.nix { }; + croc = runTest ./croc.nix; cross-seed = runTest ./cross-seed.nix; cyrus-imap = runTest ./cyrus-imap.nix; darling-dmg = runTest ./darling-dmg.nix; - dae = handleTest ./dae.nix { }; + dae = runTest ./dae.nix; davis = runTest ./davis.nix; - db-rest = handleTest ./db-rest.nix { }; - dconf = handleTest ./dconf.nix { }; - ddns-updater = handleTest ./ddns-updater.nix { }; - deconz = handleTest ./deconz.nix { }; - deepin = handleTest ./deepin.nix { }; - deluge = handleTest ./deluge.nix { }; - dendrite = handleTest ./matrix/dendrite.nix { }; - dependency-track = handleTest ./dependency-track.nix { }; - devpi-server = handleTest ./devpi-server.nix { }; - dex-oidc = handleTest ./dex-oidc.nix { }; + db-rest = runTest ./db-rest.nix; + dconf = runTest ./dconf.nix; + ddns-updater = runTest ./ddns-updater.nix; + deconz = runTest ./deconz.nix; + deepin = runTest ./deepin.nix; + deluge = runTest ./deluge.nix; + dendrite = runTest ./matrix/dendrite.nix; + dependency-track = runTest ./dependency-track.nix; + devpi-server = runTest ./devpi-server.nix; + dex-oidc = runTest ./dex-oidc.nix; dhparams = handleTest ./dhparams.nix { }; - disable-installer-tools = handleTest ./disable-installer-tools.nix { }; - discourse = handleTest ./discourse.nix { }; + disable-installer-tools = runTest ./disable-installer-tools.nix; + discourse = runTest ./discourse.nix; dnscrypt-proxy2 = handleTestOn [ "x86_64-linux" ] ./dnscrypt-proxy2.nix { }; dnsdist = import ./dnsdist.nix { inherit pkgs runTest; }; doas = runTest ./doas.nix; @@ -400,36 +400,36 @@ in docker-tools-cross = runTestOn [ "x86_64-linux" "aarch64-linux" ] ./docker-tools-cross.nix; docker-tools-overlay = runTestOn [ "x86_64-linux" ] ./docker-tools-overlay.nix; docling-serve = runTest ./docling-serve.nix; - documize = handleTest ./documize.nix { }; + documize = runTest ./documize.nix; documentation = pkgs.callPackage ../modules/misc/documentation/test.nix { inherit nixosLib; }; - doh-proxy-rust = handleTest ./doh-proxy-rust.nix { }; + doh-proxy-rust = runTest ./doh-proxy-rust.nix; dokuwiki = runTest ./dokuwiki.nix; dolibarr = runTest ./dolibarr.nix; - domination = handleTest ./domination.nix { }; + domination = runTest ./domination.nix; dovecot = handleTest ./dovecot.nix { }; drawterm = discoverTests (import ./drawterm.nix); - drbd = handleTest ./drbd.nix { }; + drbd = runTest ./drbd.nix; druid = handleTestOn [ "x86_64-linux" ] ./druid { }; - drbd-driver = handleTest ./drbd-driver.nix { }; - dublin-traceroute = handleTest ./dublin-traceroute.nix { }; + drbd-driver = runTest ./drbd-driver.nix; + dublin-traceroute = runTest ./dublin-traceroute.nix; earlyoom = handleTestOn [ "x86_64-linux" ] ./earlyoom.nix { }; early-mount-options = handleTest ./early-mount-options.nix { }; ec2-config = (handleTestOn [ "x86_64-linux" ] ./ec2.nix { }).boot-ec2-config or { }; ec2-nixops = (handleTestOn [ "x86_64-linux" ] ./ec2.nix { }).boot-ec2-nixops or { }; echoip = runTest ./echoip.nix; - ecryptfs = handleTest ./ecryptfs.nix { }; - fscrypt = handleTest ./fscrypt.nix { }; + ecryptfs = runTest ./ecryptfs.nix; + fscrypt = runTest ./fscrypt.nix; 
fastnetmon-advanced = runTest ./fastnetmon-advanced.nix; eintopf = runTest ./eintopf.nix; - ejabberd = handleTest ./xmpp/ejabberd.nix { }; + ejabberd = runTest ./xmpp/ejabberd.nix; elk = handleTestOn [ "x86_64-linux" ] ./elk.nix { }; emacs-daemon = runTest ./emacs-daemon.nix; - endlessh = handleTest ./endlessh.nix { }; - endlessh-go = handleTest ./endlessh-go.nix { }; - engelsystem = handleTest ./engelsystem.nix { }; - enlightenment = handleTest ./enlightenment.nix { }; - env = handleTest ./env.nix { }; - envfs = handleTest ./envfs.nix { }; + endlessh = runTest ./endlessh.nix; + endlessh-go = runTest ./endlessh-go.nix; + engelsystem = runTest ./engelsystem.nix; + enlightenment = runTest ./enlightenment.nix; + env = runTest ./env.nix; + envfs = runTest ./envfs.nix; envoy = runTest { imports = [ ./envoy.nix ]; _module.args.envoyPackage = pkgs.envoy; @@ -438,10 +438,10 @@ in imports = [ ./envoy.nix ]; _module.args.envoyPackage = pkgs.envoy-bin; }; - ergo = handleTest ./ergo.nix { }; - ergochat = handleTest ./ergochat.nix { }; - eris-server = handleTest ./eris-server.nix { }; - esphome = handleTest ./esphome.nix { }; + ergo = runTest ./ergo.nix; + ergochat = runTest ./ergochat.nix; + eris-server = runTest ./eris-server.nix; + esphome = runTest ./esphome.nix; etc = pkgs.callPackage ../modules/system/etc/test.nix { inherit evalMinimalConfig; }; activation = pkgs.callPackage ../modules/system/activation/test.nix { }; activation-lib = pkgs.callPackage ../modules/system/activation/lib/test.nix { }; @@ -452,20 +452,20 @@ in activation-perlless = runTest ./activation/perlless.nix; etcd = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./etcd/etcd.nix { }; etcd-cluster = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./etcd/etcd-cluster.nix { }; - etebase-server = handleTest ./etebase-server.nix { }; - etesync-dav = handleTest ./etesync-dav.nix { }; + etebase-server = runTest ./etebase-server.nix; + etesync-dav = runTest ./etesync-dav.nix; evcc = runTest ./evcc.nix; fail2ban = runTest ./fail2ban.nix; - fakeroute = handleTest ./fakeroute.nix { }; + fakeroute = runTest ./fakeroute.nix; fancontrol = runTest ./fancontrol.nix; - fanout = handleTest ./fanout.nix { }; + fanout = runTest ./fanout.nix; fcitx5 = handleTest ./fcitx5 { }; fedimintd = runTest ./fedimintd.nix; - fenics = handleTest ./fenics.nix { }; - ferm = handleTest ./ferm.nix { }; + fenics = runTest ./fenics.nix; + ferm = runTest ./ferm.nix; ferretdb = handleTest ./ferretdb.nix { }; fider = runTest ./fider.nix; - filesender = handleTest ./filesender.nix { }; + filesender = runTest ./filesender.nix; filesystems-overlayfs = runTest ./filesystems-overlayfs.nix; firefly-iii = runTest ./firefly-iii.nix; firefly-iii-data-importer = runTest ./firefly-iii-data-importer.nix; @@ -490,23 +490,23 @@ in imports = [ ./firefox.nix ]; _module.args.firefoxPackage = pkgs.firefox-esr-128; }; - firefoxpwa = handleTest ./firefoxpwa.nix { }; - firejail = handleTest ./firejail.nix { }; + firefoxpwa = runTest ./firefoxpwa.nix; + firejail = runTest ./firejail.nix; firewall = handleTest ./firewall.nix { nftables = false; }; firewall-nftables = handleTest ./firewall.nix { nftables = true; }; fish = runTest ./fish.nix; - firezone = handleTest ./firezone/firezone.nix { }; + firezone = runTest ./firezone/firezone.nix; flannel = handleTestOn [ "x86_64-linux" ] ./flannel.nix { }; - flaresolverr = handleTest ./flaresolverr.nix { }; - flood = handleTest ./flood.nix { }; + flaresolverr = runTest ./flaresolverr.nix; + flood = runTest ./flood.nix; floorp = runTest { imports = [ 
./firefox.nix ]; _module.args.firefoxPackage = pkgs.floorp; }; fluent-bit = runTest ./fluent-bit.nix; - fluentd = handleTest ./fluentd.nix { }; - fluidd = handleTest ./fluidd.nix { }; - fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix { }; + fluentd = runTest ./fluentd.nix; + fluidd = runTest ./fluidd.nix; + fontconfig-default-fonts = runTest ./fontconfig-default-fonts.nix; forgejo = import ./forgejo.nix { inherit runTest; forgejoPackage = pkgs.forgejo; @@ -516,69 +516,69 @@ in forgejoPackage = pkgs.forgejo-lts; }; freenet = runTest ./freenet.nix; - freeswitch = handleTest ./freeswitch.nix { }; + freeswitch = runTest ./freeswitch.nix; freetube = discoverTests (import ./freetube.nix); freshrss = handleTest ./freshrss { }; frigate = runTest ./frigate.nix; froide-govplan = runTest ./web-apps/froide-govplan.nix; - frp = handleTest ./frp.nix { }; - frr = handleTest ./frr.nix { }; + frp = runTest ./frp.nix; + frr = runTest ./frr.nix; fsck = handleTest ./fsck.nix { }; fsck-systemd-stage-1 = handleTest ./fsck.nix { systemdStage1 = true; }; - ft2-clone = handleTest ./ft2-clone.nix { }; - legit = handleTest ./legit.nix { }; - mimir = handleTest ./mimir.nix { }; - gancio = handleTest ./gancio.nix { }; + ft2-clone = runTest ./ft2-clone.nix; + legit = runTest ./legit.nix; + mimir = runTest ./mimir.nix; + gancio = runTest ./gancio.nix; garage = handleTest ./garage { }; gatus = runTest ./gatus.nix; gemstash = handleTest ./gemstash.nix { }; geoclue2 = runTest ./geoclue2.nix; geoserver = runTest ./geoserver.nix; gerrit = runTest ./gerrit.nix; - geth = handleTest ./geth.nix { }; - ghostunnel = handleTest ./ghostunnel.nix { }; - gitdaemon = handleTest ./gitdaemon.nix { }; + geth = runTest ./geth.nix; + ghostunnel = runTest ./ghostunnel.nix; + gitdaemon = runTest ./gitdaemon.nix; gitea = handleTest ./gitea.nix { giteaPackage = pkgs.gitea; }; github-runner = runTest ./github-runner.nix; gitlab = runTest ./gitlab.nix; - gitolite = handleTest ./gitolite.nix { }; - gitolite-fcgiwrap = handleTest ./gitolite-fcgiwrap.nix { }; + gitolite = runTest ./gitolite.nix; + gitolite-fcgiwrap = runTest ./gitolite-fcgiwrap.nix; glance = runTest ./glance.nix; glances = runTest ./glances.nix; glitchtip = runTest ./glitchtip.nix; - glusterfs = handleTest ./glusterfs.nix { }; + glusterfs = runTest ./glusterfs.nix; gnome = runTest ./gnome.nix; - gnome-extensions = handleTest ./gnome-extensions.nix { }; - gnome-flashback = handleTest ./gnome-flashback.nix { }; - gnome-xorg = handleTest ./gnome-xorg.nix { }; - gns3-server = handleTest ./gns3-server.nix { }; - gnupg = handleTest ./gnupg.nix { }; - goatcounter = handleTest ./goatcounter.nix { }; + gnome-extensions = runTest ./gnome-extensions.nix; + gnome-flashback = runTest ./gnome-flashback.nix; + gnome-xorg = runTest ./gnome-xorg.nix; + gns3-server = runTest ./gns3-server.nix; + gnupg = runTest ./gnupg.nix; + goatcounter = runTest ./goatcounter.nix; go-camo = handleTest ./go-camo.nix { }; go-neb = runTest ./go-neb.nix; - gobgpd = handleTest ./gobgpd.nix { }; - gocd-agent = handleTest ./gocd-agent.nix { }; - gocd-server = handleTest ./gocd-server.nix { }; + gobgpd = runTest ./gobgpd.nix; + gocd-agent = runTest ./gocd-agent.nix; + gocd-server = runTest ./gocd-server.nix; gokapi = runTest ./gokapi.nix; - gollum = handleTest ./gollum.nix { }; - gonic = handleTest ./gonic.nix { }; + gollum = runTest ./gollum.nix; + gonic = runTest ./gonic.nix; google-oslogin = handleTest ./google-oslogin { }; - gopro-tool = handleTest ./gopro-tool.nix { }; - goss = handleTest 
./goss.nix { }; - gotenberg = handleTest ./gotenberg.nix { }; - gotify-server = handleTest ./gotify-server.nix { }; + gopro-tool = runTest ./gopro-tool.nix; + goss = runTest ./goss.nix; + gotenberg = runTest ./gotenberg.nix; + gotify-server = runTest ./gotify-server.nix; gotosocial = runTest ./web-apps/gotosocial.nix; grafana = handleTest ./grafana { }; - graphite = handleTest ./graphite.nix { }; + graphite = runTest ./graphite.nix; grav = runTest ./web-apps/grav.nix; - graylog = handleTest ./graylog.nix { }; - greetd-no-shadow = handleTest ./greetd-no-shadow.nix { }; + graylog = runTest ./graylog.nix; + greetd-no-shadow = runTest ./greetd-no-shadow.nix; grocy = runTest ./grocy.nix; grow-partition = runTest ./grow-partition.nix; - grub = handleTest ./grub.nix { }; - guacamole-server = handleTest ./guacamole-server.nix { }; + grub = runTest ./grub.nix; + guacamole-server = runTest ./guacamole-server.nix; guix = handleTest ./guix { }; - gvisor = handleTest ./gvisor.nix { }; + gvisor = runTest ./gvisor.nix; h2o = import ./web-servers/h2o { inherit recurseIntoAttrs runTest; }; hadoop = import ./hadoop { inherit handleTestOn; @@ -592,30 +592,30 @@ in inherit handleTestOn; package = pkgs.hadoop2; }; - haste-server = handleTest ./haste-server.nix { }; + haste-server = runTest ./haste-server.nix; haproxy = runTest ./haproxy.nix; - hardened = handleTest ./hardened.nix { }; + hardened = runTest ./hardened.nix; harmonia = runTest ./harmonia.nix; - headscale = handleTest ./headscale.nix { }; - healthchecks = handleTest ./web-apps/healthchecks.nix { }; + headscale = runTest ./headscale.nix; + healthchecks = runTest ./web-apps/healthchecks.nix; hbase2 = handleTest ./hbase.nix { package = pkgs.hbase2; }; hbase_2_5 = handleTest ./hbase.nix { package = pkgs.hbase_2_5; }; hbase_2_4 = handleTest ./hbase.nix { package = pkgs.hbase_2_4; }; hbase3 = handleTest ./hbase.nix { package = pkgs.hbase3; }; - hedgedoc = handleTest ./hedgedoc.nix { }; - herbstluftwm = handleTest ./herbstluftwm.nix { }; - homebox = handleTest ./homebox.nix { }; + hedgedoc = runTest ./hedgedoc.nix; + herbstluftwm = runTest ./herbstluftwm.nix; + homebox = runTest ./homebox.nix; homer = handleTest ./homer { }; homepage-dashboard = runTest ./homepage-dashboard.nix; honk = runTest ./honk.nix; installed-tests = pkgs.recurseIntoAttrs (handleTest ./installed-tests { }); - invidious = handleTest ./invidious.nix { }; - iosched = handleTest ./iosched.nix { }; - isolate = handleTest ./isolate.nix { }; - livebook-service = handleTest ./livebook-service.nix { }; - pyload = handleTest ./pyload.nix { }; + invidious = runTest ./invidious.nix; + iosched = runTest ./iosched.nix; + isolate = runTest ./isolate.nix; + livebook-service = runTest ./livebook-service.nix; + pyload = runTest ./pyload.nix; oci-containers = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./oci-containers.nix { }; - odoo = handleTest ./odoo.nix { }; + odoo = runTest ./odoo.nix; odoo17 = handleTest ./odoo.nix { package = pkgs.odoo17; }; odoo16 = handleTest ./odoo.nix { package = pkgs.odoo16; }; oncall = runTest ./web-apps/oncall.nix; @@ -627,20 +627,20 @@ in systemdStage1 = true; }; hitch = handleTest ./hitch { }; - hledger-web = handleTest ./hledger-web.nix { }; - hockeypuck = handleTest ./hockeypuck.nix { }; + hledger-web = runTest ./hledger-web.nix; + hockeypuck = runTest ./hockeypuck.nix; home-assistant = runTest ./home-assistant.nix; hostname = handleTest ./hostname.nix { }; - hound = handleTest ./hound.nix { }; + hound = runTest ./hound.nix; hub = runTest ./git/hub.nix; hydra = 
runTest ./hydra; - i3wm = handleTest ./i3wm.nix { }; + i3wm = runTest ./i3wm.nix; icingaweb2 = runTest ./icingaweb2.nix; - ifm = handleTest ./ifm.nix { }; - iftop = handleTest ./iftop.nix { }; - immich = handleTest ./web-apps/immich.nix { }; - immich-public-proxy = handleTest ./web-apps/immich-public-proxy.nix { }; - incron = handleTest ./incron.nix { }; + ifm = runTest ./ifm.nix; + iftop = runTest ./iftop.nix; + immich = runTest ./web-apps/immich.nix; + immich-public-proxy = runTest ./web-apps/immich-public-proxy.nix; + incron = runTest ./incron.nix; incus = pkgs.recurseIntoAttrs ( handleTest ./incus { lts = false; @@ -648,51 +648,51 @@ in } ); incus-lts = pkgs.recurseIntoAttrs (handleTest ./incus { inherit system pkgs; }); - influxdb = handleTest ./influxdb.nix { }; - influxdb2 = handleTest ./influxdb2.nix { }; + influxdb = runTest ./influxdb.nix; + influxdb2 = runTest ./influxdb2.nix; + initrd-luks-empty-passphrase = runTest ./initrd-luks-empty-passphrase.nix; initrd-network-openvpn = handleTestOn [ "x86_64-linux" "i686-linux" ] ./initrd-network-openvpn { }; initrd-network-ssh = handleTest ./initrd-network-ssh { }; - initrd-luks-empty-passphrase = handleTest ./initrd-luks-empty-passphrase.nix { }; - initrdNetwork = handleTest ./initrd-network.nix { }; initrd-secrets = handleTest ./initrd-secrets.nix { }; initrd-secrets-changing = handleTest ./initrd-secrets-changing.nix { }; - input-remapper = handleTest ./input-remapper.nix { }; - inspircd = handleTest ./inspircd.nix { }; + initrdNetwork = runTest ./initrd-network.nix; + input-remapper = runTest ./input-remapper.nix; + inspircd = runTest ./inspircd.nix; installer = handleTest ./installer.nix { }; installer-systemd-stage-1 = handleTest ./installer-systemd-stage-1.nix { }; - intune = handleTest ./intune.nix { }; + intune = runTest ./intune.nix; invoiceplane = runTest ./invoiceplane.nix; - iodine = handleTest ./iodine.nix { }; - ipv6 = handleTest ./ipv6.nix { }; - iscsi-multipath-root = handleTest ./iscsi-multipath-root.nix { }; - iscsi-root = handleTest ./iscsi-root.nix { }; - isso = handleTest ./isso.nix { }; - jackett = handleTest ./jackett.nix { }; - jellyfin = handleTest ./jellyfin.nix { }; - jenkins = handleTest ./jenkins.nix { }; - jenkins-cli = handleTest ./jenkins-cli.nix { }; - jibri = handleTest ./jibri.nix { }; - jirafeau = handleTest ./jirafeau.nix { }; - jitsi-meet = handleTest ./jitsi-meet.nix { }; + iodine = runTest ./iodine.nix; + ipv6 = runTest ./ipv6.nix; + iscsi-multipath-root = runTest ./iscsi-multipath-root.nix; + iscsi-root = runTest ./iscsi-root.nix; + isso = runTest ./isso.nix; + jackett = runTest ./jackett.nix; + jellyfin = runTest ./jellyfin.nix; + jenkins = runTest ./jenkins.nix; + jenkins-cli = runTest ./jenkins-cli.nix; + jibri = runTest ./jibri.nix; + jirafeau = runTest ./jirafeau.nix; + jitsi-meet = runTest ./jitsi-meet.nix; jool = import ./jool.nix { inherit pkgs runTest; }; - jotta-cli = handleTest ./jotta-cli.nix { }; + jotta-cli = runTest ./jotta-cli.nix; k3s = handleTest ./k3s { }; kafka = handleTest ./kafka { }; kanboard = runTest ./web-apps/kanboard.nix; - kanidm = handleTest ./kanidm.nix { }; - kanidm-provisioning = handleTest ./kanidm-provisioning.nix { }; - karma = handleTest ./karma.nix { }; - kavita = handleTest ./kavita.nix { }; - kbd-setfont-decompress = handleTest ./kbd-setfont-decompress.nix { }; - kbd-update-search-paths-patch = handleTest ./kbd-update-search-paths-patch.nix { }; + kanidm = runTest ./kanidm.nix; + kanidm-provisioning = runTest ./kanidm-provisioning.nix; + karma = runTest 
./karma.nix; + kavita = runTest ./kavita.nix; + kbd-setfont-decompress = runTest ./kbd-setfont-decompress.nix; + kbd-update-search-paths-patch = runTest ./kbd-update-search-paths-patch.nix; kea = runTest ./kea.nix; - keepalived = handleTest ./keepalived.nix { }; - keepassxc = handleTest ./keepassxc.nix { }; + keepalived = runTest ./keepalived.nix; + keepassxc = runTest ./keepassxc.nix; kerberos = handleTest ./kerberos/default.nix { }; kernel-generic = handleTest ./kernel-generic.nix { }; - kernel-latest-ath-user-regd = handleTest ./kernel-latest-ath-user-regd.nix { }; + kernel-latest-ath-user-regd = runTest ./kernel-latest-ath-user-regd.nix; kernel-rust = handleTest ./kernel-rust.nix { }; - keter = handleTest ./keter.nix { }; + keter = runTest ./keter.nix; kexec = runTest ./kexec.nix; keycloak = discoverTests (import ./keycloak.nix); keyd = handleTest ./keyd.nix { }; @@ -701,52 +701,52 @@ in kismet = runTest ./kismet.nix; kmonad = runTest ./kmonad.nix; knot = runTest ./knot.nix; - komga = handleTest ./komga.nix { }; + komga = runTest ./komga.nix; krb5 = discoverTests (import ./krb5); - ksm = handleTest ./ksm.nix { }; - kthxbye = handleTest ./kthxbye.nix { }; + ksm = runTest ./ksm.nix; + kthxbye = runTest ./kthxbye.nix; kubernetes = handleTestOn [ "x86_64-linux" ] ./kubernetes { }; kubo = import ./kubo { inherit recurseIntoAttrs runTest; }; - ladybird = handleTest ./ladybird.nix { }; - languagetool = handleTest ./languagetool.nix { }; - lanraragi = handleTest ./lanraragi.nix { }; + ladybird = runTest ./ladybird.nix; + languagetool = runTest ./languagetool.nix; + lanraragi = runTest ./lanraragi.nix; latestKernel.login = handleTest ./login.nix { latestKernel = true; }; lasuite-docs = runTest ./web-apps/lasuite-docs.nix; lavalink = runTest ./lavalink.nix; - leaps = handleTest ./leaps.nix { }; - lemmy = handleTest ./lemmy.nix { }; - libinput = handleTest ./libinput.nix { }; + leaps = runTest ./leaps.nix; + lemmy = runTest ./lemmy.nix; + libinput = runTest ./libinput.nix; librenms = runTest ./librenms.nix; - libresprite = handleTest ./libresprite.nix { }; + libresprite = runTest ./libresprite.nix; libreswan = runTest ./libreswan.nix; libreswan-nat = runTest ./libreswan-nat.nix; librewolf = runTest { imports = [ ./firefox.nix ]; _module.args.firefoxPackage = pkgs.librewolf; }; - libuiohook = handleTest ./libuiohook.nix { }; - libvirtd = handleTest ./libvirtd.nix { }; - lidarr = handleTest ./lidarr.nix { }; - lightdm = handleTest ./lightdm.nix { }; + libuiohook = runTest ./libuiohook.nix; + libvirtd = runTest ./libvirtd.nix; + lidarr = runTest ./lidarr.nix; + lightdm = runTest ./lightdm.nix; lighttpd = runTest ./lighttpd.nix; livekit = runTest ./networking/livekit.nix; - limesurvey = handleTest ./limesurvey.nix { }; + limesurvey = runTest ./limesurvey.nix; limine = import ./limine { inherit runTest; }; listmonk = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./listmonk.nix { }; litellm = runTest ./litellm.nix; - litestream = handleTest ./litestream.nix { }; + litestream = runTest ./litestream.nix; lk-jwt-service = runTest ./matrix/lk-jwt-service.nix; - lldap = handleTest ./lldap.nix { }; - localsend = handleTest ./localsend.nix { }; - locate = handleTest ./locate.nix { }; - login = handleTest ./login.nix { }; + lldap = runTest ./lldap.nix; + localsend = runTest ./localsend.nix; + locate = runTest ./locate.nix; + login = runTest ./login.nix; logrotate = runTest ./logrotate.nix; - loki = handleTest ./loki.nix { }; - luks = handleTest ./luks.nix { }; + loki = runTest ./loki.nix; + luks = runTest 
./luks.nix; lvm2 = handleTest ./lvm2 { }; lxc = handleTest ./lxc { }; lxd = pkgs.recurseIntoAttrs (handleTest ./lxd { inherit handleTestOn; }); - lxd-image-server = handleTest ./lxd-image-server.nix { }; + lxd-image-server = runTest ./lxd-image-server.nix; #logstash = handleTest ./logstash.nix {}; lomiri = discoverTests (import ./lomiri.nix); lomiri-calculator-app = runTest ./lomiri-calculator-app.nix; @@ -760,60 +760,60 @@ in lomiri-gallery-app = runTest ./lomiri-gallery-app.nix; lomiri-system-settings = runTest ./lomiri-system-settings.nix; lorri = handleTest ./lorri/default.nix { }; - lxqt = handleTest ./lxqt.nix { }; - ly = handleTest ./ly.nix { }; + lxqt = runTest ./lxqt.nix; + ly = runTest ./ly.nix; maddy = discoverTests (import ./maddy { inherit handleTest; }); - maestral = handleTest ./maestral.nix { }; + maestral = runTest ./maestral.nix; magic-wormhole-mailbox-server = runTest ./magic-wormhole-mailbox-server.nix; - magnetico = handleTest ./magnetico.nix { }; + magnetico = runTest ./magnetico.nix; mailcatcher = runTest ./mailcatcher.nix; mailhog = runTest ./mailhog.nix; mailpit = runTest ./mailpit.nix; mailman = runTest ./mailman.nix; man = runTest ./man.nix; mariadb-galera = handleTest ./mysql/mariadb-galera.nix { }; - marytts = handleTest ./marytts.nix { }; + marytts = runTest ./marytts.nix; mastodon = pkgs.recurseIntoAttrs (handleTest ./web-apps/mastodon { inherit handleTestOn; }); pixelfed = discoverTests (import ./web-apps/pixelfed { inherit handleTestOn; }); - mate = handleTest ./mate.nix { }; - mate-wayland = handleTest ./mate-wayland.nix { }; - matter-server = handleTest ./matter-server.nix { }; + mate = runTest ./mate.nix; + mate-wayland = runTest ./mate-wayland.nix; + matter-server = runTest ./matter-server.nix; matomo = runTest ./matomo.nix; matrix-alertmanager = runTest ./matrix/matrix-alertmanager.nix; matrix-appservice-irc = runTest ./matrix/appservice-irc.nix; - matrix-conduit = handleTest ./matrix/conduit.nix { }; + matrix-conduit = runTest ./matrix/conduit.nix; matrix-continuwuity = runTest ./matrix/continuwuity.nix; - matrix-synapse = handleTest ./matrix/synapse.nix { }; - matrix-synapse-workers = handleTest ./matrix/synapse-workers.nix { }; - mautrix-meta-postgres = handleTest ./matrix/mautrix-meta-postgres.nix { }; - mautrix-meta-sqlite = handleTest ./matrix/mautrix-meta-sqlite.nix { }; + matrix-synapse = runTest ./matrix/synapse.nix; + matrix-synapse-workers = runTest ./matrix/synapse-workers.nix; mattermost = handleTest ./mattermost { }; - mealie = handleTest ./mealie.nix { }; - mediamtx = handleTest ./mediamtx.nix { }; + mautrix-meta-postgres = runTest ./matrix/mautrix-meta-postgres.nix; + mautrix-meta-sqlite = runTest ./matrix/mautrix-meta-sqlite.nix; + mealie = runTest ./mealie.nix; + mediamtx = runTest ./mediamtx.nix; mediatomb = handleTest ./mediatomb.nix { }; mediawiki = handleTest ./mediawiki.nix { }; - meilisearch = handleTest ./meilisearch.nix { }; + meilisearch = runTest ./meilisearch.nix; memcached = runTest ./memcached.nix; - merecat = handleTest ./merecat.nix { }; - metabase = handleTest ./metabase.nix { }; - mihomo = handleTest ./mihomo.nix { }; - mindustry = handleTest ./mindustry.nix { }; - minecraft = handleTest ./minecraft.nix { }; - minecraft-server = handleTest ./minecraft-server.nix { }; - minidlna = handleTest ./minidlna.nix { }; - miniflux = handleTest ./miniflux.nix { }; - minio = handleTest ./minio.nix { }; + merecat = runTest ./merecat.nix; + metabase = runTest ./metabase.nix; + mihomo = runTest ./mihomo.nix; + mindustry = runTest 
./mindustry.nix; + minecraft = runTest ./minecraft.nix; + minecraft-server = runTest ./minecraft-server.nix; + minidlna = runTest ./minidlna.nix; + miniflux = runTest ./miniflux.nix; + minio = runTest ./minio.nix; miracle-wm = runTest ./miracle-wm.nix; miriway = runTest ./miriway.nix; - misc = handleTest ./misc.nix { }; - misskey = handleTest ./misskey.nix { }; - mjolnir = handleTest ./matrix/mjolnir.nix { }; + misc = runTest ./misc.nix; + misskey = runTest ./misskey.nix; + mjolnir = runTest ./matrix/mjolnir.nix; mobilizon = runTest ./mobilizon.nix; - mod_perl = handleTest ./mod_perl.nix { }; - molly-brown = handleTest ./molly-brown.nix { }; - mollysocket = handleTest ./mollysocket.nix { }; - monado = handleTest ./monado.nix { }; - monetdb = handleTest ./monetdb.nix { }; + mod_perl = runTest ./mod_perl.nix; + molly-brown = runTest ./molly-brown.nix; + mollysocket = runTest ./mollysocket.nix; + monado = runTest ./monado.nix; + monetdb = runTest ./monetdb.nix; monica = runTest ./web-apps/monica.nix; mongodb = runTest ./mongodb.nix; mongodb-ce = runTest ( @@ -824,33 +824,33 @@ in } ); moodle = runTest ./moodle.nix; - moonraker = handleTest ./moonraker.nix { }; - mopidy = handleTest ./mopidy.nix { }; + moonraker = runTest ./moonraker.nix; + mopidy = runTest ./mopidy.nix; morph-browser = runTest ./morph-browser.nix; - morty = handleTest ./morty.nix { }; + morty = runTest ./morty.nix; mosquitto = runTest ./mosquitto.nix; - moosefs = handleTest ./moosefs.nix { }; + moosefs = runTest ./moosefs.nix; movim = import ./web-apps/movim { inherit recurseIntoAttrs runTest; }; mpd = runTest ./mpd.nix; mpv = runTest ./mpv.nix; - mtp = handleTest ./mtp.nix { }; - multipass = handleTest ./multipass.nix { }; + mtp = runTest ./mtp.nix; + multipass = runTest ./multipass.nix; mumble = runTest ./mumble.nix; # Fails on aarch64-linux at the PDF creation step - need to debug this on an # aarch64 machine.. 
musescore = handleTestOn [ "x86_64-linux" ] ./musescore.nix { };
 music-assistant = runTest ./music-assistant.nix;
- munin = handleTest ./munin.nix { };
- mutableUsers = handleTest ./mutable-users.nix { };
+ munin = runTest ./munin.nix;
+ mutableUsers = runTest ./mutable-users.nix;
 mycelium = handleTest ./mycelium { };
- mympd = handleTest ./mympd.nix { };
+ mympd = runTest ./mympd.nix;
 mysql = handleTest ./mysql/mysql.nix { };
 mysql-autobackup = handleTest ./mysql/mysql-autobackup.nix { };
 mysql-backup = handleTest ./mysql/mysql-backup.nix { };
 mysql-replication = handleTest ./mysql/mysql-replication.nix { };
 n8n = runTest ./n8n.nix;
 nagios = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./nagios.nix { };
- nar-serve = handleTest ./nar-serve.nix { };
+ nar-serve = runTest ./nar-serve.nix;
 nat.firewall = handleTest ./nat.nix { withFirewall = true; };
 nat.standalone = handleTest ./nat.nix { withFirewall = false; };
 nat.nftables.firewall = handleTest ./nat.nix {
@@ -861,39 +861,39 @@ in
 withFirewall = false;
 nftables = true;
 };
- nats = handleTest ./nats.nix { };
- navidrome = handleTest ./navidrome.nix { };
- nbd = handleTest ./nbd.nix { };
- ncdns = handleTest ./ncdns.nix { };
+ nats = runTest ./nats.nix;
+ navidrome = runTest ./navidrome.nix;
+ nbd = runTest ./nbd.nix;
+ ncdns = runTest ./ncdns.nix;
 ncps = runTest ./ncps.nix;
 ncps-custom-cache-datapath = runTest {
 imports = [ ./ncps.nix ];
 defaults.services.ncps.cache.dataPath = "/path/to/ncps";
 };
- ndppd = handleTest ./ndppd.nix { };
- nix-channel = pkgs.callPackage ../modules/config/nix-channel/test.nix { };
- nebula = handleTest ./nebula.nix { };
- netbird = handleTest ./netbird.nix { };
- nimdow = handleTest ./nimdow.nix { };
+ ndppd = runTest ./ndppd.nix;
+ nebula = runTest ./nebula.nix;
 neo4j = handleTest ./neo4j.nix { };
- netdata = handleTest ./netdata.nix { };
+ netbird = runTest ./netbird.nix;
+ netdata = runTest ./netdata.nix;
+ nimdow = runTest ./nimdow.nix;
+ nix-channel = pkgs.callPackage ../modules/config/nix-channel/test.nix { };
 networking.scripted = handleTest ./networking/networkd-and-scripted.nix { networkd = false; };
 networking.networkd = handleTest ./networking/networkd-and-scripted.nix { networkd = true; };
 networking.networkmanager = handleTest ./networking/networkmanager.nix { };
 netbox_3_7 = handleTest ./web-apps/netbox/default.nix { netbox = pkgs.netbox_3_7; };
 netbox_4_1 = handleTest ./web-apps/netbox/default.nix { netbox = pkgs.netbox_4_1; };
 netbox_4_2 = handleTest ./web-apps/netbox/default.nix { netbox = pkgs.netbox_4_2; };
- netbox-upgrade = handleTest ./web-apps/netbox-upgrade.nix { };
+ netbox-upgrade = runTest ./web-apps/netbox-upgrade.nix;
 # TODO: put in networking.nix after the test becomes more complete
- networkingProxy = handleTest ./networking-proxy.nix { };
+ networkingProxy = runTest ./networking-proxy.nix;
 nextcloud = handleTest ./nextcloud { };
 nextflow = runTestOn [ "x86_64-linux" ] ./nextflow.nix;
 nextjs-ollama-llm-ui = runTest ./web-apps/nextjs-ollama-llm-ui.nix;
- nexus = handleTest ./nexus.nix { };
+ nexus = runTest ./nexus.nix;
 # TODO: Test nfsv3 + Kerberos
 nfs3 = handleTest ./nfs { version = 3; };
 nfs4 = handleTest ./nfs { version = 4; };
- nghttpx = handleTest ./nghttpx.nix { };
+ nghttpx = runTest ./nghttpx.nix;
 nginx = runTest ./nginx.nix;
 nginx-auth = runTest ./nginx-auth.nix;
 nginx-etag = runTest ./nginx-etag.nix;
@@ -903,7 +903,7 @@ in
 nginx-mime = runTest ./nginx-mime.nix;
 nginx-modsecurity = runTest ./nginx-modsecurity.nix;
 nginx-moreheaders = runTest ./nginx-moreheaders.nix;
- nginx-njs = handleTest ./nginx-njs.nix { }; + nginx-njs = runTest ./nginx-njs.nix; nginx-proxyprotocol = runTest ./nginx-proxyprotocol/default.nix; nginx-pubhtml = runTest ./nginx-pubhtml.nix; nginx-redirectcode = runTest ./nginx-redirectcode.nix; @@ -913,16 +913,16 @@ in nginx-unix-socket = runTest ./nginx-unix-socket.nix; nginx-variants = import ./nginx-variants.nix { inherit pkgs runTest; }; nifi = runTestOn [ "x86_64-linux" ] ./web-apps/nifi.nix; - nitter = handleTest ./nitter.nix { }; - nix-config = handleTest ./nix-config.nix { }; + nitter = runTest ./nitter.nix; + nix-config = runTest ./nix-config.nix; nix-ld = runTest ./nix-ld.nix; nix-misc = handleTest ./nix/misc.nix { }; nix-upgrade = handleTest ./nix/upgrade.nix { inherit (pkgs) nixVersions; }; nix-required-mounts = runTest ./nix-required-mounts; nix-serve = runTest ./nix-serve.nix; - nix-serve-ssh = handleTest ./nix-serve-ssh.nix { }; + nix-serve-ssh = runTest ./nix-serve-ssh.nix; nixops = handleTest ./nixops/default.nix { }; - nixos-generate-config = handleTest ./nixos-generate-config.nix { }; + nixos-generate-config = runTest ./nixos-generate-config.nix; nixos-rebuild-install-bootloader = handleTestOn [ "x86_64-linux" ] ./nixos-rebuild-install-bootloader.nix { }; @@ -946,97 +946,97 @@ in _module.args.withNg = true; }; nixpkgs = pkgs.callPackage ../modules/misc/nixpkgs/test.nix { inherit evalMinimalConfig; }; - nixseparatedebuginfod = handleTest ./nixseparatedebuginfod.nix { }; + nixseparatedebuginfod = runTest ./nixseparatedebuginfod.nix; node-red = runTest ./node-red.nix; nomad = runTest ./nomad.nix; non-default-filesystems = handleTest ./non-default-filesystems.nix { }; non-switchable-system = runTest ./non-switchable-system.nix; noto-fonts = runTest ./noto-fonts.nix; - noto-fonts-cjk-qt-default-weight = handleTest ./noto-fonts-cjk-qt-default-weight.nix { }; + noto-fonts-cjk-qt-default-weight = runTest ./noto-fonts-cjk-qt-default-weight.nix; novacomd = handleTestOn [ "x86_64-linux" ] ./novacomd.nix { }; - npmrc = handleTest ./npmrc.nix { }; - nscd = handleTest ./nscd.nix { }; - nsd = handleTest ./nsd.nix { }; + npmrc = runTest ./npmrc.nix; + nscd = runTest ./nscd.nix; + nsd = runTest ./nsd.nix; ntfy-sh = handleTest ./ntfy-sh.nix { }; ntfy-sh-migration = handleTest ./ntfy-sh-migration.nix { }; - ntpd = handleTest ./ntpd.nix { }; - ntpd-rs = handleTest ./ntpd-rs.nix { }; + ntpd = runTest ./ntpd.nix; + ntpd-rs = runTest ./ntpd-rs.nix; nvidia-container-toolkit = runTest ./nvidia-container-toolkit.nix; - nvmetcfg = handleTest ./nvmetcfg.nix { }; - nzbget = handleTest ./nzbget.nix { }; - nzbhydra2 = handleTest ./nzbhydra2.nix { }; - ocis = handleTest ./ocis.nix { }; + nvmetcfg = runTest ./nvmetcfg.nix; + nzbget = runTest ./nzbget.nix; + nzbhydra2 = runTest ./nzbhydra2.nix; + ocis = runTest ./ocis.nix; oddjobd = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./oddjobd.nix { }; obs-studio = runTest ./obs-studio.nix; - oh-my-zsh = handleTest ./oh-my-zsh.nix { }; + oh-my-zsh = runTest ./oh-my-zsh.nix; olivetin = runTest ./olivetin.nix; ollama = runTest ./ollama.nix; ollama-cuda = runTestOn [ "x86_64-linux" "aarch64-linux" ] ./ollama-cuda.nix; ollama-rocm = runTestOn [ "x86_64-linux" "aarch64-linux" ] ./ollama-rocm.nix; - ombi = handleTest ./ombi.nix { }; - openarena = handleTest ./openarena.nix { }; + ombi = runTest ./ombi.nix; + openarena = runTest ./openarena.nix; openbao = runTest ./openbao.nix; opencloud = runTest ./opencloud.nix; - openldap = handleTest ./openldap.nix { }; + openldap = runTest ./openldap.nix; + openresty-lua 
= runTest ./openresty-lua.nix; opensearch = discoverTests (import ./opensearch.nix); - openresty-lua = handleTest ./openresty-lua.nix { }; opensmtpd = handleTest ./opensmtpd.nix { }; opensmtpd-rspamd = handleTest ./opensmtpd-rspamd.nix { }; - opensnitch = handleTest ./opensnitch.nix { }; - openssh = handleTest ./openssh.nix { }; - octoprint = handleTest ./octoprint.nix { }; + opensnitch = runTest ./opensnitch.nix; + openssh = runTest ./openssh.nix; + octoprint = runTest ./octoprint.nix; openstack-image-metadata = (handleTestOn [ "x86_64-linux" ] ./openstack-image.nix { }).metadata or { }; openstack-image-userdata = (handleTestOn [ "x86_64-linux" ] ./openstack-image.nix { }).userdata or { }; - opentabletdriver = handleTest ./opentabletdriver.nix { }; - opentelemetry-collector = handleTest ./opentelemetry-collector.nix { }; - open-web-calendar = handleTest ./web-apps/open-web-calendar.nix { }; + opentabletdriver = runTest ./opentabletdriver.nix; + opentelemetry-collector = runTest ./opentelemetry-collector.nix; + open-web-calendar = runTest ./web-apps/open-web-calendar.nix; ocsinventory-agent = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./ocsinventory-agent.nix { }; orthanc = runTest ./orthanc.nix; - owncast = handleTest ./owncast.nix { }; - outline = handleTest ./outline.nix { }; + owncast = runTest ./owncast.nix; + outline = runTest ./outline.nix; i18n = runTest ./i18n.nix; image-contents = handleTest ./image-contents.nix { }; - openvscode-server = handleTest ./openvscode-server.nix { }; + openvscode-server = runTest ./openvscode-server.nix; open-webui = runTest ./open-webui.nix; openvswitch = runTest ./openvswitch.nix; - orangefs = handleTest ./orangefs.nix { }; + orangefs = runTest ./orangefs.nix; os-prober = handleTestOn [ "x86_64-linux" ] ./os-prober.nix { }; osquery = handleTestOn [ "x86_64-linux" ] ./osquery.nix { }; - osrm-backend = handleTest ./osrm-backend.nix { }; - overlayfs = handleTest ./overlayfs.nix { }; - pacemaker = handleTest ./pacemaker.nix { }; - packagekit = handleTest ./packagekit.nix { }; - pam-file-contents = handleTest ./pam/pam-file-contents.nix { }; - pam-oath-login = handleTest ./pam/pam-oath-login.nix { }; - pam-u2f = handleTest ./pam/pam-u2f.nix { }; - pam-ussh = handleTest ./pam/pam-ussh.nix { }; - pam-zfs-key = handleTest ./pam/zfs-key.nix { }; + osrm-backend = runTest ./osrm-backend.nix; + overlayfs = runTest ./overlayfs.nix; + pacemaker = runTest ./pacemaker.nix; + packagekit = runTest ./packagekit.nix; + pam-file-contents = runTest ./pam/pam-file-contents.nix; + pam-oath-login = runTest ./pam/pam-oath-login.nix; + pam-u2f = runTest ./pam/pam-u2f.nix; + pam-ussh = runTest ./pam/pam-ussh.nix; + pam-zfs-key = runTest ./pam/zfs-key.nix; paretosecurity = runTest ./paretosecurity.nix; - pass-secret-service = handleTest ./pass-secret-service.nix { }; + pass-secret-service = runTest ./pass-secret-service.nix; patroni = handleTestOn [ "x86_64-linux" ] ./patroni.nix { }; - pantalaimon = handleTest ./matrix/pantalaimon.nix { }; - pantheon = handleTest ./pantheon.nix { }; - pantheon-wayland = handleTest ./pantheon-wayland.nix { }; - paperless = handleTest ./paperless.nix { }; + pantalaimon = runTest ./matrix/pantalaimon.nix; + pantheon = runTest ./pantheon.nix; + pantheon-wayland = runTest ./pantheon-wayland.nix; + paperless = runTest ./paperless.nix; parsedmarc = handleTest ./parsedmarc { }; - password-option-override-ordering = handleTest ./password-option-override-ordering.nix { }; + password-option-override-ordering = runTest 
./password-option-override-ordering.nix; pdns-recursor = runTest ./pdns-recursor.nix; - pds = handleTest ./pds.nix { }; - peerflix = handleTest ./peerflix.nix { }; - peering-manager = handleTest ./web-apps/peering-manager.nix { }; + pds = runTest ./pds.nix; + peerflix = runTest ./peerflix.nix; + peering-manager = runTest ./web-apps/peering-manager.nix; peertube = handleTestOn [ "x86_64-linux" ] ./web-apps/peertube.nix { }; - peroxide = handleTest ./peroxide.nix { }; + peroxide = runTest ./peroxide.nix; pgadmin4 = runTest ./pgadmin4.nix; pgbackrest = import ./pgbackrest { inherit runTest; }; - pgbouncer = handleTest ./pgbouncer.nix { }; + pgbouncer = runTest ./pgbouncer.nix; pghero = runTest ./pghero.nix; pgweb = runTest ./pgweb.nix; - pgmanage = handleTest ./pgmanage.nix { }; - phosh = handleTest ./phosh.nix { }; - photonvision = handleTest ./photonvision.nix { }; - photoprism = handleTest ./photoprism.nix { }; + pgmanage = runTest ./pgmanage.nix; + phosh = runTest ./phosh.nix; + photonvision = runTest ./photonvision.nix; + photoprism = runTest ./photoprism.nix; php = import ./php/default.nix { inherit runTest; php = pkgs.php; @@ -1057,43 +1057,43 @@ in inherit runTest; php = pkgs.php84; }; - phylactery = handleTest ./web-apps/phylactery.nix { }; - pict-rs = handleTest ./pict-rs.nix { }; - pingvin-share = handleTest ./pingvin-share.nix { }; + phylactery = runTest ./web-apps/phylactery.nix; + pict-rs = runTest ./pict-rs.nix; + pingvin-share = runTest ./pingvin-share.nix; pinnwand = runTest ./pinnwand.nix; - plantuml-server = handleTest ./plantuml-server.nix { }; - plasma-bigscreen = handleTest ./plasma-bigscreen.nix { }; - plasma5 = handleTest ./plasma5.nix { }; - plasma6 = handleTest ./plasma6.nix { }; - plasma5-systemd-start = handleTest ./plasma5-systemd-start.nix { }; - plausible = handleTest ./plausible.nix { }; - playwright-python = handleTest ./playwright-python.nix { }; - please = handleTest ./please.nix { }; + plantuml-server = runTest ./plantuml-server.nix; + plasma-bigscreen = runTest ./plasma-bigscreen.nix; + plasma5 = runTest ./plasma5.nix; + plasma6 = runTest ./plasma6.nix; + plasma5-systemd-start = runTest ./plasma5-systemd-start.nix; + plausible = runTest ./plausible.nix; + playwright-python = runTest ./playwright-python.nix; + please = runTest ./please.nix; pleroma = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./pleroma.nix { }; - plikd = handleTest ./plikd.nix { }; - plotinus = handleTest ./plotinus.nix { }; - pocket-id = handleTest ./pocket-id.nix { }; - podgrab = handleTest ./podgrab.nix { }; + plikd = runTest ./plikd.nix; + plotinus = runTest ./plotinus.nix; + pocket-id = runTest ./pocket-id.nix; + podgrab = runTest ./podgrab.nix; podman = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./podman/default.nix { }; podman-tls-ghostunnel = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./podman/tls-ghostunnel.nix { }; - polaris = handleTest ./polaris.nix { }; + polaris = runTest ./polaris.nix; pomerium = handleTestOn [ "x86_64-linux" ] ./pomerium.nix { }; - portunus = handleTest ./portunus.nix { }; + portunus = runTest ./portunus.nix; postfix = handleTest ./postfix.nix { }; postfix-raise-smtpd-tls-security-level = handleTest ./postfix-raise-smtpd-tls-security-level.nix { }; - postfixadmin = handleTest ./postfixadmin.nix { }; + postfixadmin = runTest ./postfixadmin.nix; postgres-websockets = runTest ./postgres-websockets.nix; postgresql = handleTest ./postgresql { }; postgrest = runTest ./postgrest.nix; - powerdns = handleTest ./powerdns.nix { }; + powerdns = runTest 
./powerdns.nix; powerdns-admin = handleTest ./powerdns-admin.nix { }; - power-profiles-daemon = handleTest ./power-profiles-daemon.nix { }; - pppd = handleTest ./pppd.nix { }; + power-profiles-daemon = runTest ./power-profiles-daemon.nix; + pppd = runTest ./pppd.nix; predictable-interface-names = handleTest ./predictable-interface-names.nix { }; pretalx = runTest ./web-apps/pretalx.nix; prefect = runTest ./prefect.nix; @@ -1118,19 +1118,19 @@ in _module.args.socket = false; _module.args.listenTcp = false; }; - private-gpt = handleTest ./private-gpt.nix { }; + private-gpt = runTest ./private-gpt.nix; privatebin = runTest ./privatebin.nix; - privoxy = handleTest ./privoxy.nix { }; + privoxy = runTest ./privoxy.nix; prometheus = import ./prometheus { inherit runTest; }; prometheus-exporters = handleTest ./prometheus-exporters.nix { }; prosody = handleTest ./xmpp/prosody.nix { }; prosody-mysql = handleTest ./xmpp/prosody-mysql.nix { }; - proxy = handleTest ./proxy.nix { }; + proxy = runTest ./proxy.nix; prowlarr = runTest ./prowlarr.nix; - pt2-clone = handleTest ./pt2-clone.nix { }; - pykms = handleTest ./pykms.nix { }; - public-inbox = handleTest ./public-inbox.nix { }; - pufferpanel = handleTest ./pufferpanel.nix { }; + pt2-clone = runTest ./pt2-clone.nix; + pykms = runTest ./pykms.nix; + public-inbox = runTest ./public-inbox.nix; + pufferpanel = runTest ./pufferpanel.nix; pulseaudio = discoverTests (import ./pulseaudio.nix); qboot = handleTestOn [ "x86_64-linux" "i686-linux" ] ./qboot.nix { }; qemu-vm-restrictnetwork = handleTest ./qemu-vm-restrictnetwork.nix { }; @@ -1139,130 +1139,130 @@ in qemu-vm-store = runTest ./qemu-vm-store.nix; qgis = handleTest ./qgis.nix { package = pkgs.qgis; }; qgis-ltr = handleTest ./qgis.nix { package = pkgs.qgis-ltr; }; - qownnotes = handleTest ./qownnotes.nix { }; + qownnotes = runTest ./qownnotes.nix; qtile = runTestOn [ "x86_64-linux" "aarch64-linux" ] ./qtile/default.nix; - quake3 = handleTest ./quake3.nix { }; - quicktun = handleTest ./quicktun.nix { }; - quickwit = handleTest ./quickwit.nix { }; - quorum = handleTest ./quorum.nix { }; - rabbitmq = handleTest ./rabbitmq.nix { }; - radarr = handleTest ./radarr.nix { }; - radicale = handleTest ./radicale.nix { }; + quake3 = runTest ./quake3.nix; + quicktun = runTest ./quicktun.nix; + quickwit = runTest ./quickwit.nix; + quorum = runTest ./quorum.nix; + rabbitmq = runTest ./rabbitmq.nix; + radarr = runTest ./radarr.nix; + radicale = runTest ./radicale.nix; radicle = runTest ./radicle.nix; ragnarwm = runTestOn [ "x86_64-linux" "aarch64-linux" ] ./ragnarwm.nix; - rasdaemon = handleTest ./rasdaemon.nix { }; + rasdaemon = runTest ./rasdaemon.nix; rathole = runTest ./rathole.nix; - readarr = handleTest ./readarr.nix { }; - realm = handleTest ./realm.nix { }; + readarr = runTest ./readarr.nix; + realm = runTest ./realm.nix; readeck = runTest ./readeck.nix; rebuilderd = runTest ./rebuilderd.nix; redis = handleTest ./redis.nix { }; - redlib = handleTest ./redlib.nix { }; + redlib = runTest ./redlib.nix; redmine = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./redmine.nix { }; - renovate = handleTest ./renovate.nix { }; + renovate = runTest ./renovate.nix; replace-dependencies = handleTest ./replace-dependencies { }; reposilite = runTest ./reposilite.nix; - restartByActivationScript = handleTest ./restart-by-activation-script.nix { }; - restic-rest-server = handleTest ./restic-rest-server.nix { }; - restic = handleTest ./restic.nix { }; - retroarch = handleTest ./retroarch.nix { }; + restartByActivationScript = 
runTest ./restart-by-activation-script.nix; + restic-rest-server = runTest ./restic-rest-server.nix; + restic = runTest ./restic.nix; + retroarch = runTest ./retroarch.nix; rke2 = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./rke2 { }; rkvm = handleTest ./rkvm { }; rmfakecloud = runTest ./rmfakecloud.nix; - robustirc-bridge = handleTest ./robustirc-bridge.nix { }; - roundcube = handleTest ./roundcube.nix { }; + robustirc-bridge = runTest ./robustirc-bridge.nix; + rosenpass = runTest ./rosenpass.nix; + roundcube = runTest ./roundcube.nix; routinator = handleTest ./routinator.nix { }; - rosenpass = handleTest ./rosenpass.nix { }; rshim = handleTest ./rshim.nix { }; rspamd = handleTest ./rspamd.nix { }; - rspamd-trainer = handleTest ./rspamd-trainer.nix { }; + rspamd-trainer = runTest ./rspamd-trainer.nix; rss-bridge = handleTest ./web-apps/rss-bridge { }; rss2email = handleTest ./rss2email.nix { }; - rstudio-server = handleTest ./rstudio-server.nix { }; - rsyncd = handleTest ./rsyncd.nix { }; + rstudio-server = runTest ./rstudio-server.nix; + rsyncd = runTest ./rsyncd.nix; rsyslogd = handleTest ./rsyslogd.nix { }; rtkit = runTest ./rtkit.nix; - rtorrent = handleTest ./rtorrent.nix { }; + rtorrent = runTest ./rtorrent.nix; rush = runTest ./rush.nix; - rustls-libssl = handleTest ./rustls-libssl.nix { }; - rxe = handleTest ./rxe.nix { }; - sabnzbd = handleTest ./sabnzbd.nix { }; + rustls-libssl = runTest ./rustls-libssl.nix; + rxe = runTest ./rxe.nix; + sabnzbd = runTest ./sabnzbd.nix; samba = runTest ./samba.nix; - samba-wsdd = handleTest ./samba-wsdd.nix { }; - sane = handleTest ./sane.nix { }; - sanoid = handleTest ./sanoid.nix { }; - saunafs = handleTest ./saunafs.nix { }; + samba-wsdd = runTest ./samba-wsdd.nix; + sane = runTest ./sane.nix; + sanoid = runTest ./sanoid.nix; + saunafs = runTest ./saunafs.nix; scaphandre = handleTest ./scaphandre.nix { }; schleuder = handleTest ./schleuder.nix { }; scion-freestanding-deployment = handleTest ./scion/freestanding-deployment { }; scrutiny = runTest ./scrutiny.nix; scx = runTest ./scx/default.nix; sddm = handleTest ./sddm.nix { }; - sdl3 = handleTest ./sdl3.nix { }; - seafile = handleTest ./seafile.nix { }; + sdl3 = runTest ./sdl3.nix; + seafile = runTest ./seafile.nix; searx = runTest ./searx.nix; - seatd = handleTest ./seatd.nix { }; + seatd = runTest ./seatd.nix; send = runTest ./send.nix; - service-runner = handleTest ./service-runner.nix { }; + service-runner = runTest ./service-runner.nix; servo = runTest ./servo.nix; shadps4 = runTest ./shadps4.nix; sftpgo = runTest ./sftpgo.nix; - sfxr-qt = handleTest ./sfxr-qt.nix { }; - sgt-puzzles = handleTest ./sgt-puzzles.nix { }; - shadow = handleTest ./shadow.nix { }; + sfxr-qt = runTest ./sfxr-qt.nix; + sgt-puzzles = runTest ./sgt-puzzles.nix; + shadow = runTest ./shadow.nix; shadowsocks = handleTest ./shadowsocks { }; - shattered-pixel-dungeon = handleTest ./shattered-pixel-dungeon.nix { }; - shiori = handleTest ./shiori.nix { }; + shattered-pixel-dungeon = runTest ./shattered-pixel-dungeon.nix; + shiori = runTest ./shiori.nix; signal-desktop = runTest ./signal-desktop.nix; - silverbullet = handleTest ./silverbullet.nix { }; - simple = handleTest ./simple.nix { }; - sing-box = handleTest ./sing-box.nix { }; - slimserver = handleTest ./slimserver.nix { }; - slurm = handleTest ./slurm.nix { }; - snmpd = handleTest ./snmpd.nix { }; - smokeping = handleTest ./smokeping.nix { }; + silverbullet = runTest ./silverbullet.nix; + simple = runTest ./simple.nix; + sing-box = runTest ./sing-box.nix; + 
slimserver = runTest ./slimserver.nix; + slurm = runTest ./slurm.nix; + snmpd = runTest ./snmpd.nix; + smokeping = runTest ./smokeping.nix; snapcast = runTest ./snapcast.nix; - snapper = handleTest ./snapper.nix { }; + snapper = runTest ./snapper.nix; snipe-it = runTest ./web-apps/snipe-it.nix; - soapui = handleTest ./soapui.nix { }; - soft-serve = handleTest ./soft-serve.nix { }; - sogo = handleTest ./sogo.nix { }; - soju = handleTest ./soju.nix { }; - solanum = handleTest ./solanum.nix { }; - sonarr = handleTest ./sonarr.nix { }; - sonic-server = handleTest ./sonic-server.nix { }; + soapui = runTest ./soapui.nix; + soft-serve = runTest ./soft-serve.nix; + sogo = runTest ./sogo.nix; + soju = runTest ./soju.nix; + solanum = runTest ./solanum.nix; + sonarr = runTest ./sonarr.nix; + sonic-server = runTest ./sonic-server.nix; sourcehut = handleTest ./sourcehut { }; - spacecookie = handleTest ./spacecookie.nix { }; + spacecookie = runTest ./spacecookie.nix; spark = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./spark { }; spiped = runTest ./spiped.nix; - sqlite3-to-mysql = handleTest ./sqlite3-to-mysql.nix { }; - squid = handleTest ./squid.nix { }; + sqlite3-to-mysql = runTest ./sqlite3-to-mysql.nix; + squid = runTest ./squid.nix; sslh = handleTest ./sslh.nix { }; - ssh-agent-auth = handleTest ./ssh-agent-auth.nix { }; - ssh-audit = handleTest ./ssh-audit.nix { }; + ssh-agent-auth = runTest ./ssh-agent-auth.nix; + ssh-audit = runTest ./ssh-audit.nix; sssd = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd.nix { }; sssd-ldap = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd-ldap.nix { }; - stalwart-mail = handleTest ./stalwart-mail.nix { }; + stalwart-mail = runTest ./stalwart-mail.nix; stargazer = runTest ./web-servers/stargazer.nix; starship = runTest ./starship.nix; stash = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./stash.nix { }; static-web-server = runTest ./web-servers/static-web-server.nix; step-ca = handleTestOn [ "x86_64-linux" ] ./step-ca.nix { }; stratis = handleTest ./stratis { }; - strongswan-swanctl = handleTest ./strongswan-swanctl.nix { }; + strongswan-swanctl = runTest ./strongswan-swanctl.nix; stub-ld = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./stub-ld.nix { }; stunnel = handleTest ./stunnel.nix { }; - sudo = handleTest ./sudo.nix { }; + sudo = runTest ./sudo.nix; sudo-rs = runTest ./sudo-rs.nix; - sunshine = handleTest ./sunshine.nix { }; - suricata = handleTest ./suricata.nix { }; + sunshine = runTest ./sunshine.nix; + suricata = runTest ./suricata.nix; suwayomi-server = handleTest ./suwayomi-server.nix { }; - swap-file-btrfs = handleTest ./swap-file-btrfs.nix { }; - swap-partition = handleTest ./swap-partition.nix { }; - swap-random-encryption = handleTest ./swap-random-encryption.nix { }; + swap-file-btrfs = runTest ./swap-file-btrfs.nix; + swap-partition = runTest ./swap-partition.nix; + swap-random-encryption = runTest ./swap-random-encryption.nix; swapspace = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./swapspace.nix { }; - sway = handleTest ./sway.nix { }; - swayfx = handleTest ./swayfx.nix { }; + sway = runTest ./sway.nix; + swayfx = runTest ./swayfx.nix; switchTest = runTest { imports = [ ./switch-test.nix ]; defaults.system.switch.enableNg = false; @@ -1271,160 +1271,160 @@ in imports = [ ./switch-test.nix ]; defaults.system.switch.enableNg = true; }; - sx = handleTest ./sx.nix { }; - sympa = handleTest ./sympa.nix { }; - syncthing = handleTest ./syncthing.nix { }; - syncthing-no-settings = handleTest ./syncthing-no-settings.nix { 
}; - syncthing-init = handleTest ./syncthing-init.nix { }; - syncthing-many-devices = handleTest ./syncthing-many-devices.nix { }; + sx = runTest ./sx.nix; + sympa = runTest ./sympa.nix; + syncthing = runTest ./syncthing.nix; + syncthing-no-settings = runTest ./syncthing-no-settings.nix; + syncthing-init = runTest ./syncthing-init.nix; + syncthing-many-devices = runTest ./syncthing-many-devices.nix; syncthing-folders = runTest ./syncthing-folders.nix; - syncthing-relay = handleTest ./syncthing-relay.nix { }; + syncthing-relay = runTest ./syncthing-relay.nix; sysinit-reactivation = runTest ./sysinit-reactivation.nix; - systemd = handleTest ./systemd.nix { }; - systemd-analyze = handleTest ./systemd-analyze.nix { }; + systemd = runTest ./systemd.nix; + systemd-analyze = runTest ./systemd-analyze.nix; systemd-binfmt = handleTestOn [ "x86_64-linux" ] ./systemd-binfmt.nix { }; systemd-boot = handleTest ./systemd-boot.nix { }; - systemd-bpf = handleTest ./systemd-bpf.nix { }; + systemd-bpf = runTest ./systemd-bpf.nix; systemd-confinement = handleTest ./systemd-confinement { }; - systemd-coredump = handleTest ./systemd-coredump.nix { }; - systemd-cryptenroll = handleTest ./systemd-cryptenroll.nix { }; - systemd-credentials-tpm2 = handleTest ./systemd-credentials-tpm2.nix { }; - systemd-escaping = handleTest ./systemd-escaping.nix { }; - systemd-initrd-bridge = handleTest ./systemd-initrd-bridge.nix { }; - systemd-initrd-btrfs-raid = handleTest ./systemd-initrd-btrfs-raid.nix { }; - systemd-initrd-luks-fido2 = handleTest ./systemd-initrd-luks-fido2.nix { }; - systemd-initrd-luks-keyfile = handleTest ./systemd-initrd-luks-keyfile.nix { }; + systemd-coredump = runTest ./systemd-coredump.nix; + systemd-cryptenroll = runTest ./systemd-cryptenroll.nix; + systemd-credentials-tpm2 = runTest ./systemd-credentials-tpm2.nix; + systemd-escaping = runTest ./systemd-escaping.nix; + systemd-initrd-bridge = runTest ./systemd-initrd-bridge.nix; + systemd-initrd-btrfs-raid = runTest ./systemd-initrd-btrfs-raid.nix; + systemd-initrd-luks-fido2 = runTest ./systemd-initrd-luks-fido2.nix; + systemd-initrd-luks-keyfile = runTest ./systemd-initrd-luks-keyfile.nix; systemd-initrd-luks-empty-passphrase = handleTest ./initrd-luks-empty-passphrase.nix { systemdStage1 = true; }; - systemd-initrd-luks-password = handleTest ./systemd-initrd-luks-password.nix { }; - systemd-initrd-luks-tpm2 = handleTest ./systemd-initrd-luks-tpm2.nix { }; - systemd-initrd-luks-unl0kr = handleTest ./systemd-initrd-luks-unl0kr.nix { }; - systemd-initrd-modprobe = handleTest ./systemd-initrd-modprobe.nix { }; - systemd-initrd-shutdown = handleTest ./systemd-shutdown.nix { systemdStage1 = true; }; - systemd-initrd-simple = runTest ./systemd-initrd-simple.nix; - systemd-initrd-swraid = handleTest ./systemd-initrd-swraid.nix { }; - systemd-initrd-vconsole = handleTest ./systemd-initrd-vconsole.nix { }; + systemd-initrd-luks-password = runTest ./systemd-initrd-luks-password.nix; + systemd-initrd-luks-tpm2 = runTest ./systemd-initrd-luks-tpm2.nix; + systemd-initrd-luks-unl0kr = runTest ./systemd-initrd-luks-unl0kr.nix; + systemd-initrd-modprobe = runTest ./systemd-initrd-modprobe.nix; systemd-initrd-networkd = handleTest ./systemd-initrd-networkd.nix { }; - systemd-initrd-networkd-ssh = handleTest ./systemd-initrd-networkd-ssh.nix { }; + systemd-initrd-networkd-ssh = runTest ./systemd-initrd-networkd-ssh.nix; systemd-initrd-networkd-openvpn = handleTestOn [ "x86_64-linux" "i686-linux" ] ./initrd-network-openvpn { systemdStage1 = true; }; - 
systemd-initrd-vlan = handleTest ./systemd-initrd-vlan.nix { }; - systemd-journal = handleTest ./systemd-journal.nix { }; - systemd-journal-gateway = handleTest ./systemd-journal-gateway.nix { }; - systemd-journal-upload = handleTest ./systemd-journal-upload.nix { }; + systemd-initrd-shutdown = handleTest ./systemd-shutdown.nix { systemdStage1 = true; }; + systemd-initrd-simple = runTest ./systemd-initrd-simple.nix; + systemd-initrd-swraid = runTest ./systemd-initrd-swraid.nix; + systemd-initrd-vconsole = runTest ./systemd-initrd-vconsole.nix; + systemd-initrd-vlan = runTest ./systemd-initrd-vlan.nix; + systemd-journal = runTest ./systemd-journal.nix; + systemd-journal-gateway = runTest ./systemd-journal-gateway.nix; + systemd-journal-upload = runTest ./systemd-journal-upload.nix; systemd-lock-handler = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./systemd-lock-handler.nix; - systemd-machinectl = handleTest ./systemd-machinectl.nix { }; - systemd-networkd = handleTest ./systemd-networkd.nix { }; - systemd-networkd-bridge = handleTest ./systemd-networkd-bridge.nix { }; - systemd-networkd-dhcpserver = handleTest ./systemd-networkd-dhcpserver.nix { }; + systemd-machinectl = runTest ./systemd-machinectl.nix; + systemd-networkd = runTest ./systemd-networkd.nix; + systemd-networkd-bridge = runTest ./systemd-networkd-bridge.nix; + systemd-networkd-dhcpserver = runTest ./systemd-networkd-dhcpserver.nix; systemd-networkd-dhcpserver-static-leases = handleTest ./systemd-networkd-dhcpserver-static-leases.nix { }; systemd-networkd-ipv6-prefix-delegation = handleTest ./systemd-networkd-ipv6-prefix-delegation.nix { }; - systemd-networkd-vrf = handleTest ./systemd-networkd-vrf.nix { }; - systemd-no-tainted = handleTest ./systemd-no-tainted.nix { }; - systemd-nspawn = handleTest ./systemd-nspawn.nix { }; - systemd-nspawn-configfile = handleTest ./systemd-nspawn-configfile.nix { }; - systemd-oomd = handleTest ./systemd-oomd.nix { }; - systemd-portabled = handleTest ./systemd-portabled.nix { }; + systemd-networkd-vrf = runTest ./systemd-networkd-vrf.nix; + systemd-no-tainted = runTest ./systemd-no-tainted.nix; + systemd-nspawn = runTest ./systemd-nspawn.nix; + systemd-nspawn-configfile = runTest ./systemd-nspawn-configfile.nix; + systemd-oomd = runTest ./systemd-oomd.nix; + systemd-portabled = runTest ./systemd-portabled.nix; systemd-repart = handleTest ./systemd-repart.nix { }; - systemd-resolved = handleTest ./systemd-resolved.nix { }; + systemd-resolved = runTest ./systemd-resolved.nix; systemd-ssh-proxy = runTest ./systemd-ssh-proxy.nix; - systemd-shutdown = handleTest ./systemd-shutdown.nix { }; + systemd-shutdown = runTest ./systemd-shutdown.nix; systemd-sysupdate = runTest ./systemd-sysupdate.nix; systemd-sysusers-mutable = runTest ./systemd-sysusers-mutable.nix; systemd-sysusers-immutable = runTest ./systemd-sysusers-immutable.nix; systemd-sysusers-password-option-override-ordering = runTest ./systemd-sysusers-password-option-override-ordering.nix; - systemd-timesyncd = handleTest ./systemd-timesyncd.nix { }; - systemd-timesyncd-nscd-dnssec = handleTest ./systemd-timesyncd-nscd-dnssec.nix { }; - systemd-user-linger = handleTest ./systemd-user-linger.nix { }; - systemd-user-tmpfiles-rules = handleTest ./systemd-user-tmpfiles-rules.nix { }; - systemd-misc = handleTest ./systemd-misc.nix { }; - systemd-userdbd = handleTest ./systemd-userdbd.nix { }; - systemd-homed = handleTest ./systemd-homed.nix { }; + systemd-timesyncd = runTest ./systemd-timesyncd.nix; + systemd-timesyncd-nscd-dnssec = runTest 
./systemd-timesyncd-nscd-dnssec.nix; + systemd-user-linger = runTest ./systemd-user-linger.nix; + systemd-user-tmpfiles-rules = runTest ./systemd-user-tmpfiles-rules.nix; + systemd-misc = runTest ./systemd-misc.nix; + systemd-userdbd = runTest ./systemd-userdbd.nix; + systemd-homed = runTest ./systemd-homed.nix; systemtap = handleTest ./systemtap.nix { }; startx = import ./startx.nix { inherit pkgs runTest; }; taler = handleTest ./taler { }; - tandoor-recipes = handleTest ./tandoor-recipes.nix { }; - tandoor-recipes-script-name = handleTest ./tandoor-recipes-script-name.nix { }; - tang = handleTest ./tang.nix { }; - taskserver = handleTest ./taskserver.nix { }; - taskchampion-sync-server = handleTest ./taskchampion-sync-server.nix { }; - tayga = handleTest ./tayga.nix { }; - technitium-dns-server = handleTest ./technitium-dns-server.nix { }; - teeworlds = handleTest ./teeworlds.nix { }; + tandoor-recipes = runTest ./tandoor-recipes.nix; + tandoor-recipes-script-name = runTest ./tandoor-recipes-script-name.nix; + tang = runTest ./tang.nix; + taskserver = runTest ./taskserver.nix; + taskchampion-sync-server = runTest ./taskchampion-sync-server.nix; + tayga = runTest ./tayga.nix; + technitium-dns-server = runTest ./technitium-dns-server.nix; + teeworlds = runTest ./teeworlds.nix; telegraf = runTest ./telegraf.nix; teleport = handleTest ./teleport.nix { }; teleports = runTest ./teleports.nix; thelounge = handleTest ./thelounge.nix { }; terminal-emulators = handleTest ./terminal-emulators.nix { }; thanos = handleTest ./thanos.nix { }; - tiddlywiki = handleTest ./tiddlywiki.nix { }; + tiddlywiki = runTest ./tiddlywiki.nix; tigervnc = handleTest ./tigervnc.nix { }; tika = runTest ./tika.nix; - timezone = handleTest ./timezone.nix { }; + timezone = runTest ./timezone.nix; timidity = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./timidity { }; tinc = handleTest ./tinc { }; - tinydns = handleTest ./tinydns.nix { }; - tinyproxy = handleTest ./tinyproxy.nix { }; - tinywl = handleTest ./tinywl.nix { }; - tmate-ssh-server = handleTest ./tmate-ssh-server.nix { }; - tomcat = handleTest ./tomcat.nix { }; - tor = handleTest ./tor.nix { }; + tinydns = runTest ./tinydns.nix; + tinyproxy = runTest ./tinyproxy.nix; + tinywl = runTest ./tinywl.nix; + tmate-ssh-server = runTest ./tmate-ssh-server.nix; + tomcat = runTest ./tomcat.nix; + tor = runTest ./tor.nix; tpm-ek = handleTest ./tpm-ek { }; traefik = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./traefik.nix; - trafficserver = handleTest ./trafficserver.nix { }; - transfer-sh = handleTest ./transfer-sh.nix { }; + trafficserver = runTest ./trafficserver.nix; + transfer-sh = runTest ./transfer-sh.nix; transmission_3 = handleTest ./transmission.nix { transmission = pkgs.transmission_3; }; transmission_4 = handleTest ./transmission.nix { transmission = pkgs.transmission_4; }; # tracee requires bpf tracee = handleTestOn [ "x86_64-linux" ] ./tracee.nix { }; - trezord = handleTest ./trezord.nix { }; - trickster = handleTest ./trickster.nix { }; + trezord = runTest ./trezord.nix; + trickster = runTest ./trickster.nix; trilium-server = handleTestOn [ "x86_64-linux" ] ./trilium-server.nix { }; - tsm-client-gui = handleTest ./tsm-client-gui.nix { }; - ttyd = handleTest ./web-servers/ttyd.nix { }; - tt-rss = handleTest ./web-apps/tt-rss.nix { }; - txredisapi = handleTest ./txredisapi.nix { }; - tuptime = handleTest ./tuptime.nix { }; - turbovnc-headless-server = handleTest ./turbovnc-headless-server.nix { }; - turn-rs = handleTest ./turn-rs.nix { }; + tsm-client-gui = 
runTest ./tsm-client-gui.nix; + ttyd = runTest ./web-servers/ttyd.nix; + tt-rss = runTest ./web-apps/tt-rss.nix; + txredisapi = runTest ./txredisapi.nix; + tuptime = runTest ./tuptime.nix; + turbovnc-headless-server = runTest ./turbovnc-headless-server.nix; + turn-rs = runTest ./turn-rs.nix; tusd = runTest ./tusd/default.nix; tuxguitar = runTest ./tuxguitar.nix; twingate = runTest ./twingate.nix; - typesense = handleTest ./typesense.nix { }; + typesense = runTest ./typesense.nix; tzupdate = runTest ./tzupdate.nix; - ucarp = handleTest ./ucarp.nix { }; - udisks2 = handleTest ./udisks2.nix { }; - ulogd = handleTest ./ulogd/ulogd.nix { }; - umurmur = handleTest ./umurmur.nix { }; - unbound = handleTest ./unbound.nix { }; + ucarp = runTest ./ucarp.nix; + udisks2 = runTest ./udisks2.nix; + ulogd = runTest ./ulogd/ulogd.nix; + umurmur = runTest ./umurmur.nix; + unbound = runTest ./unbound.nix; unifi = runTest ./unifi.nix; unit-php = runTest ./web-servers/unit-php.nix; - unit-perl = handleTest ./web-servers/unit-perl.nix { }; + unit-perl = runTest ./web-servers/unit-perl.nix; upnp.iptables = handleTest ./upnp.nix { useNftables = false; }; upnp.nftables = handleTest ./upnp.nix { useNftables = true; }; - uptermd = handleTest ./uptermd.nix { }; - uptime-kuma = handleTest ./uptime-kuma.nix { }; - urn-timer = handleTest ./urn-timer.nix { }; - usbguard = handleTest ./usbguard.nix { }; + uptermd = runTest ./uptermd.nix; + uptime-kuma = runTest ./uptime-kuma.nix; + urn-timer = runTest ./urn-timer.nix; + usbguard = runTest ./usbguard.nix; userborn = runTest ./userborn.nix; userborn-mutable-users = runTest ./userborn-mutable-users.nix; userborn-immutable-users = runTest ./userborn-immutable-users.nix; userborn-mutable-etc = runTest ./userborn-mutable-etc.nix; userborn-immutable-etc = runTest ./userborn-immutable-etc.nix; - user-activation-scripts = handleTest ./user-activation-scripts.nix { }; + user-activation-scripts = runTest ./user-activation-scripts.nix; user-enable-option = runTest ./user-enable-option.nix; user-expiry = runTest ./user-expiry.nix; - user-home-mode = handleTest ./user-home-mode.nix { }; - ustreamer = handleTest ./ustreamer.nix { }; - uwsgi = handleTest ./uwsgi.nix { }; - v2ray = handleTest ./v2ray.nix { }; + user-home-mode = runTest ./user-home-mode.nix; + ustreamer = runTest ./ustreamer.nix; + uwsgi = runTest ./uwsgi.nix; + v2ray = runTest ./v2ray.nix; varnish60 = runTest { imports = [ ./varnish.nix ]; _module.args.package = pkgs.varnish60; @@ -1433,43 +1433,43 @@ in imports = [ ./varnish.nix ]; _module.args.package = pkgs.varnish77; }; - vault = handleTest ./vault.nix { }; - vault-agent = handleTest ./vault-agent.nix { }; - vault-dev = handleTest ./vault-dev.nix { }; - vault-postgresql = handleTest ./vault-postgresql.nix { }; + vault = runTest ./vault.nix; + vault-agent = runTest ./vault-agent.nix; + vault-dev = runTest ./vault-dev.nix; + vault-postgresql = runTest ./vault-postgresql.nix; vaultwarden = discoverTests (import ./vaultwarden.nix); - vdirsyncer = handleTest ./vdirsyncer.nix { }; + vdirsyncer = runTest ./vdirsyncer.nix; vector = handleTest ./vector { }; velocity = runTest ./velocity.nix; - vengi-tools = handleTest ./vengi-tools.nix { }; + vengi-tools = runTest ./vengi-tools.nix; victoriametrics = handleTest ./victoriametrics { }; - vikunja = handleTest ./vikunja.nix { }; + vikunja = runTest ./vikunja.nix; virtualbox = handleTestOn [ "x86_64-linux" ] ./virtualbox.nix { }; vm-variant = handleTest ./vm-variant.nix { }; vscode-remote-ssh = handleTestOn [ "x86_64-linux" ] 
./vscode-remote-ssh.nix { }; vscodium = discoverTests (import ./vscodium.nix); - vsftpd = handleTest ./vsftpd.nix { }; - waagent = handleTest ./waagent.nix { }; + vsftpd = runTest ./vsftpd.nix; + waagent = runTest ./waagent.nix; wakapi = runTest ./wakapi.nix; - warzone2100 = handleTest ./warzone2100.nix { }; - wasabibackend = handleTest ./wasabibackend.nix { }; + warzone2100 = runTest ./warzone2100.nix; + wasabibackend = runTest ./wasabibackend.nix; wastebin = runTest ./wastebin.nix; - watchdogd = handleTest ./watchdogd.nix { }; + watchdogd = runTest ./watchdogd.nix; webhook = runTest ./webhook.nix; - weblate = handleTest ./web-apps/weblate.nix { }; - whisparr = handleTest ./whisparr.nix { }; + weblate = runTest ./web-apps/weblate.nix; + whisparr = runTest ./whisparr.nix; whoami = runTest ./whoami.nix; - whoogle-search = handleTest ./whoogle-search.nix { }; + whoogle-search = runTest ./whoogle-search.nix; wiki-js = runTest ./wiki-js.nix; wine = handleTest ./wine.nix { }; wireguard = handleTest ./wireguard { }; - wg-access-server = handleTest ./wg-access-server.nix { }; - without-nix = handleTest ./without-nix.nix { }; - wmderland = handleTest ./wmderland.nix { }; - workout-tracker = handleTest ./workout-tracker.nix { }; + wg-access-server = runTest ./wg-access-server.nix; + without-nix = runTest ./without-nix.nix; + wmderland = runTest ./wmderland.nix; + workout-tracker = runTest ./workout-tracker.nix; wpa_supplicant = import ./wpa_supplicant.nix { inherit pkgs runTest; }; wordpress = runTest ./wordpress.nix; - wrappers = handleTest ./wrappers.nix { }; + wrappers = runTest ./wrappers.nix; writefreely = import ./web-apps/writefreely.nix { inherit pkgs runTest; }; wstunnel = runTest ./wstunnel.nix; xandikos = runTest ./xandikos.nix; diff --git a/nixos/tests/ax25.nix b/nixos/tests/ax25.nix index f1092d5de101..df4e3de04cb1 100644 --- a/nixos/tests/ax25.nix +++ b/nixos/tests/ax25.nix @@ -1,131 +1,128 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let +{ pkgs, lib, ... 
}: - baud = 57600; - tty = "/dev/ttyACM0"; - port = "tnc0"; - socatPort = 1234; +let + baud = 57600; + tty = "/dev/ttyACM0"; + port = "tnc0"; + socatPort = 1234; - createAX25Node = nodeId: { + createAX25Node = nodeId: { + boot.kernelPackages = pkgs.linuxPackages_ham; + boot.kernelModules = [ "ax25" ]; - boot.kernelPackages = pkgs.linuxPackages_ham; - boot.kernelModules = [ "ax25" ]; + networking.firewall.allowedTCPPorts = [ socatPort ]; - networking.firewall.allowedTCPPorts = [ socatPort ]; + environment.systemPackages = with pkgs; [ + libax25 + ax25-tools + ax25-apps + socat + ]; - environment.systemPackages = with pkgs; [ - libax25 - ax25-tools - ax25-apps - socat + services.ax25.axports."${port}" = { + inherit baud tty; + enable = true; + callsign = "NOCALL-${toString nodeId}"; + description = "mocked tnc"; + }; + + services.ax25.axlisten = { + enable = true; + }; + + # All mocks radios will connect back to socat-broker on node 1 in order to get + # all messages that are "broadcasted over the ether" + systemd.services.ax25-mock-hardware = { + description = "mock AX.25 TNC and Radio"; + wantedBy = [ "default.target" ]; + before = [ + "ax25-kissattach-${port}.service" + "axlisten.service" ]; - - services.ax25.axports."${port}" = { - inherit baud tty; - enable = true; - callsign = "NOCALL-${toString nodeId}"; - description = "mocked tnc"; - }; - - services.ax25.axlisten = { - enable = true; - }; - - # All mocks radios will connect back to socat-broker on node 1 in order to get - # all messages that are "broadcasted over the ether" - systemd.services.ax25-mock-hardware = { - description = "mock AX.25 TNC and Radio"; - wantedBy = [ "default.target" ]; - before = [ - "ax25-kissattach-${port}.service" - "axlisten.service" - ]; - after = [ "network.target" ]; - serviceConfig = { - Type = "exec"; - ExecStart = "${pkgs.socat}/bin/socat -d -d tcp:192.168.1.1:${toString socatPort} pty,link=${tty},b${toString baud},raw"; - }; + after = [ "network.target" ]; + serviceConfig = { + Type = "exec"; + ExecStart = "${pkgs.socat}/bin/socat -d -d tcp:192.168.1.1:${toString socatPort} pty,link=${tty},b${toString baud},raw"; }; }; - in - { - name = "ax25Simple"; - nodes = { - node1 = lib.mkMerge [ - (createAX25Node 1) - # mimicking radios on the same frequency - { - systemd.services.ax25-mock-ether = { - description = "mock radio ether"; - wantedBy = [ "default.target" ]; - requires = [ "network.target" ]; - before = [ "ax25-mock-hardware.service" ]; - # broken needs access to "ss" or "netstat" - path = [ pkgs.iproute2 ]; - serviceConfig = { - Type = "exec"; - ExecStart = "${pkgs.socat}/bin/socat-broker.sh tcp4-listen:${toString socatPort}"; - }; - postStart = "${pkgs.coreutils}/bin/sleep 2"; + }; +in +{ + name = "ax25Simple"; + nodes = { + node1 = lib.mkMerge [ + (createAX25Node 1) + # mimicking radios on the same frequency + { + systemd.services.ax25-mock-ether = { + description = "mock radio ether"; + wantedBy = [ "default.target" ]; + requires = [ "network.target" ]; + before = [ "ax25-mock-hardware.service" ]; + # broken needs access to "ss" or "netstat" + path = [ pkgs.iproute2 ]; + serviceConfig = { + Type = "exec"; + ExecStart = "${pkgs.socat}/bin/socat-broker.sh tcp4-listen:${toString socatPort}"; }; - } - ]; - node2 = createAX25Node 2; - node3 = createAX25Node 3; - }; - testScript = - { ... 
}: - '' - def wait_for_machine(m): - m.succeed("lsmod | grep ax25") - m.wait_for_unit("ax25-axports.target") - m.wait_for_unit("axlisten.service") - m.fail("journalctl -o cat -u axlisten.service | grep -i \"no AX.25 port data configured\"") + postStart = "${pkgs.coreutils}/bin/sleep 2"; + }; + } + ]; + node2 = createAX25Node 2; + node3 = createAX25Node 3; + }; + testScript = + { ... }: + '' + def wait_for_machine(m): + m.succeed("lsmod | grep ax25") + m.wait_for_unit("ax25-axports.target") + m.wait_for_unit("axlisten.service") + m.fail("journalctl -o cat -u axlisten.service | grep -i \"no AX.25 port data configured\"") - # start the first node since the socat-broker needs to be running - node1.start() - node1.wait_for_unit("ax25-mock-ether.service") - wait_for_machine(node1) + # start the first node since the socat-broker needs to be running + node1.start() + node1.wait_for_unit("ax25-mock-ether.service") + wait_for_machine(node1) - node2.start() - node3.start() - wait_for_machine(node2) - wait_for_machine(node3) + node2.start() + node3.start() + wait_for_machine(node2) + wait_for_machine(node3) - # Node 1 -> Node 2 - node1.succeed("echo hello | ax25_call ${port} NOCALL-1 NOCALL-2") - node2.sleep(1) - node2.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-1 to NOCALL-2 ctl I00\" | grep hello") + # Node 1 -> Node 2 + node1.succeed("echo hello | ax25_call ${port} NOCALL-1 NOCALL-2") + node2.sleep(1) + node2.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-1 to NOCALL-2 ctl I00\" | grep hello") - # Node 1 -> Node 3 - node1.succeed("echo hello | ax25_call ${port} NOCALL-1 NOCALL-3") - node3.sleep(1) - node3.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-1 to NOCALL-3 ctl I00\" | grep hello") + # Node 1 -> Node 3 + node1.succeed("echo hello | ax25_call ${port} NOCALL-1 NOCALL-3") + node3.sleep(1) + node3.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-1 to NOCALL-3 ctl I00\" | grep hello") - # Node 2 -> Node 1 - # must sleep due to previous ax25_call lingering - node2.sleep(5) - node2.succeed("echo hello | ax25_call ${port} NOCALL-2 NOCALL-1") - node1.sleep(1) - node1.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-2 to NOCALL-1 ctl I00\" | grep hello") + # Node 2 -> Node 1 + # must sleep due to previous ax25_call lingering + node2.sleep(5) + node2.succeed("echo hello | ax25_call ${port} NOCALL-2 NOCALL-1") + node1.sleep(1) + node1.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-2 to NOCALL-1 ctl I00\" | grep hello") - # Node 2 -> Node 3 - node2.succeed("echo hello | ax25_call ${port} NOCALL-2 NOCALL-3") - node3.sleep(1) - node3.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-2 to NOCALL-3 ctl I00\" | grep hello") + # Node 2 -> Node 3 + node2.succeed("echo hello | ax25_call ${port} NOCALL-2 NOCALL-3") + node3.sleep(1) + node3.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-2 to NOCALL-3 ctl I00\" | grep hello") - # Node 3 -> Node 1 - # must sleep due to previous ax25_call lingering - node3.sleep(5) - node3.succeed("echo hello | ax25_call ${port} NOCALL-3 NOCALL-1") - node1.sleep(1) - node1.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-3 to NOCALL-1 ctl I00\" | grep hello") + # Node 3 -> Node 1 + # must sleep due to previous ax25_call lingering + node3.sleep(5) + node3.succeed("echo hello | ax25_call ${port} NOCALL-3 NOCALL-1") + node1.sleep(1) + node1.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-3 to 
NOCALL-1 ctl I00\" | grep hello") - # Node 3 -> Node 2 - node3.succeed("echo hello | ax25_call ${port} NOCALL-3 NOCALL-2") - node2.sleep(1) - node2.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-3 to NOCALL-2 ctl I00\" | grep hello") - ''; - } -) + # Node 3 -> Node 2 + node3.succeed("echo hello | ax25_call ${port} NOCALL-3 NOCALL-2") + node2.sleep(1) + node2.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-3 to NOCALL-2 ctl I00\" | grep hello") + ''; +} diff --git a/nixos/tests/benchexec.nix b/nixos/tests/benchexec.nix index 218bfb5afba4..25e53179bf5a 100644 --- a/nixos/tests/benchexec.nix +++ b/nixos/tests/benchexec.nix @@ -1,64 +1,62 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - user = "alice"; - in - { - name = "benchexec"; +{ pkgs, lib, ... }: +let + user = "alice"; +in +{ + name = "benchexec"; - nodes.benchexec = { - imports = [ ./common/user-account.nix ]; + nodes.benchexec = { + imports = [ ./common/user-account.nix ]; - programs.benchexec = { - enable = true; - users = [ user ]; - }; - }; - - testScript = - { ... }: - let - runexec = lib.getExe' pkgs.benchexec "runexec"; - echo = builtins.toString pkgs.benchexec; - test = lib.getExe ( - pkgs.writeShellApplication rec { - name = "test"; - meta.mainProgram = name; - text = "echo '${echo}'"; - } - ); - wd = "/tmp"; - stdout = "${wd}/runexec.out"; - stderr = "${wd}/runexec.err"; - in - '' - start_all() - machine.wait_for_unit("multi-user.target") - benchexec.succeed(''''\ - systemd-run \ - --property='StandardOutput=file:${stdout}' \ - --property='StandardError=file:${stderr}' \ - --unit=runexec --wait --user --machine='${user}@' \ - --working-directory ${wd} \ - '${runexec}' \ - --debug \ - --read-only-dir / \ - --hidden-dir /home \ - '${test}' \ - '''') - benchexec.succeed("grep -s '${echo}' ${wd}/output.log") - benchexec.succeed("test \"$(grep -Ec '((start|wall|cpu)time|memory)=' ${stdout})\" = 4") - benchexec.succeed("! grep -E '(WARNING|ERROR)' ${stderr}") - ''; - - interactive.nodes.benchexec.services.kmscon = { + programs.benchexec = { enable = true; - fonts = [ - { - name = "Fira Code"; - package = pkgs.fira-code; - } - ]; + users = [ user ]; }; - } -) + }; + + testScript = + { ... }: + let + runexec = lib.getExe' pkgs.benchexec "runexec"; + echo = builtins.toString pkgs.benchexec; + test = lib.getExe ( + pkgs.writeShellApplication rec { + name = "test"; + meta.mainProgram = name; + text = "echo '${echo}'"; + } + ); + wd = "/tmp"; + stdout = "${wd}/runexec.out"; + stderr = "${wd}/runexec.err"; + in + '' + start_all() + machine.wait_for_unit("multi-user.target") + benchexec.succeed(''''\ + systemd-run \ + --property='StandardOutput=file:${stdout}' \ + --property='StandardError=file:${stderr}' \ + --unit=runexec --wait --user --machine='${user}@' \ + --working-directory ${wd} \ + '${runexec}' \ + --debug \ + --read-only-dir / \ + --hidden-dir /home \ + '${test}' \ + '''') + benchexec.succeed("grep -s '${echo}' ${wd}/output.log") + benchexec.succeed("test \"$(grep -Ec '((start|wall|cpu)time|memory)=' ${stdout})\" = 4") + benchexec.succeed("! grep -E '(WARNING|ERROR)' ${stderr}") + ''; + + interactive.nodes.benchexec.services.kmscon = { + enable = true; + fonts = [ + { + name = "Fira Code"; + package = pkgs.fira-code; + } + ]; + }; +} diff --git a/nixos/tests/bitcoind.nix b/nixos/tests/bitcoind.nix index d588e055b14a..a4ec0beb51b5 100644 --- a/nixos/tests/bitcoind.nix +++ b/nixos/tests/bitcoind.nix @@ -1,57 +1,55 @@ -import ./make-test-python.nix ( - { pkgs, ... 
}: - { - name = "bitcoind"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ _1000101 ]; - }; +{ pkgs, ... }: +{ + name = "bitcoind"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ _1000101 ]; + }; - nodes.machine = - { ... }: - { - services.bitcoind."mainnet" = { - enable = true; - rpc = { - port = 8332; - users.rpc.passwordHMAC = "acc2374e5f9ba9e62a5204d3686616cf$53abdba5e67a9005be6a27ca03a93ce09e58854bc2b871523a0d239a72968033"; - users.rpc2.passwordHMAC = "1495e4a3ad108187576c68f7f9b5ddc5$accce0881c74aa01bb8960ff3bdbd39f607fd33178147679e055a4ac35f53225"; - }; - }; - - environment.etc."test.blank".text = ""; - services.bitcoind."testnet" = { - enable = true; - configFile = "/etc/test.blank"; - testnet = true; - rpc = { - port = 18332; - }; - extraCmdlineOptions = [ - "-rpcuser=rpc" - "-rpcpassword=rpc" - "-rpcauth=rpc2:1495e4a3ad108187576c68f7f9b5ddc5$accce0881c74aa01bb8960ff3bdbd39f607fd33178147679e055a4ac35f53225" - ]; + nodes.machine = + { ... }: + { + services.bitcoind."mainnet" = { + enable = true; + rpc = { + port = 8332; + users.rpc.passwordHMAC = "acc2374e5f9ba9e62a5204d3686616cf$53abdba5e67a9005be6a27ca03a93ce09e58854bc2b871523a0d239a72968033"; + users.rpc2.passwordHMAC = "1495e4a3ad108187576c68f7f9b5ddc5$accce0881c74aa01bb8960ff3bdbd39f607fd33178147679e055a4ac35f53225"; }; }; - testScript = '' - start_all() + environment.etc."test.blank".text = ""; + services.bitcoind."testnet" = { + enable = true; + configFile = "/etc/test.blank"; + testnet = true; + rpc = { + port = 18332; + }; + extraCmdlineOptions = [ + "-rpcuser=rpc" + "-rpcpassword=rpc" + "-rpcauth=rpc2:1495e4a3ad108187576c68f7f9b5ddc5$accce0881c74aa01bb8960ff3bdbd39f607fd33178147679e055a4ac35f53225" + ]; + }; + }; - machine.wait_for_unit("bitcoind-mainnet.service") - machine.wait_for_unit("bitcoind-testnet.service") + testScript = '' + start_all() - machine.wait_until_succeeds( - 'curl --fail --user rpc:rpc --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:8332 | grep \'"chain":"main"\' ' - ) - machine.wait_until_succeeds( - 'curl --fail --user rpc2:rpc2 --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:8332 | grep \'"chain":"main"\' ' - ) - machine.wait_until_succeeds( - 'curl --fail --user rpc:rpc --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:18332 | grep \'"chain":"test"\' ' - ) - machine.wait_until_succeeds( - 'curl --fail --user rpc2:rpc2 --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:18332 | grep \'"chain":"test"\' ' - ) - ''; - } -) + machine.wait_for_unit("bitcoind-mainnet.service") + machine.wait_for_unit("bitcoind-testnet.service") + + machine.wait_until_succeeds( + 'curl --fail --user rpc:rpc --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:8332 | grep \'"chain":"main"\' ' + ) + machine.wait_until_succeeds( + 'curl --fail --user rpc2:rpc2 --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:8332 | grep \'"chain":"main"\' ' + ) + machine.wait_until_succeeds( + 'curl --fail --user rpc:rpc --data-binary \'{"jsonrpc": "1.0", 
"id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:18332 | grep \'"chain":"test"\' ' + ) + machine.wait_until_succeeds( + 'curl --fail --user rpc2:rpc2 --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:18332 | grep \'"chain":"test"\' ' + ) + ''; +} diff --git a/nixos/tests/bittorrent.nix b/nixos/tests/bittorrent.nix index 83d9168a6fa5..b62eddc4f663 100644 --- a/nixos/tests/bittorrent.nix +++ b/nixos/tests/bittorrent.nix @@ -6,199 +6,197 @@ # which only works if the first client successfully uses the UPnP-IGD # protocol to poke a hole in the NAT. -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let +let - # Some random file to serve. - file = pkgs.hello.src; + # Some random file to serve. + file = pkgs.hello.src; - internalRouterAddress = "192.168.3.1"; - internalClient1Address = "192.168.3.2"; - externalRouterAddress = "80.100.100.1"; - externalClient2Address = "80.100.100.2"; - externalTrackerAddress = "80.100.100.3"; + internalRouterAddress = "192.168.3.1"; + internalClient1Address = "192.168.3.2"; + externalRouterAddress = "80.100.100.1"; + externalClient2Address = "80.100.100.2"; + externalTrackerAddress = "80.100.100.3"; - download-dir = "/var/lib/transmission/Downloads"; - transmissionConfig = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.transmission_3 ]; - services.transmission = { - enable = true; - settings = { - dht-enabled = false; - message-level = 2; - inherit download-dir; - }; + download-dir = "/var/lib/transmission/Downloads"; + transmissionConfig = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.transmission_3 ]; + services.transmission = { + enable = true; + settings = { + dht-enabled = false; + message-level = 2; + inherit download-dir; }; }; - in - - { - name = "bittorrent"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - domenkozar - rob - bobvanderlinden - ]; }; +in - nodes = { - tracker = - { pkgs, ... }: - { - imports = [ transmissionConfig ]; +{ + name = "bittorrent"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + domenkozar + rob + bobvanderlinden + ]; + }; - virtualisation.vlans = [ 1 ]; - networking.firewall.enable = false; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = externalTrackerAddress; - prefixLength = 24; - } - ]; + nodes = { + tracker = + { pkgs, ... }: + { + imports = [ transmissionConfig ]; - # We need Apache on the tracker to serve the torrents. - services.httpd = { - enable = true; - virtualHosts = { - "torrentserver.org" = { - adminAddr = "foo@example.org"; - documentRoot = "/tmp"; - }; + virtualisation.vlans = [ 1 ]; + networking.firewall.enable = false; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = externalTrackerAddress; + prefixLength = 24; + } + ]; + + # We need Apache on the tracker to serve the torrents. + services.httpd = { + enable = true; + virtualHosts = { + "torrentserver.org" = { + adminAddr = "foo@example.org"; + documentRoot = "/tmp"; }; }; - services.opentracker.enable = true; }; + services.opentracker.enable = true; + }; - router = - { pkgs, nodes, ... 
}: - { - virtualisation.vlans = [ - 1 - 2 - ]; - networking.nat.enable = true; - networking.nat.internalInterfaces = [ "eth2" ]; - networking.nat.externalInterface = "eth1"; - networking.firewall.enable = true; - networking.firewall.trustedInterfaces = [ "eth2" ]; - networking.interfaces.eth0.ipv4.addresses = [ ]; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = externalRouterAddress; - prefixLength = 24; - } - ]; - networking.interfaces.eth2.ipv4.addresses = [ - { - address = internalRouterAddress; - prefixLength = 24; - } - ]; - services.miniupnpd = { - enable = true; - externalInterface = "eth1"; - internalIPs = [ "eth2" ]; - appendConfig = '' - ext_ip=${externalRouterAddress} - ''; - }; + router = + { pkgs, nodes, ... }: + { + virtualisation.vlans = [ + 1 + 2 + ]; + networking.nat.enable = true; + networking.nat.internalInterfaces = [ "eth2" ]; + networking.nat.externalInterface = "eth1"; + networking.firewall.enable = true; + networking.firewall.trustedInterfaces = [ "eth2" ]; + networking.interfaces.eth0.ipv4.addresses = [ ]; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = externalRouterAddress; + prefixLength = 24; + } + ]; + networking.interfaces.eth2.ipv4.addresses = [ + { + address = internalRouterAddress; + prefixLength = 24; + } + ]; + services.miniupnpd = { + enable = true; + externalInterface = "eth1"; + internalIPs = [ "eth2" ]; + appendConfig = '' + ext_ip=${externalRouterAddress} + ''; }; + }; - client1 = - { pkgs, nodes, ... }: - { - imports = [ transmissionConfig ]; - environment.systemPackages = [ pkgs.miniupnpc ]; + client1 = + { pkgs, nodes, ... }: + { + imports = [ transmissionConfig ]; + environment.systemPackages = [ pkgs.miniupnpc ]; - virtualisation.vlans = [ 2 ]; - networking.interfaces.eth0.ipv4.addresses = [ ]; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = internalClient1Address; - prefixLength = 24; - } - ]; - networking.defaultGateway = internalRouterAddress; - networking.firewall.enable = false; - }; + virtualisation.vlans = [ 2 ]; + networking.interfaces.eth0.ipv4.addresses = [ ]; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = internalClient1Address; + prefixLength = 24; + } + ]; + networking.defaultGateway = internalRouterAddress; + networking.firewall.enable = false; + }; - client2 = - { pkgs, ... }: - { - imports = [ transmissionConfig ]; + client2 = + { pkgs, ... }: + { + imports = [ transmissionConfig ]; - virtualisation.vlans = [ 1 ]; - networking.interfaces.eth0.ipv4.addresses = [ ]; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = externalClient2Address; - prefixLength = 24; - } - ]; - networking.firewall.enable = false; - }; - }; + virtualisation.vlans = [ 1 ]; + networking.interfaces.eth0.ipv4.addresses = [ ]; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = externalClient2Address; + prefixLength = 24; + } + ]; + networking.firewall.enable = false; + }; + }; - testScript = - { nodes, ... }: - '' - start_all() + testScript = + { nodes, ... }: + '' + start_all() - # Wait for network and miniupnpd. - router.systemctl("start network-online.target") - router.wait_for_unit("network-online.target") - router.wait_for_unit("miniupnpd") + # Wait for network and miniupnpd. + router.systemctl("start network-online.target") + router.wait_for_unit("network-online.target") + router.wait_for_unit("miniupnpd") - # Create the torrent. 
- tracker.succeed("mkdir ${download-dir}/data") - tracker.succeed( - "cp ${file} ${download-dir}/data/test.tar.bz2" - ) - tracker.succeed( - "transmission-create ${download-dir}/data/test.tar.bz2 --private --tracker http://${externalTrackerAddress}:6969/announce --outfile /tmp/test.torrent" - ) - tracker.succeed("chmod 644 /tmp/test.torrent") + # Create the torrent. + tracker.succeed("mkdir ${download-dir}/data") + tracker.succeed( + "cp ${file} ${download-dir}/data/test.tar.bz2" + ) + tracker.succeed( + "transmission-create ${download-dir}/data/test.tar.bz2 --private --tracker http://${externalTrackerAddress}:6969/announce --outfile /tmp/test.torrent" + ) + tracker.succeed("chmod 644 /tmp/test.torrent") - # Start the tracker. !!! use a less crappy tracker - tracker.systemctl("start network-online.target") - tracker.wait_for_unit("network-online.target") - tracker.wait_for_unit("opentracker.service") - tracker.wait_for_open_port(6969) + # Start the tracker. !!! use a less crappy tracker + tracker.systemctl("start network-online.target") + tracker.wait_for_unit("network-online.target") + tracker.wait_for_unit("opentracker.service") + tracker.wait_for_open_port(6969) - # Start the initial seeder. - tracker.succeed( - "transmission-remote --add /tmp/test.torrent --no-portmap --no-dht --download-dir ${download-dir}/data" - ) + # Start the initial seeder. + tracker.succeed( + "transmission-remote --add /tmp/test.torrent --no-portmap --no-dht --download-dir ${download-dir}/data" + ) - # Now we should be able to download from the client behind the NAT. - tracker.wait_for_unit("httpd") - client1.systemctl("start network-online.target") - client1.wait_for_unit("network-online.target") - client1.succeed("transmission-remote --add http://${externalTrackerAddress}/test.torrent >&2 &") - client1.wait_for_file("${download-dir}/test.tar.bz2") - client1.succeed( - "cmp ${download-dir}/test.tar.bz2 ${file}" - ) + # Now we should be able to download from the client behind the NAT. + tracker.wait_for_unit("httpd") + client1.systemctl("start network-online.target") + client1.wait_for_unit("network-online.target") + client1.succeed("transmission-remote --add http://${externalTrackerAddress}/test.torrent >&2 &") + client1.wait_for_file("${download-dir}/test.tar.bz2") + client1.succeed( + "cmp ${download-dir}/test.tar.bz2 ${file}" + ) - # Bring down the initial seeder. - tracker.stop_job("transmission") + # Bring down the initial seeder. + tracker.stop_job("transmission") - # Now download from the second client. This can only succeed if - # the first client created a NAT hole in the router. - client2.systemctl("start network-online.target") - client2.wait_for_unit("network-online.target") - client2.succeed( - "transmission-remote --add http://${externalTrackerAddress}/test.torrent --no-portmap --no-dht >&2 &" - ) - client2.wait_for_file("${download-dir}/test.tar.bz2") - client2.succeed( - "cmp ${download-dir}/test.tar.bz2 ${file}" - ) - ''; - } -) + # Now download from the second client. This can only succeed if + # the first client created a NAT hole in the router. 
+ client2.systemctl("start network-online.target") + client2.wait_for_unit("network-online.target") + client2.succeed( + "transmission-remote --add http://${externalTrackerAddress}/test.torrent --no-portmap --no-dht >&2 &" + ) + client2.wait_for_file("${download-dir}/test.tar.bz2") + client2.succeed( + "cmp ${download-dir}/test.tar.bz2 ${file}" + ) + ''; +} diff --git a/nixos/tests/blockbook-frontend.nix b/nixos/tests/blockbook-frontend.nix index 60bc59d8ec8d..bf6d6bb458c4 100644 --- a/nixos/tests/blockbook-frontend.nix +++ b/nixos/tests/blockbook-frontend.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "blockbook-frontend"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ _1000101 ]; - }; +{ pkgs, ... }: +{ + name = "blockbook-frontend"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ _1000101 ]; + }; - nodes.machine = - { ... }: - { - services.blockbook-frontend."test" = { - enable = true; - }; - services.bitcoind.mainnet = { - enable = true; - rpc = { - port = 8030; - users.rpc.passwordHMAC = "acc2374e5f9ba9e62a5204d3686616cf$53abdba5e67a9005be6a27ca03a93ce09e58854bc2b871523a0d239a72968033"; - }; + nodes.machine = + { ... }: + { + services.blockbook-frontend."test" = { + enable = true; + }; + services.bitcoind.mainnet = { + enable = true; + rpc = { + port = 8030; + users.rpc.passwordHMAC = "acc2374e5f9ba9e62a5204d3686616cf$53abdba5e67a9005be6a27ca03a93ce09e58854bc2b871523a0d239a72968033"; }; }; + }; - testScript = '' - start_all() - machine.wait_for_unit("blockbook-frontend-test.service") + testScript = '' + start_all() + machine.wait_for_unit("blockbook-frontend-test.service") - machine.wait_for_open_port(9030) + machine.wait_for_open_port(9030) - machine.succeed("curl -sSfL http://localhost:9030 | grep 'Blockbook'") - ''; - } -) + machine.succeed("curl -sSfL http://localhost:9030 | grep 'Blockbook'") + ''; +} diff --git a/nixos/tests/boot-stage1.nix b/nixos/tests/boot-stage1.nix index c700b79b27fe..e76af3e1dc6f 100644 --- a/nixos/tests/boot-stage1.nix +++ b/nixos/tests/boot-stage1.nix @@ -1,193 +1,191 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "boot-stage1"; +{ pkgs, ... }: +{ + name = "boot-stage1"; - nodes.machine = - { - config, - pkgs, - lib, - ... - }: - { - boot.extraModulePackages = - let - compileKernelModule = - name: source: - pkgs.runCommandCC name - rec { - inherit source; - kdev = config.boot.kernelPackages.kernel.dev; - kver = config.boot.kernelPackages.kernel.modDirVersion; - ksrc = "${kdev}/lib/modules/${kver}/build"; - hardeningDisable = [ "pic" ]; - nativeBuildInputs = kdev.moduleBuildDependencies; - } - '' - echo "obj-m += $name.o" > Makefile - echo "$source" > "$name.c" - make -C "$ksrc" M=$(pwd) modules - install -vD "$name.ko" "$out/lib/modules/$kver/$name.ko" - ''; - - # This spawns a kthread which just waits until it gets a signal and - # terminates if that is the case. We want to make sure that nothing during - # the boot process kills any kthread by accident, like what happened in - # issue #15226. 
- kcanary = compileKernelModule "kcanary" '' - #include - #include - #include - #include - #include - #include - #include - #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) - #include - #endif - - MODULE_LICENSE("GPL"); - - struct task_struct *canaryTask; - - static int kcanary(void *nothing) - { - allow_signal(SIGINT); - allow_signal(SIGTERM); - allow_signal(SIGKILL); - while (!kthread_should_stop()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout_interruptible(msecs_to_jiffies(100)); - if (signal_pending(current)) break; - } - return 0; + nodes.machine = + { + config, + pkgs, + lib, + ... + }: + { + boot.extraModulePackages = + let + compileKernelModule = + name: source: + pkgs.runCommandCC name + rec { + inherit source; + kdev = config.boot.kernelPackages.kernel.dev; + kver = config.boot.kernelPackages.kernel.modDirVersion; + ksrc = "${kdev}/lib/modules/${kver}/build"; + hardeningDisable = [ "pic" ]; + nativeBuildInputs = kdev.moduleBuildDependencies; } - - static int kcanaryInit(void) - { - kthread_run(&kcanary, NULL, "kcanary"); - return 0; - } - - static void kcanaryExit(void) - { - kthread_stop(canaryTask); - } - - module_init(kcanaryInit); - module_exit(kcanaryExit); - ''; - - in - lib.singleton kcanary; - - boot.initrd.kernelModules = [ "kcanary" ]; - - boot.initrd.extraUtilsCommands = - let - compile = - name: source: - pkgs.runCommandCC name { inherit source; } '' - mkdir -p "$out/bin" - echo "$source" | gcc -Wall -o "$out/bin/$name" -xc - + '' + echo "obj-m += $name.o" > Makefile + echo "$source" > "$name.c" + make -C "$ksrc" M=$(pwd) modules + install -vD "$name.ko" "$out/lib/modules/$kver/$name.ko" ''; - daemonize = - name: source: - compile name '' + # This spawns a kthread which just waits until it gets a signal and + # terminates if that is the case. We want to make sure that nothing during + # the boot process kills any kthread by accident, like what happened in + # issue #15226. + kcanary = compileKernelModule "kcanary" '' + #include + #include + #include + #include + #include + #include + #include + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) + #include + #endif + + MODULE_LICENSE("GPL"); + + struct task_struct *canaryTask; + + static int kcanary(void *nothing) + { + allow_signal(SIGINT); + allow_signal(SIGTERM); + allow_signal(SIGKILL); + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout_interruptible(msecs_to_jiffies(100)); + if (signal_pending(current)) break; + } + return 0; + } + + static int kcanaryInit(void) + { + kthread_run(&kcanary, NULL, "kcanary"); + return 0; + } + + static void kcanaryExit(void) + { + kthread_stop(canaryTask); + } + + module_init(kcanaryInit); + module_exit(kcanaryExit); + ''; + + in + lib.singleton kcanary; + + boot.initrd.kernelModules = [ "kcanary" ]; + + boot.initrd.extraUtilsCommands = + let + compile = + name: source: + pkgs.runCommandCC name { inherit source; } '' + mkdir -p "$out/bin" + echo "$source" | gcc -Wall -o "$out/bin/$name" -xc - + ''; + + daemonize = + name: source: + compile name '' + #include + #include + + void runSource(void) { + ${source} + } + + int main(void) { + if (fork() > 0) return 0; + setsid(); + runSource(); + return 1; + } + ''; + + mkCmdlineCanary = + { + name, + cmdline ? "", + source ? 
"", + }: + (daemonize name '' + char *argv[] = {"${cmdline}", NULL}; + execvp("${name}-child", argv); + '') + // { + child = compile "${name}-child" '' #include #include - void runSource(void) { - ${source} - } - int main(void) { - if (fork() > 0) return 0; - setsid(); - runSource(); + ${source} + while (1) sleep(1); return 1; } ''; + }; - mkCmdlineCanary = - { - name, - cmdline ? "", - source ? "", - }: - (daemonize name '' - char *argv[] = {"${cmdline}", NULL}; - execvp("${name}-child", argv); - '') - // { - child = compile "${name}-child" '' - #include - #include + copyCanaries = lib.concatMapStrings (canary: '' + ${lib.optionalString (canary ? child) '' + copy_bin_and_libs "${canary.child}/bin/${canary.child.name}" + ''} + copy_bin_and_libs "${canary}/bin/${canary.name}" + ''); - int main(void) { - ${source} - while (1) sleep(1); - return 1; - } - ''; - }; + in + copyCanaries [ + # Simple canary process which just sleeps forever and should be killed by + # stage 2. + (daemonize "canary1" "while (1) sleep(1);") - copyCanaries = lib.concatMapStrings (canary: '' - ${lib.optionalString (canary ? child) '' - copy_bin_and_libs "${canary.child}/bin/${canary.child.name}" - ''} - copy_bin_and_libs "${canary}/bin/${canary.name}" - ''); + # We want this canary process to try mimicking a kthread using a cmdline + # with a zero length so we can make sure that the process is properly + # killed in stage 1. + (mkCmdlineCanary { + name = "canary2"; + source = '' + FILE *f; + f = fopen("/run/canary2.pid", "w"); + fprintf(f, "%d\n", getpid()); + fclose(f); + ''; + }) - in - copyCanaries [ - # Simple canary process which just sleeps forever and should be killed by - # stage 2. - (daemonize "canary1" "while (1) sleep(1);") + # This canary process mimics a storage daemon, which we do NOT want to be + # killed before going into stage 2. For more on root storage daemons, see: + # https://www.freedesktop.org/wiki/Software/systemd/RootStorageDaemons/ + (mkCmdlineCanary { + name = "canary3"; + cmdline = "@canary3"; + }) + ]; - # We want this canary process to try mimicking a kthread using a cmdline - # with a zero length so we can make sure that the process is properly - # killed in stage 1. - (mkCmdlineCanary { - name = "canary2"; - source = '' - FILE *f; - f = fopen("/run/canary2.pid", "w"); - fprintf(f, "%d\n", getpid()); - fclose(f); - ''; - }) + boot.initrd.postMountCommands = '' + canary1 + canary2 + canary3 + # Make sure the pidfile of canary 2 is created so that we still can get + # its former pid after the killing spree starts next within stage 1. + while [ ! -s /run/canary2.pid ]; do sleep 0.1; done + ''; + }; - # This canary process mimics a storage daemon, which we do NOT want to be - # killed before going into stage 2. For more on root storage daemons, see: - # https://www.freedesktop.org/wiki/Software/systemd/RootStorageDaemons/ - (mkCmdlineCanary { - name = "canary3"; - cmdline = "@canary3"; - }) - ]; + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.succeed("test -s /run/canary2.pid") + machine.fail("pgrep -a canary1") + machine.fail("kill -0 $(< /run/canary2.pid)") + machine.succeed('pgrep -a -f "^@canary3$"') + machine.succeed('pgrep -a -f "^\\[kcanary\\]$"') + ''; - boot.initrd.postMountCommands = '' - canary1 - canary2 - canary3 - # Make sure the pidfile of canary 2 is created so that we still can get - # its former pid after the killing spree starts next within stage 1. - while [ ! 
-s /run/canary2.pid ]; do sleep 0.1; done - ''; - }; - - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.succeed("test -s /run/canary2.pid") - machine.fail("pgrep -a canary1") - machine.fail("kill -0 $(< /run/canary2.pid)") - machine.succeed('pgrep -a -f "^@canary3$"') - machine.succeed('pgrep -a -f "^\\[kcanary\\]$"') - ''; - - meta.maintainers = with pkgs.lib.maintainers; [ aszlig ]; - } -) + meta.maintainers = with pkgs.lib.maintainers; [ aszlig ]; +} diff --git a/nixos/tests/boot-stage2.nix b/nixos/tests/boot-stage2.nix index 90f0ac01207c..13d7b66fe67e 100644 --- a/nixos/tests/boot-stage2.nix +++ b/nixos/tests/boot-stage2.nix @@ -1,73 +1,71 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "boot-stage2"; +{ pkgs, ... }: +{ + name = "boot-stage2"; - nodes.machine = - { - config, - pkgs, - lib, - ... - }: - { - virtualisation = { - emptyDiskImages = [ 256 ]; + nodes.machine = + { + config, + pkgs, + lib, + ... + }: + { + virtualisation = { + emptyDiskImages = [ 256 ]; - # Mount an ext4 as the upper layer of the Nix store. - fileSystems = { - "/nix/store" = lib.mkForce { - device = "/dev/vdb"; # the above disk image - fsType = "ext4"; + # Mount an ext4 as the upper layer of the Nix store. + fileSystems = { + "/nix/store" = lib.mkForce { + device = "/dev/vdb"; # the above disk image + fsType = "ext4"; - # data=journal always displays after errors=remount-ro; this is only needed because of the overlay - # and #375257 will trigger with `errors=remount-ro` on a non-overlaid store: - # see ordering in https://github.com/torvalds/linux/blob/v6.12/fs/ext4/super.c#L2974 - options = [ - "defaults" - "errors=remount-ro" - "data=journal" - ]; - }; + # data=journal always displays after errors=remount-ro; this is only needed because of the overlay + # and #375257 will trigger with `errors=remount-ro` on a non-overlaid store: + # see ordering in https://github.com/torvalds/linux/blob/v6.12/fs/ext4/super.c#L2974 + options = [ + "defaults" + "errors=remount-ro" + "data=journal" + ]; }; }; - - boot = { - initrd = { - # Format the upper Nix store. - postDeviceCommands = '' - ${pkgs.e2fsprogs}/bin/mkfs.ext4 /dev/vdb - ''; - - # Overlay the RO store onto it. - # Note that bug #375257 can be triggered without an overlay, - # using the errors=remount-ro option (or similar) or with an overlay where any of the - # paths ends in 'ro'. The offending mountpoint also has to be the last (top) one - # if an option ending in 'ro' is the last in the list, so test both cases here. - postMountCommands = '' - mkdir -p /mnt-root/nix/store/ro /mnt-root/nix/store/rw /mnt-root/nix/store/work - mount --bind /mnt-root/nix/.ro-store /mnt-root/nix/store/ro - mount -t overlay overlay \ - -o lowerdir=/mnt-root/nix/store/ro,upperdir=/mnt-root/nix/store/rw,workdir=/mnt-root/nix/store/work \ - /mnt-root/nix/store - ''; - - kernelModules = [ "overlay" ]; - }; - - postBootCommands = '' - touch /etc/post-boot-ran - mount - ''; - }; }; - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.succeed("test /etc/post-boot-ran") - machine.fail("touch /nix/store/should-not-work"); - ''; + boot = { + initrd = { + # Format the upper Nix store. + postDeviceCommands = '' + ${pkgs.e2fsprogs}/bin/mkfs.ext4 /dev/vdb + ''; - meta.maintainers = with pkgs.lib.maintainers; [ numinit ]; - } -) + # Overlay the RO store onto it. + # Note that bug #375257 can be triggered without an overlay, + # using the errors=remount-ro option (or similar) or with an overlay where any of the + # paths ends in 'ro'. 
The offending mountpoint also has to be the last (top) one + # if an option ending in 'ro' is the last in the list, so test both cases here. + postMountCommands = '' + mkdir -p /mnt-root/nix/store/ro /mnt-root/nix/store/rw /mnt-root/nix/store/work + mount --bind /mnt-root/nix/.ro-store /mnt-root/nix/store/ro + mount -t overlay overlay \ + -o lowerdir=/mnt-root/nix/store/ro,upperdir=/mnt-root/nix/store/rw,workdir=/mnt-root/nix/store/work \ + /mnt-root/nix/store + ''; + + kernelModules = [ "overlay" ]; + }; + + postBootCommands = '' + touch /etc/post-boot-ran + mount + ''; + }; + }; + + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.succeed("test /etc/post-boot-ran") + machine.fail("touch /nix/store/should-not-work"); + ''; + + meta.maintainers = with pkgs.lib.maintainers; [ numinit ]; +} diff --git a/nixos/tests/borgbackup.nix b/nixos/tests/borgbackup.nix index 2ee324f1de2a..88d67c09f031 100644 --- a/nixos/tests/borgbackup.nix +++ b/nixos/tests/borgbackup.nix @@ -1,276 +1,274 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - passphrase = "supersecret"; - dataDir = "/ran:dom/data"; - subDir = "not_anything_here"; - excludedSubDirFile = "not_this_file_either"; - excludeFile = "not_this_file"; - keepFile = "important_file"; - keepFileData = "important_data"; - localRepo = "/root/back:up"; - # a repository on a file system which is not mounted automatically - localRepoMount = "/noAutoMount"; - archiveName = "my_archive"; - remoteRepo = "borg@server:."; # No need to specify path - privateKey = pkgs.writeText "id_ed25519" '' - -----BEGIN OPENSSH PRIVATE KEY----- - b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW - QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe - RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw - AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg - 9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ= - -----END OPENSSH PRIVATE KEY----- - ''; - publicKey = '' - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv root@client - ''; - privateKeyAppendOnly = pkgs.writeText "id_ed25519" '' - -----BEGIN OPENSSH PRIVATE KEY----- - b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW - QyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLwAAAJC9YTxxvWE8 - cQAAAAtzc2gtZWQyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLw - AAAEAAhV7wTl5dL/lz+PF/d4PnZXuG1Id6L/mFEiGT1tZsuFpxm7PUQsZB2Ejs8Xp0YVp8 - IOW+HylIRzhweORbRCMvAAAADXJzY2h1ZXR6QGt1cnQ= - -----END OPENSSH PRIVATE KEY----- - ''; - publicKeyAppendOnly = '' - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFpxm7PUQsZB2Ejs8Xp0YVp8IOW+HylIRzhweORbRCMv root@client - ''; +let + passphrase = "supersecret"; + dataDir = "/ran:dom/data"; + subDir = "not_anything_here"; + excludedSubDirFile = "not_this_file_either"; + excludeFile = "not_this_file"; + keepFile = "important_file"; + keepFileData = "important_data"; + localRepo = "/root/back:up"; + # a repository on a file system which is not mounted automatically + localRepoMount = "/noAutoMount"; + archiveName = "my_archive"; + remoteRepo = "borg@server:."; # No need to specify path + privateKey = pkgs.writeText "id_ed25519" '' + -----BEGIN OPENSSH PRIVATE KEY----- + b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW + QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe + RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw + 
AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg + 9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ= + -----END OPENSSH PRIVATE KEY----- + ''; + publicKey = '' + ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv root@client + ''; + privateKeyAppendOnly = pkgs.writeText "id_ed25519" '' + -----BEGIN OPENSSH PRIVATE KEY----- + b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW + QyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLwAAAJC9YTxxvWE8 + cQAAAAtzc2gtZWQyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLw + AAAEAAhV7wTl5dL/lz+PF/d4PnZXuG1Id6L/mFEiGT1tZsuFpxm7PUQsZB2Ejs8Xp0YVp8 + IOW+HylIRzhweORbRCMvAAAADXJzY2h1ZXR6QGt1cnQ= + -----END OPENSSH PRIVATE KEY----- + ''; + publicKeyAppendOnly = '' + ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFpxm7PUQsZB2Ejs8Xp0YVp8IOW+HylIRzhweORbRCMv root@client + ''; - in - { - name = "borgbackup"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ dotlambda ]; - }; +in +{ + name = "borgbackup"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ dotlambda ]; + }; - nodes = { - client = - { ... }: - { - virtualisation.fileSystems.${localRepoMount} = { - device = "tmpfs"; - fsType = "tmpfs"; - options = [ "noauto" ]; + nodes = { + client = + { ... }: + { + virtualisation.fileSystems.${localRepoMount} = { + device = "tmpfs"; + fsType = "tmpfs"; + options = [ "noauto" ]; + }; + + services.borgbackup.jobs = { + + local = { + paths = dataDir; + repo = localRepo; + preHook = '' + # Don't append a timestamp + archiveName="${archiveName}" + ''; + encryption = { + mode = "repokey"; + inherit passphrase; + }; + compression = "auto,zlib,9"; + prune.keep = { + within = "1y"; + yearly = 5; + }; + exclude = [ "*/${excludeFile}" ]; + extraCreateArgs = [ + "--exclude-caches" + "--exclude-if-present" + ".dont backup" + ]; + postHook = "echo post"; + startAt = [ ]; # Do not run automatically }; - services.borgbackup.jobs = { + localMount = { + paths = dataDir; + repo = localRepoMount; + encryption.mode = "none"; + startAt = [ ]; + }; - local = { - paths = dataDir; - repo = localRepo; - preHook = '' - # Don't append a timestamp - archiveName="${archiveName}" - ''; - encryption = { - mode = "repokey"; - inherit passphrase; - }; - compression = "auto,zlib,9"; - prune.keep = { - within = "1y"; - yearly = 5; - }; - exclude = [ "*/${excludeFile}" ]; - extraCreateArgs = [ - "--exclude-caches" - "--exclude-if-present" - ".dont backup" - ]; - postHook = "echo post"; - startAt = [ ]; # Do not run automatically - }; + remote = { + paths = dataDir; + repo = remoteRepo; + encryption.mode = "none"; + startAt = [ ]; + environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519"; + }; - localMount = { - paths = dataDir; - repo = localRepoMount; - encryption.mode = "none"; - startAt = [ ]; - }; + remoteAppendOnly = { + paths = dataDir; + repo = remoteRepo; + encryption.mode = "none"; + startAt = [ ]; + environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly"; + }; - remote = { - paths = dataDir; - repo = remoteRepo; - encryption.mode = "none"; - startAt = [ ]; - environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519"; - }; + commandSuccess = { + dumpCommand = pkgs.writeScript "commandSuccess" '' + echo -n test + ''; + repo = remoteRepo; + encryption.mode = "none"; + startAt = [ ]; + environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519"; + }; - remoteAppendOnly = { - paths = dataDir; - repo = remoteRepo; - 
encryption.mode = "none"; - startAt = [ ]; - environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly"; - }; + commandFail = { + dumpCommand = "${pkgs.coreutils}/bin/false"; + repo = remoteRepo; + encryption.mode = "none"; + startAt = [ ]; + environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519"; + }; - commandSuccess = { - dumpCommand = pkgs.writeScript "commandSuccess" '' - echo -n test - ''; - repo = remoteRepo; - encryption.mode = "none"; - startAt = [ ]; - environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519"; - }; + sleepInhibited = { + inhibitsSleep = true; + # Blocks indefinitely while "backing up" so that we can try to suspend the local system while it's hung + dumpCommand = pkgs.writeScript "sleepInhibited" '' + cat /dev/zero + ''; + repo = remoteRepo; + encryption.mode = "none"; + startAt = [ ]; + environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519"; + }; - commandFail = { - dumpCommand = "${pkgs.coreutils}/bin/false"; - repo = remoteRepo; - encryption.mode = "none"; - startAt = [ ]; - environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519"; - }; - - sleepInhibited = { - inhibitsSleep = true; - # Blocks indefinitely while "backing up" so that we can try to suspend the local system while it's hung - dumpCommand = pkgs.writeScript "sleepInhibited" '' - cat /dev/zero - ''; - repo = remoteRepo; - encryption.mode = "none"; - startAt = [ ]; - environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519"; - }; + }; + }; + server = + { ... }: + { + services.openssh = { + enable = true; + settings = { + PasswordAuthentication = false; + KbdInteractiveAuthentication = false; }; }; - server = - { ... }: - { - services.openssh = { - enable = true; - settings = { - PasswordAuthentication = false; - KbdInteractiveAuthentication = false; - }; - }; - - services.borgbackup.repos.repo1 = { - authorizedKeys = [ publicKey ]; - path = "/data/borgbackup"; - }; - - # Second repo to make sure the authorizedKeys options are merged correctly - services.borgbackup.repos.repo2 = { - authorizedKeysAppendOnly = [ publicKeyAppendOnly ]; - path = "/data/borgbackup"; - quota = ".5G"; - }; + services.borgbackup.repos.repo1 = { + authorizedKeys = [ publicKey ]; + path = "/data/borgbackup"; }; - }; - testScript = '' - start_all() + # Second repo to make sure the authorizedKeys options are merged correctly + services.borgbackup.repos.repo2 = { + authorizedKeysAppendOnly = [ publicKeyAppendOnly ]; + path = "/data/borgbackup"; + quota = ".5G"; + }; + }; + }; - client.fail('test -d "${remoteRepo}"') + testScript = '' + start_all() - client.succeed( - "cp ${privateKey} /root/id_ed25519" - ) - client.succeed("chmod 0600 /root/id_ed25519") - client.succeed( - "cp ${privateKeyAppendOnly} /root/id_ed25519.appendOnly" - ) - client.succeed("chmod 0600 /root/id_ed25519.appendOnly") + client.fail('test -d "${remoteRepo}"') - client.succeed("mkdir -p ${dataDir}/${subDir}") - client.succeed("touch ${dataDir}/${excludeFile}") - client.succeed("touch '${dataDir}/${subDir}/.dont backup'") - client.succeed("touch ${dataDir}/${subDir}/${excludedSubDirFile}") - client.succeed("echo '${keepFileData}' > ${dataDir}/${keepFile}") + client.succeed( + "cp ${privateKey} /root/id_ed25519" + ) + client.succeed("chmod 0600 /root/id_ed25519") + client.succeed( + "cp ${privateKeyAppendOnly} /root/id_ed25519.appendOnly" + ) + client.succeed("chmod 0600 /root/id_ed25519.appendOnly") - with subtest("local"): - borg = 
"BORG_PASSPHRASE='${passphrase}' borg" - client.systemctl("start --wait borgbackup-job-local") - client.fail("systemctl is-failed borgbackup-job-local") - # Make sure exactly one archive has been created - assert int(client.succeed("{} list '${localRepo}' | wc -l".format(borg))) > 0 - # Make sure excludeFile has been excluded - client.fail( - "{} list '${localRepo}::${archiveName}' | grep -qF '${excludeFile}'".format(borg) - ) - # Make sure excludedSubDirFile has been excluded - client.fail( - "{} list '${localRepo}::${archiveName}' | grep -qF '${subDir}/${excludedSubDirFile}".format(borg) - ) - # Make sure keepFile has the correct content - client.succeed("{} extract '${localRepo}::${archiveName}'".format(borg)) - assert "${keepFileData}" in client.succeed("cat ${dataDir}/${keepFile}") - # Make sure the same is true when using `borg mount` - client.succeed( - "mkdir -p /mnt/borg && {} mount '${localRepo}::${archiveName}' /mnt/borg".format( - borg - ) - ) - assert "${keepFileData}" in client.succeed( - "cat /mnt/borg/${dataDir}/${keepFile}" - ) + client.succeed("mkdir -p ${dataDir}/${subDir}") + client.succeed("touch ${dataDir}/${excludeFile}") + client.succeed("touch '${dataDir}/${subDir}/.dont backup'") + client.succeed("touch ${dataDir}/${subDir}/${excludedSubDirFile}") + client.succeed("echo '${keepFileData}' > ${dataDir}/${keepFile}") - with subtest("localMount"): - # the file system for the repo should not be already mounted - client.fail("mount | grep ${localRepoMount}") - # ensure trying to write to the mountpoint before the fs is mounted fails - client.succeed("chattr +i ${localRepoMount}") - borg = "borg" - client.systemctl("start --wait borgbackup-job-localMount") - client.fail("systemctl is-failed borgbackup-job-localMount") - # Make sure exactly one archive has been created - assert int(client.succeed("{} list '${localRepoMount}' | wc -l".format(borg))) > 0 + with subtest("local"): + borg = "BORG_PASSPHRASE='${passphrase}' borg" + client.systemctl("start --wait borgbackup-job-local") + client.fail("systemctl is-failed borgbackup-job-local") + # Make sure exactly one archive has been created + assert int(client.succeed("{} list '${localRepo}' | wc -l".format(borg))) > 0 + # Make sure excludeFile has been excluded + client.fail( + "{} list '${localRepo}::${archiveName}' | grep -qF '${excludeFile}'".format(borg) + ) + # Make sure excludedSubDirFile has been excluded + client.fail( + "{} list '${localRepo}::${archiveName}' | grep -qF '${subDir}/${excludedSubDirFile}".format(borg) + ) + # Make sure keepFile has the correct content + client.succeed("{} extract '${localRepo}::${archiveName}'".format(borg)) + assert "${keepFileData}" in client.succeed("cat ${dataDir}/${keepFile}") + # Make sure the same is true when using `borg mount` + client.succeed( + "mkdir -p /mnt/borg && {} mount '${localRepo}::${archiveName}' /mnt/borg".format( + borg + ) + ) + assert "${keepFileData}" in client.succeed( + "cat /mnt/borg/${dataDir}/${keepFile}" + ) - with subtest("remote"): - borg = "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519' borg" - server.wait_for_unit("sshd.service") - client.wait_for_unit("network.target") - client.systemctl("start --wait borgbackup-job-remote") - client.fail("systemctl is-failed borgbackup-job-remote") + with subtest("localMount"): + # the file system for the repo should not be already mounted + client.fail("mount | grep ${localRepoMount}") + # ensure trying to write to the mountpoint before the fs is mounted fails + client.succeed("chattr +i 
${localRepoMount}") + borg = "borg" + client.systemctl("start --wait borgbackup-job-localMount") + client.fail("systemctl is-failed borgbackup-job-localMount") + # Make sure exactly one archive has been created + assert int(client.succeed("{} list '${localRepoMount}' | wc -l".format(borg))) > 0 - # Make sure we can't access repos other than the specified one - client.fail("{} list borg\@server:wrong".format(borg)) + with subtest("remote"): + borg = "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519' borg" + server.wait_for_unit("sshd.service") + client.wait_for_unit("network.target") + client.systemctl("start --wait borgbackup-job-remote") + client.fail("systemctl is-failed borgbackup-job-remote") - # TODO: Make sure that data is actually deleted + # Make sure we can't access repos other than the specified one + client.fail("{} list borg\@server:wrong".format(borg)) - with subtest("remoteAppendOnly"): - borg = ( - "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly' borg" - ) - server.wait_for_unit("sshd.service") - client.wait_for_unit("network.target") - client.systemctl("start --wait borgbackup-job-remoteAppendOnly") - client.fail("systemctl is-failed borgbackup-job-remoteAppendOnly") + # TODO: Make sure that data is actually deleted - # Make sure we can't access repos other than the specified one - client.fail("{} list borg\@server:wrong".format(borg)) + with subtest("remoteAppendOnly"): + borg = ( + "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly' borg" + ) + server.wait_for_unit("sshd.service") + client.wait_for_unit("network.target") + client.systemctl("start --wait borgbackup-job-remoteAppendOnly") + client.fail("systemctl is-failed borgbackup-job-remoteAppendOnly") - # TODO: Make sure that data is not actually deleted + # Make sure we can't access repos other than the specified one + client.fail("{} list borg\@server:wrong".format(borg)) - with subtest("commandSuccess"): - server.wait_for_unit("sshd.service") - client.wait_for_unit("network.target") - client.systemctl("start --wait borgbackup-job-commandSuccess") - client.fail("systemctl is-failed borgbackup-job-commandSuccess") - id = client.succeed("borg-job-commandSuccess list | tail -n1 | cut -d' ' -f1").strip() - client.succeed(f"borg-job-commandSuccess extract ::{id} stdin") - assert "test" == client.succeed("cat stdin") + # TODO: Make sure that data is not actually deleted - with subtest("commandFail"): - server.wait_for_unit("sshd.service") - client.wait_for_unit("network.target") - client.systemctl("start --wait borgbackup-job-commandFail") - client.succeed("systemctl is-failed borgbackup-job-commandFail") + with subtest("commandSuccess"): + server.wait_for_unit("sshd.service") + client.wait_for_unit("network.target") + client.systemctl("start --wait borgbackup-job-commandSuccess") + client.fail("systemctl is-failed borgbackup-job-commandSuccess") + id = client.succeed("borg-job-commandSuccess list | tail -n1 | cut -d' ' -f1").strip() + client.succeed(f"borg-job-commandSuccess extract ::{id} stdin") + assert "test" == client.succeed("cat stdin") - with subtest("sleepInhibited"): - server.wait_for_unit("sshd.service") - client.wait_for_unit("network.target") - client.fail("systemd-inhibit --list | grep -q borgbackup") - client.systemctl("start borgbackup-job-sleepInhibited") - client.wait_until_succeeds("systemd-inhibit --list | grep -q borgbackup") - client.systemctl("stop borgbackup-job-sleepInhibited") - ''; - } -) + with subtest("commandFail"): + 
server.wait_for_unit("sshd.service") + client.wait_for_unit("network.target") + client.systemctl("start --wait borgbackup-job-commandFail") + client.succeed("systemctl is-failed borgbackup-job-commandFail") + + with subtest("sleepInhibited"): + server.wait_for_unit("sshd.service") + client.wait_for_unit("network.target") + client.fail("systemd-inhibit --list | grep -q borgbackup") + client.systemctl("start borgbackup-job-sleepInhibited") + client.wait_until_succeeds("systemd-inhibit --list | grep -q borgbackup") + client.systemctl("stop borgbackup-job-sleepInhibited") + ''; +} diff --git a/nixos/tests/borgmatic.nix b/nixos/tests/borgmatic.nix index be2542ed64ca..e89e2b05e4ef 100644 --- a/nixos/tests/borgmatic.nix +++ b/nixos/tests/borgmatic.nix @@ -1,28 +1,26 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "borgmatic"; - nodes.machine = - { ... }: - { - services.borgmatic = { - enable = true; - settings = { - source_directories = [ "/home" ]; - repositories = [ - { - label = "local"; - path = "/var/backup"; - } - ]; - keep_daily = 7; - }; +{ pkgs, ... }: +{ + name = "borgmatic"; + nodes.machine = + { ... }: + { + services.borgmatic = { + enable = true; + settings = { + source_directories = [ "/home" ]; + repositories = [ + { + label = "local"; + path = "/var/backup"; + } + ]; + keep_daily = 7; }; }; + }; - testScript = '' - machine.succeed("borgmatic rcreate -e none") - machine.succeed("borgmatic") - ''; - } -) + testScript = '' + machine.succeed("borgmatic rcreate -e none") + machine.succeed("borgmatic") + ''; +} diff --git a/nixos/tests/bpftune.nix b/nixos/tests/bpftune.nix index 454e27272d5b..f94e3a98e5cf 100644 --- a/nixos/tests/bpftune.nix +++ b/nixos/tests/bpftune.nix @@ -1,25 +1,23 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { +{ lib, pkgs, ... }: +{ - name = "bpftune"; + name = "bpftune"; - meta = { - maintainers = with lib.maintainers; [ nickcao ]; - }; + meta = { + maintainers = with lib.maintainers; [ nickcao ]; + }; - nodes = { - machine = - { pkgs, ... }: - { - services.bpftune.enable = true; - }; - }; + nodes = { + machine = + { pkgs, ... }: + { + services.bpftune.enable = true; + }; + }; - testScript = '' - machine.wait_for_unit("bpftune.service") - machine.wait_for_console_text("bpftune works") - ''; + testScript = '' + machine.wait_for_unit("bpftune.service") + machine.wait_for_console_text("bpftune works") + ''; - } -) +} diff --git a/nixos/tests/breitbandmessung.nix b/nixos/tests/breitbandmessung.nix index 414fd09cfa54..809a995cfcc5 100644 --- a/nixos/tests/breitbandmessung.nix +++ b/nixos/tests/breitbandmessung.nix @@ -1,41 +1,39 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "breitbandmessung"; - meta.maintainers = with lib.maintainers; [ b4dm4n ]; +{ lib, ... }: +{ + name = "breitbandmessung"; + meta.maintainers = with lib.maintainers; [ b4dm4n ]; - nodes.machine = - { pkgs, ... }: - { - imports = [ - ./common/user-account.nix - ./common/x11.nix - ]; + nodes.machine = + { pkgs, ... 
}: + { + imports = [ + ./common/user-account.nix + ./common/x11.nix + ]; - # increase screen size to make the whole program visible - virtualisation.resolution = { - x = 1280; - y = 1024; - }; - - test-support.displayManager.auto.user = "alice"; - - environment.systemPackages = with pkgs; [ breitbandmessung ]; - environment.variables.XAUTHORITY = "/home/alice/.Xauthority"; - - # breitbandmessung is unfree - nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "breitbandmessung" ]; + # increase screen size to make the whole program visible + virtualisation.resolution = { + x = 1280; + y = 1024; }; - enableOCR = true; + test-support.displayManager.auto.user = "alice"; - testScript = '' - machine.wait_for_x() - machine.execute("su - alice -c breitbandmessung >&2 &") - machine.wait_for_window("Breitbandmessung") - machine.wait_for_text("Breitbandmessung") - machine.wait_for_text("Datenschutz") - machine.screenshot("breitbandmessung") - ''; - } -) + environment.systemPackages = with pkgs; [ breitbandmessung ]; + environment.variables.XAUTHORITY = "/home/alice/.Xauthority"; + + # breitbandmessung is unfree + nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "breitbandmessung" ]; + }; + + enableOCR = true; + + testScript = '' + machine.wait_for_x() + machine.execute("su - alice -c breitbandmessung >&2 &") + machine.wait_for_window("Breitbandmessung") + machine.wait_for_text("Breitbandmessung") + machine.wait_for_text("Datenschutz") + machine.screenshot("breitbandmessung") + ''; +} diff --git a/nixos/tests/brscan5.nix b/nixos/tests/brscan5.nix index f61228bac41e..af70f362cb92 100644 --- a/nixos/tests/brscan5.nix +++ b/nixos/tests/brscan5.nix @@ -1,53 +1,51 @@ # integration tests for brscan5 sane driver # -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "brscan5"; - meta = with pkgs.lib.maintainers; { - maintainers = [ mattchrist ]; - }; +{ pkgs, ... }: +{ + name = "brscan5"; + meta = with pkgs.lib.maintainers; { + maintainers = [ mattchrist ]; + }; - nodes.machine = - { pkgs, ... }: - { - nixpkgs.config.allowUnfree = true; - hardware.sane = { + nodes.machine = + { pkgs, ... 
}: + { + nixpkgs.config.allowUnfree = true; + hardware.sane = { + enable = true; + brscan5 = { enable = true; - brscan5 = { - enable = true; - netDevices = { - "a" = { - model = "ADS-1200"; - nodename = "BRW0080927AFBCE"; - }; - "b" = { - model = "ADS-1200"; - ip = "192.168.1.2"; - }; + netDevices = { + "a" = { + model = "ADS-1200"; + nodename = "BRW0080927AFBCE"; + }; + "b" = { + model = "ADS-1200"; + ip = "192.168.1.2"; }; }; }; }; + }; - testScript = '' - import re - # sane loads libsane-brother5.so.1 successfully, and scanimage doesn't die - strace = machine.succeed('strace scanimage -L 2>&1').split("\n") - regexp = 'openat\(.*libsane-brother5.so.1", O_RDONLY|O_CLOEXEC\) = \d\d*$' - assert len([x for x in strace if re.match(regexp,x)]) > 0 + testScript = '' + import re + # sane loads libsane-brother5.so.1 successfully, and scanimage doesn't die + strace = machine.succeed('strace scanimage -L 2>&1').split("\n") + regexp = 'openat\(.*libsane-brother5.so.1", O_RDONLY|O_CLOEXEC\) = \d\d*$' + assert len([x for x in strace if re.match(regexp,x)]) > 0 - # module creates a config - cfg = machine.succeed('cat /etc/opt/brother/scanner/brscan5/brsanenetdevice.cfg') - assert 'DEVICE=a , "ADS-1200" , 0x4f9:0x459 , NODENAME=BRW0080927AFBCE' in cfg - assert 'DEVICE=b , "ADS-1200" , 0x4f9:0x459 , IP-ADDRESS=192.168.1.2' in cfg + # module creates a config + cfg = machine.succeed('cat /etc/opt/brother/scanner/brscan5/brsanenetdevice.cfg') + assert 'DEVICE=a , "ADS-1200" , 0x4f9:0x459 , NODENAME=BRW0080927AFBCE' in cfg + assert 'DEVICE=b , "ADS-1200" , 0x4f9:0x459 , IP-ADDRESS=192.168.1.2' in cfg - # scanimage lists the two network scanners - scanimage = machine.succeed("scanimage -L") - print(scanimage) - assert """device `brother5:net1;dev0' is a Brother b ADS-1200""" in scanimage - assert """device `brother5:net1;dev1' is a Brother a ADS-1200""" in scanimage - ''; - } -) + # scanimage lists the two network scanners + scanimage = machine.succeed("scanimage -L") + print(scanimage) + assert """device `brother5:net1;dev0' is a Brother b ADS-1200""" in scanimage + assert """device `brother5:net1;dev1' is a Brother a ADS-1200""" in scanimage + ''; +} diff --git a/nixos/tests/btrbk-doas.nix b/nixos/tests/btrbk-doas.nix index aa146ff351bd..326998b16b49 100644 --- a/nixos/tests/btrbk-doas.nix +++ b/nixos/tests/btrbk-doas.nix @@ -1,128 +1,126 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - let - privateKey = '' - -----BEGIN OPENSSH PRIVATE KEY----- - b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW - QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe - RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw - AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg - 9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ= - -----END OPENSSH PRIVATE KEY----- - ''; - publicKey = '' - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv - ''; - in - { - name = "btrbk-doas"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ - symphorien - tu-maurice - ]; - }; +let + privateKey = '' + -----BEGIN OPENSSH PRIVATE KEY----- + b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW + QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe + RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw + AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg + 9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ= + -----END OPENSSH PRIVATE KEY----- + ''; + publicKey = '' + ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv + ''; +in +{ + name = "btrbk-doas"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ + symphorien + tu-maurice + ]; + }; - nodes = { - archive = - { ... }: - { - security.sudo.enable = false; - security.doas.enable = true; - environment.systemPackages = with pkgs; [ btrfs-progs ]; - # note: this makes the privateKey world readable. - # don't do it with real ssh keys. - environment.etc."btrbk_key".text = privateKey; - services.btrbk = { - extraPackages = [ pkgs.lz4 ]; - instances = { - remote = { - onCalendar = "minutely"; - settings = { - ssh_identity = "/etc/btrbk_key"; - ssh_user = "btrbk"; - stream_compress = "lz4"; - volume = { - "ssh://main/mnt" = { - target = "/mnt"; - snapshot_dir = "btrbk/remote"; - subvolume = "to_backup"; - }; + nodes = { + archive = + { ... }: + { + security.sudo.enable = false; + security.doas.enable = true; + environment.systemPackages = with pkgs; [ btrfs-progs ]; + # note: this makes the privateKey world readable. + # don't do it with real ssh keys. + environment.etc."btrbk_key".text = privateKey; + services.btrbk = { + extraPackages = [ pkgs.lz4 ]; + instances = { + remote = { + onCalendar = "minutely"; + settings = { + ssh_identity = "/etc/btrbk_key"; + ssh_user = "btrbk"; + stream_compress = "lz4"; + volume = { + "ssh://main/mnt" = { + target = "/mnt"; + snapshot_dir = "btrbk/remote"; + subvolume = "to_backup"; }; }; }; }; }; }; + }; - main = - { ... }: - { - security.sudo.enable = false; - security.doas.enable = true; - environment.systemPackages = with pkgs; [ btrfs-progs ]; - services.openssh = { - enable = true; - passwordAuthentication = false; - kbdInteractiveAuthentication = false; - }; - services.btrbk = { - extraPackages = [ pkgs.lz4 ]; - sshAccess = [ - { - key = publicKey; - roles = [ - "source" - "send" - "info" - "delete" - ]; - } - ]; - instances = { - local = { - onCalendar = "minutely"; - settings = { - volume = { - "/mnt" = { - snapshot_dir = "btrbk/local"; - subvolume = "to_backup"; - }; + main = + { ... 
}: + { + security.sudo.enable = false; + security.doas.enable = true; + environment.systemPackages = with pkgs; [ btrfs-progs ]; + services.openssh = { + enable = true; + passwordAuthentication = false; + kbdInteractiveAuthentication = false; + }; + services.btrbk = { + extraPackages = [ pkgs.lz4 ]; + sshAccess = [ + { + key = publicKey; + roles = [ + "source" + "send" + "info" + "delete" + ]; + } + ]; + instances = { + local = { + onCalendar = "minutely"; + settings = { + volume = { + "/mnt" = { + snapshot_dir = "btrbk/local"; + subvolume = "to_backup"; }; }; }; }; }; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - # create btrfs partition at /mnt - for machine in (archive, main): - machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1") - machine.succeed("mkfs.btrfs /data_fs") - machine.succeed("mkdir /mnt") - machine.succeed("mount /data_fs /mnt") + # create btrfs partition at /mnt + for machine in (archive, main): + machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1") + machine.succeed("mkfs.btrfs /data_fs") + machine.succeed("mkdir /mnt") + machine.succeed("mount /data_fs /mnt") - # what to backup and where - main.succeed("btrfs subvolume create /mnt/to_backup") - main.succeed("mkdir -p /mnt/btrbk/{local,remote}") + # what to backup and where + main.succeed("btrfs subvolume create /mnt/to_backup") + main.succeed("mkdir -p /mnt/btrbk/{local,remote}") - # check that local snapshots work - with subtest("local"): - main.succeed("echo foo > /mnt/to_backup/bar") - main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo") - main.succeed("echo bar > /mnt/to_backup/bar") - main.succeed("cat /mnt/btrbk/local/*/bar | grep foo") + # check that local snapshots work + with subtest("local"): + main.succeed("echo foo > /mnt/to_backup/bar") + main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo") + main.succeed("echo bar > /mnt/to_backup/bar") + main.succeed("cat /mnt/btrbk/local/*/bar | grep foo") - # check that btrfs send/receive works and ssh access works - with subtest("remote"): - archive.wait_until_succeeds("cat /mnt/*/bar | grep bar") - main.succeed("echo baz > /mnt/to_backup/bar") - archive.succeed("cat /mnt/*/bar | grep bar") - ''; - } -) + # check that btrfs send/receive works and ssh access works + with subtest("remote"): + archive.wait_until_succeeds("cat /mnt/*/bar | grep bar") + main.succeed("echo baz > /mnt/to_backup/bar") + archive.succeed("cat /mnt/*/bar | grep bar") + ''; +} diff --git a/nixos/tests/btrbk-no-timer.nix b/nixos/tests/btrbk-no-timer.nix index c5bcb6edab68..c73dc1907aaa 100644 --- a/nixos/tests/btrbk-no-timer.nix +++ b/nixos/tests/btrbk-no-timer.nix @@ -1,41 +1,39 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "btrbk-no-timer"; - meta.maintainers = with lib.maintainers; [ oxalica ]; +{ lib, pkgs, ... }: +{ + name = "btrbk-no-timer"; + meta.maintainers = with lib.maintainers; [ oxalica ]; - nodes.machine = - { ... }: - { - environment.systemPackages = with pkgs; [ btrfs-progs ]; - services.btrbk.instances.local = { - onCalendar = null; - settings.volume."/mnt" = { - snapshot_dir = "btrbk/local"; - subvolume = "to_backup"; - }; + nodes.machine = + { ... 
}: + { + environment.systemPackages = with pkgs; [ btrfs-progs ]; + services.btrbk.instances.local = { + onCalendar = null; + settings.volume."/mnt" = { + snapshot_dir = "btrbk/local"; + subvolume = "to_backup"; }; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - # Create btrfs partition at /mnt - machine.succeed("truncate --size=128M /data_fs") - machine.succeed("mkfs.btrfs /data_fs") - machine.succeed("mkdir /mnt") - machine.succeed("mount /data_fs /mnt") - machine.succeed("btrfs subvolume create /mnt/to_backup") - machine.succeed("mkdir -p /mnt/btrbk/local") + # Create btrfs partition at /mnt + machine.succeed("truncate --size=128M /data_fs") + machine.succeed("mkfs.btrfs /data_fs") + machine.succeed("mkdir /mnt") + machine.succeed("mount /data_fs /mnt") + machine.succeed("btrfs subvolume create /mnt/to_backup") + machine.succeed("mkdir -p /mnt/btrbk/local") - # The service should not have any triggering timer. - unit = machine.get_unit_info('btrbk-local.service') - assert "TriggeredBy" not in unit + # The service should not have any triggering timer. + unit = machine.get_unit_info('btrbk-local.service') + assert "TriggeredBy" not in unit - # Manually starting the service should still work. - machine.succeed("echo foo > /mnt/to_backup/bar") - machine.start_job("btrbk-local.service") - machine.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo") - ''; - } -) + # Manually starting the service should still work. + machine.succeed("echo foo > /mnt/to_backup/bar") + machine.start_job("btrbk-local.service") + machine.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo") + ''; +} diff --git a/nixos/tests/btrbk-section-order.nix b/nixos/tests/btrbk-section-order.nix index 03762b3fee6b..226025464835 100644 --- a/nixos/tests/btrbk-section-order.nix +++ b/nixos/tests/btrbk-section-order.nix @@ -6,56 +6,54 @@ # order-sensitive config format. # # Issue: https://github.com/NixOS/nixpkgs/issues/195660 -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "btrbk-section-order"; - meta.maintainers = with lib.maintainers; [ oxalica ]; +{ lib, pkgs, ... }: +{ + name = "btrbk-section-order"; + meta.maintainers = with lib.maintainers; [ oxalica ]; - nodes.machine = - { ... }: - { - services.btrbk.instances.local = { - onCalendar = null; - settings = { - timestamp_format = "long"; - target."ssh://global-target/".ssh_user = "root"; - volume."/btrfs" = { - snapshot_dir = "/volume-snapshots"; - target."ssh://volume-target/".ssh_user = "root"; - subvolume."@subvolume" = { - snapshot_dir = "/subvolume-snapshots"; - target."ssh://subvolume-target/".ssh_user = "root"; - }; + nodes.machine = + { ... 
}: + { + services.btrbk.instances.local = { + onCalendar = null; + settings = { + timestamp_format = "long"; + target."ssh://global-target/".ssh_user = "root"; + volume."/btrfs" = { + snapshot_dir = "/volume-snapshots"; + target."ssh://volume-target/".ssh_user = "root"; + subvolume."@subvolume" = { + snapshot_dir = "/subvolume-snapshots"; + target."ssh://subvolume-target/".ssh_user = "root"; }; }; }; }; + }; - testScript = '' - import difflib - machine.wait_for_unit("basic.target") - got = machine.succeed("cat /etc/btrbk/local.conf").strip() - expect = """ - backend btrfs-progs-sudo - stream_compress no - timestamp_format long - target ssh://global-target/ + testScript = '' + import difflib + machine.wait_for_unit("basic.target") + got = machine.succeed("cat /etc/btrbk/local.conf").strip() + expect = """ + backend btrfs-progs-sudo + stream_compress no + timestamp_format long + target ssh://global-target/ + ssh_user root + volume /btrfs + snapshot_dir /volume-snapshots + target ssh://volume-target/ + ssh_user root + subvolume @subvolume + snapshot_dir /subvolume-snapshots + target ssh://subvolume-target/ ssh_user root - volume /btrfs - snapshot_dir /volume-snapshots - target ssh://volume-target/ - ssh_user root - subvolume @subvolume - snapshot_dir /subvolume-snapshots - target ssh://subvolume-target/ - ssh_user root - """.strip() - print(got) - if got != expect: - diff = difflib.unified_diff(expect.splitlines(keepends=True), got.splitlines(keepends=True), fromfile="expected", tofile="got") - print("".join(diff)) - assert got == expect - ''; - } -) + """.strip() + print(got) + if got != expect: + diff = difflib.unified_diff(expect.splitlines(keepends=True), got.splitlines(keepends=True), fromfile="expected", tofile="got") + print("".join(diff)) + assert got == expect + ''; +} diff --git a/nixos/tests/btrbk.nix b/nixos/tests/btrbk.nix index e6b3e1166ff3..10ca8c3ca95e 100644 --- a/nixos/tests/btrbk.nix +++ b/nixos/tests/btrbk.nix @@ -1,122 +1,120 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - privateKey = '' - -----BEGIN OPENSSH PRIVATE KEY----- - b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW - QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe - RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw - AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg - 9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ= - -----END OPENSSH PRIVATE KEY----- - ''; - publicKey = '' - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv - ''; - in - { - name = "btrbk"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ symphorien ]; - }; +let + privateKey = '' + -----BEGIN OPENSSH PRIVATE KEY----- + b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW + QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe + RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw + AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg + 9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ= + -----END OPENSSH PRIVATE KEY----- + ''; + publicKey = '' + ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv + ''; +in +{ + name = "btrbk"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ symphorien ]; + }; - nodes = { - archive = - { ... }: - { - environment.systemPackages = with pkgs; [ btrfs-progs ]; - # note: this makes the privateKey world readable. - # don't do it with real ssh keys. 
- environment.etc."btrbk_key".text = privateKey; - services.btrbk = { - instances = { - remote = { - onCalendar = "minutely"; - settings = { - ssh_identity = "/etc/btrbk_key"; - ssh_user = "btrbk"; - stream_compress = "lz4"; - volume = { - "ssh://main/mnt" = { - target = "/mnt"; - snapshot_dir = "btrbk/remote"; - subvolume = "to_backup"; - }; + nodes = { + archive = + { ... }: + { + environment.systemPackages = with pkgs; [ btrfs-progs ]; + # note: this makes the privateKey world readable. + # don't do it with real ssh keys. + environment.etc."btrbk_key".text = privateKey; + services.btrbk = { + instances = { + remote = { + onCalendar = "minutely"; + settings = { + ssh_identity = "/etc/btrbk_key"; + ssh_user = "btrbk"; + stream_compress = "lz4"; + volume = { + "ssh://main/mnt" = { + target = "/mnt"; + snapshot_dir = "btrbk/remote"; + subvolume = "to_backup"; }; }; }; }; }; }; + }; - main = - { ... }: - { - environment.systemPackages = with pkgs; [ btrfs-progs ]; - services.openssh = { - enable = true; - settings = { - KbdInteractiveAuthentication = false; - PasswordAuthentication = false; - }; + main = + { ... }: + { + environment.systemPackages = with pkgs; [ btrfs-progs ]; + services.openssh = { + enable = true; + settings = { + KbdInteractiveAuthentication = false; + PasswordAuthentication = false; }; - services.btrbk = { - extraPackages = [ pkgs.lz4 ]; - sshAccess = [ - { - key = publicKey; - roles = [ - "source" - "send" - "info" - "delete" - ]; - } - ]; - instances = { - local = { - onCalendar = "minutely"; - settings = { - volume = { - "/mnt" = { - snapshot_dir = "btrbk/local"; - subvolume = "to_backup"; - }; + }; + services.btrbk = { + extraPackages = [ pkgs.lz4 ]; + sshAccess = [ + { + key = publicKey; + roles = [ + "source" + "send" + "info" + "delete" + ]; + } + ]; + instances = { + local = { + onCalendar = "minutely"; + settings = { + volume = { + "/mnt" = { + snapshot_dir = "btrbk/local"; + subvolume = "to_backup"; }; }; }; }; }; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - # create btrfs partition at /mnt - for machine in (archive, main): - machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1") - machine.succeed("mkfs.btrfs /data_fs") - machine.succeed("mkdir /mnt") - machine.succeed("mount /data_fs /mnt") + # create btrfs partition at /mnt + for machine in (archive, main): + machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1") + machine.succeed("mkfs.btrfs /data_fs") + machine.succeed("mkdir /mnt") + machine.succeed("mount /data_fs /mnt") - # what to backup and where - main.succeed("btrfs subvolume create /mnt/to_backup") - main.succeed("mkdir -p /mnt/btrbk/{local,remote}") + # what to backup and where + main.succeed("btrfs subvolume create /mnt/to_backup") + main.succeed("mkdir -p /mnt/btrbk/{local,remote}") - # check that local snapshots work - with subtest("local"): - main.succeed("echo foo > /mnt/to_backup/bar") - main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo") - main.succeed("echo bar > /mnt/to_backup/bar") - main.succeed("cat /mnt/btrbk/local/*/bar | grep foo") + # check that local snapshots work + with subtest("local"): + main.succeed("echo foo > /mnt/to_backup/bar") + main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo") + main.succeed("echo bar > /mnt/to_backup/bar") + main.succeed("cat /mnt/btrbk/local/*/bar | grep foo") - # check that btrfs send/receive works and ssh access works - with subtest("remote"): - archive.wait_until_succeeds("cat /mnt/*/bar | grep bar") - 
main.succeed("echo baz > /mnt/to_backup/bar") - archive.succeed("cat /mnt/*/bar | grep bar") - ''; - } -) + # check that btrfs send/receive works and ssh access works + with subtest("remote"): + archive.wait_until_succeeds("cat /mnt/*/bar | grep bar") + main.succeed("echo baz > /mnt/to_backup/bar") + archive.succeed("cat /mnt/*/bar | grep bar") + ''; +} diff --git a/nixos/tests/budgie.nix b/nixos/tests/budgie.nix index 5389a8d9da9d..04e5300ec4fb 100644 --- a/nixos/tests/budgie.nix +++ b/nixos/tests/budgie.nix @@ -1,104 +1,102 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "budgie"; +{ pkgs, lib, ... }: +{ + name = "budgie"; - meta.maintainers = lib.teams.budgie.members; + meta.maintainers = lib.teams.budgie.members; - nodes.machine = - { ... }: - { - imports = [ - ./common/user-account.nix - ]; + nodes.machine = + { ... }: + { + imports = [ + ./common/user-account.nix + ]; - services.xserver.enable = true; + services.xserver.enable = true; - services.xserver.displayManager = { - lightdm.enable = true; - autoLogin = { - enable = true; - user = "alice"; - }; - }; - - # We don't ship gnome-text-editor in Budgie module, we add this line mainly - # to catch eval issues related to this option. - environment.budgie.excludePackages = [ pkgs.gnome-text-editor ]; - - services.xserver.desktopManager.budgie = { + services.xserver.displayManager = { + lightdm.enable = true; + autoLogin = { enable = true; - extraPlugins = [ - pkgs.budgie-analogue-clock-applet - ]; + user = "alice"; }; }; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"; - su = command: "su - ${user.name} -c '${env} ${command}'"; - in - '' - with subtest("Wait for login"): - # wait_for_x() checks graphical-session.target, which is expected to be - # inactive on Budgie before Budgie manages user session with systemd. - # https://github.com/BuddiesOfBudgie/budgie-desktop/blob/39e9f0895c978f76/src/session/budgie-desktop.in#L16 - # - # Previously this was unconditionally touched by xsessionWrapper but was - # changed in #233981 (we have Budgie:GNOME in XDG_CURRENT_DESKTOP). - # machine.wait_for_x() - machine.wait_until_succeeds('journalctl -t budgie-session-binary --grep "Entering running state"') - machine.wait_for_file("${user.home}/.Xauthority") - machine.succeed("xauth merge ${user.home}/.Xauthority") + # We don't ship gnome-text-editor in Budgie module, we add this line mainly + # to catch eval issues related to this option. + environment.budgie.excludePackages = [ pkgs.gnome-text-editor ]; - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + services.xserver.desktopManager.budgie = { + enable = true; + extraPlugins = [ + pkgs.budgie-analogue-clock-applet + ]; + }; + }; - with subtest("Check if Budgie session components actually start"): - for i in ["budgie-daemon", "budgie-panel", "budgie-wm", "budgie-desktop-view", "gsd-media-keys"]: - machine.wait_until_succeeds(f"pgrep -f {i}") - # We don't check xwininfo for budgie-wm. - # See https://github.com/NixOS/nixpkgs/pull/216737#discussion_r1155312754 - machine.wait_for_window("budgie-daemon") - machine.wait_for_window("budgie-panel") + testScript = + { nodes, ... 
}: + let + user = nodes.machine.users.users.alice; + env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"; + su = command: "su - ${user.name} -c '${env} ${command}'"; + in + '' + with subtest("Wait for login"): + # wait_for_x() checks graphical-session.target, which is expected to be + # inactive on Budgie before Budgie manages user session with systemd. + # https://github.com/BuddiesOfBudgie/budgie-desktop/blob/39e9f0895c978f76/src/session/budgie-desktop.in#L16 + # + # Previously this was unconditionally touched by xsessionWrapper but was + # changed in #233981 (we have Budgie:GNOME in XDG_CURRENT_DESKTOP). + # machine.wait_for_x() + machine.wait_until_succeeds('journalctl -t budgie-session-binary --grep "Entering running state"') + machine.wait_for_file("${user.home}/.Xauthority") + machine.succeed("xauth merge ${user.home}/.Xauthority") - with subtest("Check if various environment variables are set"): - cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf /run/current-system/sw/bin/budgie-wm)/environ" - machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Budgie:GNOME'") - machine.succeed(f"{cmd} | grep 'BUDGIE_PLUGIN_DATADIR' | grep '${pkgs.budgie-desktop-with-plugins.pname}'") + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - with subtest("Open run dialog"): - machine.send_key("alt-f2") - machine.wait_for_window("budgie-run-dialog") - machine.sleep(2) - machine.screenshot("run_dialog") - machine.send_key("esc") + with subtest("Check if Budgie session components actually start"): + for i in ["budgie-daemon", "budgie-panel", "budgie-wm", "budgie-desktop-view", "gsd-media-keys"]: + machine.wait_until_succeeds(f"pgrep -f {i}") + # We don't check xwininfo for budgie-wm. 
+ # See https://github.com/NixOS/nixpkgs/pull/216737#discussion_r1155312754 + machine.wait_for_window("budgie-daemon") + machine.wait_for_window("budgie-panel") - with subtest("Open Budgie Control Center"): - machine.succeed("${su "budgie-control-center >&2 &"}") - machine.wait_for_window("Budgie Control Center") + with subtest("Check if various environment variables are set"): + cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf /run/current-system/sw/bin/budgie-wm)/environ" + machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Budgie:GNOME'") + machine.succeed(f"{cmd} | grep 'BUDGIE_PLUGIN_DATADIR' | grep '${pkgs.budgie-desktop-with-plugins.pname}'") - with subtest("Lock the screen"): - machine.succeed("${su "budgie-screensaver-command -l >&2 &"}") - machine.wait_until_succeeds("${su "budgie-screensaver-command -q"} | grep 'The screensaver is active'") - machine.sleep(2) - machine.send_chars("${user.password}", delay=0.5) - machine.screenshot("budgie_screensaver") - machine.send_chars("\n") - machine.wait_until_succeeds("${su "budgie-screensaver-command -q"} | grep 'The screensaver is inactive'") - machine.sleep(2) + with subtest("Open run dialog"): + machine.send_key("alt-f2") + machine.wait_for_window("budgie-run-dialog") + machine.sleep(2) + machine.screenshot("run_dialog") + machine.send_key("esc") - with subtest("Open GNOME terminal"): - machine.succeed("${su "gnome-terminal"}") - machine.wait_for_window("${user.name}@machine: ~") + with subtest("Open Budgie Control Center"): + machine.succeed("${su "budgie-control-center >&2 &"}") + machine.wait_for_window("Budgie Control Center") - with subtest("Check if Budgie has ever coredumped"): - machine.fail("coredumpctl --json=short | grep budgie") - machine.sleep(10) - machine.screenshot("screen") - ''; - } -) + with subtest("Lock the screen"): + machine.succeed("${su "budgie-screensaver-command -l >&2 &"}") + machine.wait_until_succeeds("${su "budgie-screensaver-command -q"} | grep 'The screensaver is active'") + machine.sleep(2) + machine.send_chars("${user.password}", delay=0.5) + machine.screenshot("budgie_screensaver") + machine.send_chars("\n") + machine.wait_until_succeeds("${su "budgie-screensaver-command -q"} | grep 'The screensaver is inactive'") + machine.sleep(2) + + with subtest("Open GNOME terminal"): + machine.succeed("${su "gnome-terminal"}") + machine.wait_for_window("${user.name}@machine: ~") + + with subtest("Check if Budgie has ever coredumped"): + machine.fail("coredumpctl --json=short | grep budgie") + machine.sleep(10) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/buildkite-agents.nix b/nixos/tests/buildkite-agents.nix index e8934dbaaa5c..e4fe837ea8cc 100644 --- a/nixos/tests/buildkite-agents.nix +++ b/nixos/tests/buildkite-agents.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - { - name = "buildkite-agent"; - meta.maintainers = with lib.maintainers; [ flokli ]; +{ + name = "buildkite-agent"; + meta.maintainers = with lib.maintainers; [ flokli ]; - nodes.machine = - { pkgs, ... }: - { - services.buildkite-agents = { - one = { - privateSshKeyPath = (import ./ssh-keys.nix pkgs).snakeOilPrivateKey; - tokenPath = (pkgs.writeText "my-token" "5678"); - }; - two = { - tokenPath = (pkgs.writeText "my-token" "1234"); - }; + nodes.machine = + { pkgs, ... 
}: + { + services.buildkite-agents = { + one = { + privateSshKeyPath = (import ./ssh-keys.nix pkgs).snakeOilPrivateKey; + tokenPath = (pkgs.writeText "my-token" "5678"); + }; + two = { + tokenPath = (pkgs.writeText "my-token" "1234"); }; }; + }; - testScript = '' - start_all() - # we can't wait on the unit to start up, as we obviously can't connect to buildkite, - # but we can look whether files are set up correctly + testScript = '' + start_all() + # we can't wait on the unit to start up, as we obviously can't connect to buildkite, + # but we can look whether files are set up correctly - machine.wait_for_file("/var/lib/buildkite-agent-one/buildkite-agent.cfg") - machine.wait_for_file("/var/lib/buildkite-agent-one/.ssh/id_rsa") + machine.wait_for_file("/var/lib/buildkite-agent-one/buildkite-agent.cfg") + machine.wait_for_file("/var/lib/buildkite-agent-one/.ssh/id_rsa") - machine.wait_for_file("/var/lib/buildkite-agent-two/buildkite-agent.cfg") - ''; - } -) + machine.wait_for_file("/var/lib/buildkite-agent-two/buildkite-agent.cfg") + ''; +} diff --git a/nixos/tests/c2fmzq.nix b/nixos/tests/c2fmzq.nix index 90d816785e1e..26c2a3349ba4 100644 --- a/nixos/tests/c2fmzq.nix +++ b/nixos/tests/c2fmzq.nix @@ -1,87 +1,85 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "c2FmZQ"; - meta.maintainers = with lib.maintainers; [ hmenke ]; +{ pkgs, lib, ... }: +{ + name = "c2FmZQ"; + meta.maintainers = with lib.maintainers; [ hmenke ]; - nodes.machine = { - services.c2fmzq-server = { - enable = true; - port = 8080; - passphraseFile = builtins.toFile "pwfile" "hunter2"; # don't do this on real deployments - settings = { - verbose = 3; # debug - # make sure multiple freeform options evaluate - allow-new-accounts = true; - auto-approve-new-accounts = true; - licenses = false; - }; - }; - environment = { - sessionVariables = { - C2FMZQ_PASSPHRASE = "lol"; - C2FMZQ_API_SERVER = "http://localhost:8080"; - }; - systemPackages = [ - pkgs.c2fmzq - (pkgs.writeScriptBin "c2FmZQ-client-wrapper" '' - #!${pkgs.expect}/bin/expect -f - spawn c2FmZQ-client {*}$argv - expect { - "Enter password:" { send "$env(PASSWORD)\r" } - "Type YES to confirm:" { send "YES\r" } - timeout { exit 1 } - eof { exit 0 } - } - interact - '') - ]; + nodes.machine = { + services.c2fmzq-server = { + enable = true; + port = 8080; + passphraseFile = builtins.toFile "pwfile" "hunter2"; # don't do this on real deployments + settings = { + verbose = 3; # debug + # make sure multiple freeform options evaluate + allow-new-accounts = true; + auto-approve-new-accounts = true; + licenses = false; }; }; + environment = { + sessionVariables = { + C2FMZQ_PASSPHRASE = "lol"; + C2FMZQ_API_SERVER = "http://localhost:8080"; + }; + systemPackages = [ + pkgs.c2fmzq + (pkgs.writeScriptBin "c2FmZQ-client-wrapper" '' + #!${pkgs.expect}/bin/expect -f + spawn c2FmZQ-client {*}$argv + expect { + "Enter password:" { send "$env(PASSWORD)\r" } + "Type YES to confirm:" { send "YES\r" } + timeout { exit 1 } + eof { exit 0 } + } + interact + '') + ]; + }; + }; - testScript = - { nodes, ... }: - '' - machine.start() - machine.wait_for_unit("c2fmzq-server.service") - machine.wait_for_open_port(8080) + testScript = + { nodes, ... 
}: + '' + machine.start() + machine.wait_for_unit("c2fmzq-server.service") + machine.wait_for_open_port(8080) - with subtest("Create accounts for alice and bob"): - machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 create-account alice@example.com") - machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 create-account bob@example.com") + with subtest("Create accounts for alice and bob"): + machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 create-account alice@example.com") + machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 create-account bob@example.com") - with subtest("Log in as alice"): - machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 login alice@example.com") - msg = machine.succeed("c2FmZQ-client -v 3 status") - assert "Logged in as alice@example.com" in msg, f"ERROR: Not logged in as alice:\n{msg}" + with subtest("Log in as alice"): + machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 login alice@example.com") + msg = machine.succeed("c2FmZQ-client -v 3 status") + assert "Logged in as alice@example.com" in msg, f"ERROR: Not logged in as alice:\n{msg}" - with subtest("Create a new album, upload a file, and delete the uploaded file"): - machine.succeed("c2FmZQ-client -v 3 create-album 'Rarest Memes'") - machine.succeed("echo 'pls do not steal' > meme.txt") - machine.succeed("c2FmZQ-client -v 3 import meme.txt 'Rarest Memes'") - machine.succeed("c2FmZQ-client -v 3 sync") - machine.succeed("rm meme.txt") + with subtest("Create a new album, upload a file, and delete the uploaded file"): + machine.succeed("c2FmZQ-client -v 3 create-album 'Rarest Memes'") + machine.succeed("echo 'pls do not steal' > meme.txt") + machine.succeed("c2FmZQ-client -v 3 import meme.txt 'Rarest Memes'") + machine.succeed("c2FmZQ-client -v 3 sync") + machine.succeed("rm meme.txt") - with subtest("Share the album with bob"): - machine.succeed("c2FmZQ-client-wrapper -- -v 3 share 'Rarest Memes' bob@example.com") + with subtest("Share the album with bob"): + machine.succeed("c2FmZQ-client-wrapper -- -v 3 share 'Rarest Memes' bob@example.com") - with subtest("Log in as bob"): - machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 login bob@example.com") - msg = machine.succeed("c2FmZQ-client -v 3 status") - assert "Logged in as bob@example.com" in msg, f"ERROR: Not logged in as bob:\n{msg}" + with subtest("Log in as bob"): + machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 login bob@example.com") + msg = machine.succeed("c2FmZQ-client -v 3 status") + assert "Logged in as bob@example.com" in msg, f"ERROR: Not logged in as bob:\n{msg}" - with subtest("Download the shared file"): - machine.succeed("c2FmZQ-client -v 3 download 'shared/Rarest Memes/meme.txt'") - machine.succeed("c2FmZQ-client -v 3 export 'shared/Rarest Memes/meme.txt' .") - msg = machine.succeed("cat meme.txt") - assert "pls do not steal\n" == msg, f"File content is not the same:\n{msg}" + with subtest("Download the shared file"): + machine.succeed("c2FmZQ-client -v 3 download 'shared/Rarest Memes/meme.txt'") + machine.succeed("c2FmZQ-client -v 3 export 'shared/Rarest Memes/meme.txt' .") + msg = machine.succeed("cat meme.txt") + assert "pls do not steal\n" == msg, f"File content is not the same:\n{msg}" - with subtest("Test that PWA is served"): - msg = machine.succeed("curl -sSfL http://localhost:8080") - assert "c2FmZQ" in msg, f"Could not find 'c2FmZQ' in the output:\n{msg}" + with subtest("Test that PWA is served"): + msg = machine.succeed("curl 
-sSfL http://localhost:8080") + assert "c2FmZQ" in msg, f"Could not find 'c2FmZQ' in the output:\n{msg}" - with subtest("A setting with false value is properly passed"): - machine.succeed("systemctl show -p ExecStart --value c2fmzq-server.service | grep -F -- '--licenses=false'"); - ''; - } -) + with subtest("A setting with false value is properly passed"): + machine.succeed("systemctl show -p ExecStart --value c2fmzq-server.service | grep -F -- '--licenses=false'"); + ''; +} diff --git a/nixos/tests/cage.nix b/nixos/tests/cage.nix index e9c025e51021..585968b5e0e4 100644 --- a/nixos/tests/cage.nix +++ b/nixos/tests/cage.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "cage"; - meta = with pkgs.lib.maintainers; { - maintainers = [ matthewbauer ]; - }; +{ + name = "cage"; + meta = with pkgs.lib.maintainers; { + maintainers = [ matthewbauer ]; + }; - nodes.machine = - { ... }: + nodes.machine = + { ... }: - { - imports = [ ./common/user-account.nix ]; + { + imports = [ ./common/user-account.nix ]; - fonts.packages = with pkgs; [ dejavu_fonts ]; + fonts.packages = with pkgs; [ dejavu_fonts ]; - services.cage = { - enable = true; - user = "alice"; - program = "${pkgs.xterm}/bin/xterm"; - }; - - # Need to switch to a different GPU driver than the default one (-vga std) so that Cage can launch: - virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; + services.cage = { + enable = true; + user = "alice"; + program = "${pkgs.xterm}/bin/xterm"; }; - enableOCR = true; + # Need to switch to a different GPU driver than the default one (-vga std) so that Cage can launch: + virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; + }; - testScript = - { nodes, ... }: - let - user = nodes.machine.config.users.users.alice; - in - '' - with subtest("Wait for cage to boot up"): - start_all() - machine.wait_for_file("/run/user/${toString user.uid}/wayland-0.lock") - machine.wait_until_succeeds("pgrep xterm") - machine.wait_for_text("alice@machine") - machine.screenshot("screen") - ''; - } -) + enableOCR = true; + + testScript = + { nodes, ... }: + let + user = nodes.machine.config.users.users.alice; + in + '' + with subtest("Wait for cage to boot up"): + start_all() + machine.wait_for_file("/run/user/${toString user.uid}/wayland-0.lock") + machine.wait_until_succeeds("pgrep xterm") + machine.wait_for_text("alice@machine") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/cagebreak.nix b/nixos/tests/cagebreak.nix index 2c07833b142f..13012281a151 100644 --- a/nixos/tests/cagebreak.nix +++ b/nixos/tests/cagebreak.nix @@ -1,72 +1,70 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - cagebreakConfigfile = pkgs.writeText "config" '' - workspaces 1 - escape C-t - bind t exec env DISPLAY=:0 ${pkgs.xterm}/bin/xterm -cm -pc - ''; - in - { - name = "cagebreak"; - meta = with pkgs.lib.maintainers; { - maintainers = [ berbiche ]; +let + cagebreakConfigfile = pkgs.writeText "config" '' + workspaces 1 + escape C-t + bind t exec env DISPLAY=:0 ${pkgs.xterm}/bin/xterm -cm -pc + ''; +in +{ + name = "cagebreak"; + meta = with pkgs.lib.maintainers; { + maintainers = [ berbiche ]; + }; + + nodes.machine = + { config, ... 
}: + { + # Automatically login on tty1 as a normal user: + imports = [ ./common/user-account.nix ]; + services.getty.autologinUser = "alice"; + programs.bash.loginShellInit = '' + if [ "$(tty)" = "/dev/tty1" ]; then + set -e + + mkdir -p ~/.config/cagebreak + cp -f ${cagebreakConfigfile} ~/.config/cagebreak/config + + cagebreak + fi + ''; + + hardware.graphics.enable = true; + programs.xwayland.enable = true; + security.polkit.enable = true; + environment.systemPackages = [ + pkgs.cagebreak + pkgs.wayland-utils + ]; + + # Need to switch to a different GPU driver than the default one (-vga std) so that Cagebreak can launch: + virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; }; - nodes.machine = - { config, ... }: - { - # Automatically login on tty1 as a normal user: - imports = [ ./common/user-account.nix ]; - services.getty.autologinUser = "alice"; - programs.bash.loginShellInit = '' - if [ "$(tty)" = "/dev/tty1" ]; then - set -e + enableOCR = true; - mkdir -p ~/.config/cagebreak - cp -f ${cagebreakConfigfile} ~/.config/cagebreak/config + testScript = + { nodes, ... }: + let + user = nodes.machine.config.users.users.alice; + XDG_RUNTIME_DIR = "/run/user/${toString user.uid}"; + in + '' + start_all() + machine.wait_for_unit("multi-user.target") + machine.wait_for_file("${XDG_RUNTIME_DIR}/wayland-0") - cagebreak - fi - ''; + with subtest("ensure wayland works with wayinfo from wallutils"): + print(machine.succeed("env XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR} wayland-info")) - hardware.graphics.enable = true; - programs.xwayland.enable = true; - security.polkit.enable = true; - environment.systemPackages = [ - pkgs.cagebreak - pkgs.wayland-utils - ]; - - # Need to switch to a different GPU driver than the default one (-vga std) so that Cagebreak can launch: - virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; - }; - - enableOCR = true; - - testScript = - { nodes, ... }: - let - user = nodes.machine.config.users.users.alice; - XDG_RUNTIME_DIR = "/run/user/${toString user.uid}"; - in - '' - start_all() - machine.wait_for_unit("multi-user.target") - machine.wait_for_file("${XDG_RUNTIME_DIR}/wayland-0") - - with subtest("ensure wayland works with wayinfo from wallutils"): - print(machine.succeed("env XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR} wayland-info")) - - # TODO: Fix the XWayland test (log the cagebreak output to debug): - # with subtest("ensure xwayland works with xterm"): - # machine.send_key("ctrl-t") - # machine.send_key("t") - # machine.wait_until_succeeds("pgrep xterm") - # machine.wait_for_text("${user.name}@machine") - # machine.screenshot("screen") - # machine.send_key("ctrl-d") - ''; - } -) + # TODO: Fix the XWayland test (log the cagebreak output to debug): + # with subtest("ensure xwayland works with xterm"): + # machine.send_key("ctrl-t") + # machine.send_key("t") + # machine.wait_until_succeeds("pgrep xterm") + # machine.wait_for_text("${user.name}@machine") + # machine.screenshot("screen") + # machine.send_key("ctrl-d") + ''; +} diff --git a/nixos/tests/canaille.nix b/nixos/tests/canaille.nix index 58e81e058de0..fa0d33b0f2e2 100644 --- a/nixos/tests/canaille.nix +++ b/nixos/tests/canaille.nix @@ -1,62 +1,60 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - certs = import ./common/acme/server/snakeoil-certs.nix; - inherit (certs) domain; - in - { - name = "canaille"; - meta.maintainers = with pkgs.lib.maintainers; [ erictapen ]; +{ pkgs, ... 
}: +let + certs = import ./common/acme/server/snakeoil-certs.nix; + inherit (certs) domain; +in +{ + name = "canaille"; + meta.maintainers = with pkgs.lib.maintainers; [ erictapen ]; - nodes.server = - { pkgs, lib, ... }: - { - services.canaille = { - enable = true; - secretKeyFile = pkgs.writeText "canaille-secret-key" '' - this is not a secret key - ''; - settings = { - SERVER_NAME = domain; - }; + nodes.server = + { pkgs, lib, ... }: + { + services.canaille = { + enable = true; + secretKeyFile = pkgs.writeText "canaille-secret-key" '' + this is not a secret key + ''; + settings = { + SERVER_NAME = domain; }; - - services.nginx.virtualHosts."${domain}" = { - enableACME = lib.mkForce false; - sslCertificate = certs."${domain}".cert; - sslCertificateKey = certs."${domain}".key; - }; - - networking.hosts."::1" = [ "${domain}" ]; - networking.firewall.allowedTCPPorts = [ - 80 - 443 - ]; - - users.users.canaille.shell = pkgs.bashInteractive; - - security.pki.certificateFiles = [ certs.ca.cert ]; }; - nodes.client = - { nodes, ... }: - { - networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ "${domain}" ]; - security.pki.certificateFiles = [ certs.ca.cert ]; + services.nginx.virtualHosts."${domain}" = { + enableACME = lib.mkForce false; + sslCertificate = certs."${domain}".cert; + sslCertificateKey = certs."${domain}".key; }; - testScript = - { ... }: - '' - import json + networking.hosts."::1" = [ "${domain}" ]; + networking.firewall.allowedTCPPorts = [ + 80 + 443 + ]; - start_all() - server.wait_for_unit("canaille.socket") - server.wait_until_succeeds("curl -f https://${domain}") - server.succeed("sudo -iu canaille -- canaille create user --user-name admin --password adminpass --emails admin@${domain}") - json_str = server.succeed("sudo -iu canaille -- canaille get user") - assert json.loads(json_str)[0]["user_name"] == "admin" - server.succeed("sudo -iu canaille -- canaille config check") - ''; - } -) + users.users.canaille.shell = pkgs.bashInteractive; + + security.pki.certificateFiles = [ certs.ca.cert ]; + }; + + nodes.client = + { nodes, ... }: + { + networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ "${domain}" ]; + security.pki.certificateFiles = [ certs.ca.cert ]; + }; + + testScript = + { ... }: + '' + import json + + start_all() + server.wait_for_unit("canaille.socket") + server.wait_until_succeeds("curl -f https://${domain}") + server.succeed("sudo -iu canaille -- canaille create user --user-name admin --password adminpass --emails admin@${domain}") + json_str = server.succeed("sudo -iu canaille -- canaille get user") + assert json.loads(json_str)[0]["user_name"] == "admin" + server.succeed("sudo -iu canaille -- canaille config check") + ''; +} diff --git a/nixos/tests/castopod.nix b/nixos/tests/castopod.nix index 3d62cc0711fe..608526c728c6 100644 --- a/nixos/tests/castopod.nix +++ b/nixos/tests/castopod.nix @@ -1,250 +1,248 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "castopod"; - meta = with lib.maintainers; { - maintainers = [ alexoundos ]; +{ pkgs, lib, ... }: +{ + name = "castopod"; + meta = with lib.maintainers; { + maintainers = [ alexoundos ]; + }; + + nodes.castopod = + { nodes, ... }: + { + # otherwise 500 MiB file upload fails! 
+ virtualisation.diskSize = 512 + 3 * 512; + + networking.firewall.allowedTCPPorts = [ 80 ]; + networking.extraHosts = lib.strings.concatStringsSep "\n" ( + lib.attrsets.mapAttrsToList ( + name: _: "127.0.0.1 ${name}" + ) nodes.castopod.services.nginx.virtualHosts + ); + + services.castopod = { + enable = true; + database.createLocally = true; + localDomain = "castopod.example.com"; + maxUploadSize = "512M"; + }; }; - nodes.castopod = - { nodes, ... }: - { - # otherwise 500 MiB file upload fails! - virtualisation.diskSize = 512 + 3 * 512; + nodes.client = + { + nodes, + pkgs, + lib, + ... + }: + let + domain = nodes.castopod.services.castopod.localDomain; - networking.firewall.allowedTCPPorts = [ 80 ]; - networking.extraHosts = lib.strings.concatStringsSep "\n" ( - lib.attrsets.mapAttrsToList ( - name: _: "127.0.0.1 ${name}" - ) nodes.castopod.services.nginx.virtualHosts - ); + getIP = node: (builtins.head node.networking.interfaces.eth1.ipv4.addresses).address; - services.castopod = { - enable = true; - database.createLocally = true; - localDomain = "castopod.example.com"; - maxUploadSize = "512M"; - }; - }; + targetPodcastSize = 500 * 1024 * 1024; + lameMp3Bitrate = 348300; + lameMp3FileAdjust = -800; + targetPodcastDuration = toString ((targetPodcastSize + lameMp3FileAdjust) / (lameMp3Bitrate / 8)); + bannerWidth = 3000; + banner = pkgs.runCommand "gen-castopod-cover.jpg" { } '' + ${pkgs.imagemagick}/bin/magick ` + `-background green -bordercolor white -gravity northwest xc:black ` + `-duplicate 99 ` + `-seed 1 -resize "%[fx:rand()*72+24]" ` + `-seed 0 -rotate "%[fx:rand()*360]" -border 6x6 -splice 16x36 ` + `-seed 0 -rotate "%[fx:floor(rand()*4)*90]" -resize "150x50!" ` + `+append -crop 10x1@ +repage -roll "+%[fx:(t%2)*72]+0" -append ` + `-resize ${toString bannerWidth} -quality 1 $out + ''; - nodes.client = - { - nodes, - pkgs, - lib, - ... - }: - let - domain = nodes.castopod.services.castopod.localDomain; + coverWidth = toString 3000; + cover = pkgs.runCommand "gen-castopod-banner.jpg" { } '' + ${pkgs.imagemagick}/bin/magick ` + `-background white -bordercolor white -gravity northwest xc:black ` + `-duplicate 99 ` + `-seed 1 -resize "%[fx:rand()*72+24]" ` + `-seed 0 -rotate "%[fx:rand()*360]" -border 6x6 -splice 36x36 ` + `-seed 0 -rotate "%[fx:floor(rand()*4)*90]" -resize "144x144!" 
` + `+append -crop 10x1@ +repage -roll "+%[fx:(t%2)*72]+0" -append ` + `-resize ${coverWidth} -quality 1 $out + ''; + in + { + networking.extraHosts = lib.strings.concatStringsSep "\n" ( + lib.attrsets.mapAttrsToList ( + name: _: "${getIP nodes.castopod} ${name}" + ) nodes.castopod.services.nginx.virtualHosts + ); - getIP = node: (builtins.head node.networking.interfaces.eth1.ipv4.addresses).address; + environment.systemPackages = + let + username = "admin"; + email = "admin@${domain}"; + password = "Abcd1234"; + podcastTitle = "Some Title"; + episodeTitle = "Episode Title"; + browser-test = + pkgs.writers.writePython3Bin "browser-test" + { + libraries = [ pkgs.python3Packages.selenium ]; + flakeIgnore = [ + "E124" + "E501" + ]; + } + '' + from selenium.webdriver.common.by import By + from selenium.webdriver import Firefox + from selenium.webdriver.firefox.options import Options + from selenium.webdriver.firefox.service import Service + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions as EC + from subprocess import STDOUT + import logging - targetPodcastSize = 500 * 1024 * 1024; - lameMp3Bitrate = 348300; - lameMp3FileAdjust = -800; - targetPodcastDuration = toString ((targetPodcastSize + lameMp3FileAdjust) / (lameMp3Bitrate / 8)); - bannerWidth = 3000; - banner = pkgs.runCommand "gen-castopod-cover.jpg" { } '' - ${pkgs.imagemagick}/bin/magick ` - `-background green -bordercolor white -gravity northwest xc:black ` - `-duplicate 99 ` - `-seed 1 -resize "%[fx:rand()*72+24]" ` - `-seed 0 -rotate "%[fx:rand()*360]" -border 6x6 -splice 16x36 ` - `-seed 0 -rotate "%[fx:floor(rand()*4)*90]" -resize "150x50!" ` - `+append -crop 10x1@ +repage -roll "+%[fx:(t%2)*72]+0" -append ` - `-resize ${toString bannerWidth} -quality 1 $out - ''; + selenium_logger = logging.getLogger("selenium") + selenium_logger.setLevel(logging.DEBUG) + selenium_logger.addHandler(logging.StreamHandler()) - coverWidth = toString 3000; - cover = pkgs.runCommand "gen-castopod-banner.jpg" { } '' - ${pkgs.imagemagick}/bin/magick ` - `-background white -bordercolor white -gravity northwest xc:black ` - `-duplicate 99 ` - `-seed 1 -resize "%[fx:rand()*72+24]" ` - `-seed 0 -rotate "%[fx:rand()*360]" -border 6x6 -splice 36x36 ` - `-seed 0 -rotate "%[fx:floor(rand()*4)*90]" -resize "144x144!" 
` - `+append -crop 10x1@ +repage -roll "+%[fx:(t%2)*72]+0" -append ` - `-resize ${coverWidth} -quality 1 $out - ''; - in - { - networking.extraHosts = lib.strings.concatStringsSep "\n" ( - lib.attrsets.mapAttrsToList ( - name: _: "${getIP nodes.castopod} ${name}" - ) nodes.castopod.services.nginx.virtualHosts - ); + options = Options() + options.add_argument('--headless') + service = Service(log_output=STDOUT) + driver = Firefox(options=options, service=service) + driver = Firefox(options=options) + driver.implicitly_wait(30) + driver.set_page_load_timeout(60) - environment.systemPackages = - let - username = "admin"; - email = "admin@${domain}"; - password = "Abcd1234"; - podcastTitle = "Some Title"; - episodeTitle = "Episode Title"; - browser-test = - pkgs.writers.writePython3Bin "browser-test" - { - libraries = [ pkgs.python3Packages.selenium ]; - flakeIgnore = [ - "E124" - "E501" - ]; - } - '' - from selenium.webdriver.common.by import By - from selenium.webdriver import Firefox - from selenium.webdriver.firefox.options import Options - from selenium.webdriver.firefox.service import Service - from selenium.webdriver.support.ui import WebDriverWait - from selenium.webdriver.support import expected_conditions as EC - from subprocess import STDOUT - import logging + # install ########################################################## - selenium_logger = logging.getLogger("selenium") - selenium_logger.setLevel(logging.DEBUG) - selenium_logger.addHandler(logging.StreamHandler()) + driver.get('http://${domain}/cp-install') - options = Options() - options.add_argument('--headless') - service = Service(log_output=STDOUT) - driver = Firefox(options=options, service=service) - driver = Firefox(options=options) - driver.implicitly_wait(30) - driver.set_page_load_timeout(60) + wait = WebDriverWait(driver, 20) - # install ########################################################## + wait.until(EC.title_contains("installer")) - driver.get('http://${domain}/cp-install') + driver.find_element(By.CSS_SELECTOR, '#username').send_keys( + '${username}' + ) + driver.find_element(By.CSS_SELECTOR, '#email').send_keys( + '${email}' + ) + driver.find_element(By.CSS_SELECTOR, '#password').send_keys( + '${password}' + ) + driver.find_element(By.XPATH, + "//button[contains(., 'Finish install')]" + ).click() - wait = WebDriverWait(driver, 20) + wait.until(EC.title_contains("Auth")) - wait.until(EC.title_contains("installer")) + driver.find_element(By.CSS_SELECTOR, '#email').send_keys( + '${email}' + ) + driver.find_element(By.CSS_SELECTOR, '#password').send_keys( + '${password}' + ) + driver.find_element(By.XPATH, + "//button[contains(., 'Login')]" + ).click() - driver.find_element(By.CSS_SELECTOR, '#username').send_keys( - '${username}' - ) - driver.find_element(By.CSS_SELECTOR, '#email').send_keys( - '${email}' - ) - driver.find_element(By.CSS_SELECTOR, '#password').send_keys( - '${password}' - ) - driver.find_element(By.XPATH, - "//button[contains(., 'Finish install')]" - ).click() + wait.until(EC.title_contains("Admin dashboard")) - wait.until(EC.title_contains("Auth")) + # create podcast ################################################### - driver.find_element(By.CSS_SELECTOR, '#email').send_keys( - '${email}' - ) - driver.find_element(By.CSS_SELECTOR, '#password').send_keys( - '${password}' - ) - driver.find_element(By.XPATH, - "//button[contains(., 'Login')]" - ).click() + driver.get('http://${domain}/admin/podcasts/new') - wait.until(EC.title_contains("Admin dashboard")) + 
wait.until(EC.title_contains("Create podcast")) - # create podcast ################################################### + driver.find_element(By.CSS_SELECTOR, '#cover').send_keys( + '${cover}' + ) + driver.find_element(By.CSS_SELECTOR, '#banner').send_keys( + '${banner}' + ) + driver.find_element(By.CSS_SELECTOR, '#title').send_keys( + '${podcastTitle}' + ) + driver.find_element(By.CSS_SELECTOR, '#handle').send_keys( + 'some_handle' + ) + driver.find_element(By.CSS_SELECTOR, '#description').send_keys( + 'Some description' + ) + driver.find_element(By.CSS_SELECTOR, '#owner_name').send_keys( + 'Owner Name' + ) + driver.find_element(By.CSS_SELECTOR, '#owner_email').send_keys( + 'owner@email.xyz' + ) + driver.find_element(By.XPATH, + "//button[contains(., 'Create podcast')]" + ).click() - driver.get('http://${domain}/admin/podcasts/new') + wait.until(EC.title_contains("${podcastTitle}")) - wait.until(EC.title_contains("Create podcast")) + driver.find_element(By.XPATH, + "//span[contains(., 'Add an episode')]" + ).click() - driver.find_element(By.CSS_SELECTOR, '#cover').send_keys( - '${cover}' - ) - driver.find_element(By.CSS_SELECTOR, '#banner').send_keys( - '${banner}' - ) - driver.find_element(By.CSS_SELECTOR, '#title').send_keys( - '${podcastTitle}' - ) - driver.find_element(By.CSS_SELECTOR, '#handle').send_keys( - 'some_handle' - ) - driver.find_element(By.CSS_SELECTOR, '#description').send_keys( - 'Some description' - ) - driver.find_element(By.CSS_SELECTOR, '#owner_name').send_keys( - 'Owner Name' - ) - driver.find_element(By.CSS_SELECTOR, '#owner_email').send_keys( - 'owner@email.xyz' - ) - driver.find_element(By.XPATH, - "//button[contains(., 'Create podcast')]" - ).click() + wait.until(EC.title_contains("Add an episode")) - wait.until(EC.title_contains("${podcastTitle}")) + # upload podcast ################################################### - driver.find_element(By.XPATH, - "//span[contains(., 'Add an episode')]" - ).click() + driver.find_element(By.CSS_SELECTOR, '#audio_file').send_keys( + '/tmp/podcast.mp3' + ) + driver.find_element(By.CSS_SELECTOR, '#cover').send_keys( + '${cover}' + ) + driver.find_element(By.CSS_SELECTOR, '#description').send_keys( + 'Episode description' + ) + driver.find_element(By.CSS_SELECTOR, '#title').send_keys( + '${episodeTitle}' + ) + driver.find_element(By.XPATH, + "//button[contains(., 'Create episode')]" + ).click() - wait.until(EC.title_contains("Add an episode")) + wait.until(EC.title_contains("${episodeTitle}")) - # upload podcast ################################################### - - driver.find_element(By.CSS_SELECTOR, '#audio_file').send_keys( - '/tmp/podcast.mp3' - ) - driver.find_element(By.CSS_SELECTOR, '#cover').send_keys( - '${cover}' - ) - driver.find_element(By.CSS_SELECTOR, '#description').send_keys( - 'Episode description' - ) - driver.find_element(By.CSS_SELECTOR, '#title').send_keys( - '${episodeTitle}' - ) - driver.find_element(By.XPATH, - "//button[contains(., 'Create episode')]" - ).click() - - wait.until(EC.title_contains("${episodeTitle}")) - - driver.close() - driver.quit() - ''; - in - [ - pkgs.firefox-unwrapped - pkgs.geckodriver - browser-test - (pkgs.writeShellApplication { - name = "build-mp3"; - runtimeInputs = with pkgs; [ - sox - lame - ]; - text = '' - out=/tmp/podcast.mp3 - sox -n -r 48000 -t wav - synth ${targetPodcastDuration} sine 440 ` - `| lame --noreplaygain --cbr -q 9 -b 320 - $out - FILESIZE="$(stat -c%s $out)" - [ "$FILESIZE" -gt 0 ] - [ "$FILESIZE" -le "${toString targetPodcastSize}" ] + driver.close() + 
driver.quit() ''; - }) - ]; - }; + in + [ + pkgs.firefox-unwrapped + pkgs.geckodriver + browser-test + (pkgs.writeShellApplication { + name = "build-mp3"; + runtimeInputs = with pkgs; [ + sox + lame + ]; + text = '' + out=/tmp/podcast.mp3 + sox -n -r 48000 -t wav - synth ${targetPodcastDuration} sine 440 ` + `| lame --noreplaygain --cbr -q 9 -b 320 - $out + FILESIZE="$(stat -c%s $out)" + [ "$FILESIZE" -gt 0 ] + [ "$FILESIZE" -le "${toString targetPodcastSize}" ] + ''; + }) + ]; + }; - testScript = '' - start_all() - castopod.wait_for_unit("castopod-setup.service") - castopod.wait_for_file("/run/phpfpm/castopod.sock") - castopod.wait_for_unit("nginx.service") - castopod.wait_for_open_port(80) - castopod.wait_until_succeeds("curl -sS -f http://castopod.example.com") + testScript = '' + start_all() + castopod.wait_for_unit("castopod-setup.service") + castopod.wait_for_file("/run/phpfpm/castopod.sock") + castopod.wait_for_unit("nginx.service") + castopod.wait_for_open_port(80) + castopod.wait_until_succeeds("curl -sS -f http://castopod.example.com") - client.succeed("build-mp3") + client.succeed("build-mp3") - with subtest("Create superadmin, log in, create and upload a podcast"): - client.succeed(\ - "PYTHONUNBUFFERED=1 systemd-cat -t browser-test browser-test") - ''; - } -) + with subtest("Create superadmin, log in, create and upload a podcast"): + client.succeed(\ + "PYTHONUNBUFFERED=1 systemd-cat -t browser-test browser-test") + ''; +} diff --git a/nixos/tests/charliecloud.nix b/nixos/tests/charliecloud.nix index 957ec6ea4374..abdb2d877c19 100644 --- a/nixos/tests/charliecloud.nix +++ b/nixos/tests/charliecloud.nix @@ -1,49 +1,47 @@ # This test checks charliecloud image construction and run -import ./make-test-python.nix ( - { pkgs, ... }: - let +{ pkgs, ... }: +let - dockerfile = pkgs.writeText "Dockerfile" '' - FROM nix - RUN mkdir /home /tmp - RUN touch /etc/passwd /etc/group - CMD ["true"] - ''; + dockerfile = pkgs.writeText "Dockerfile" '' + FROM nix + RUN mkdir /home /tmp + RUN touch /etc/passwd /etc/group + CMD ["true"] + ''; - in - { - name = "charliecloud"; - meta = with pkgs.lib.maintainers; { - maintainers = [ bzizou ]; - }; +in +{ + name = "charliecloud"; + meta = with pkgs.lib.maintainers; { + maintainers = [ bzizou ]; + }; - nodes = { - host = - { ... }: - { - environment.systemPackages = [ pkgs.charliecloud ]; - virtualisation.docker.enable = true; - users.users.alice = { - isNormalUser = true; - extraGroups = [ "docker" ]; - }; + nodes = { + host = + { ... 
}: + { + environment.systemPackages = [ pkgs.charliecloud ]; + virtualisation.docker.enable = true; + users.users.alice = { + isNormalUser = true; + extraGroups = [ "docker" ]; }; - }; + }; + }; - testScript = '' - host.start() - host.wait_for_unit("docker.service") - host.succeed( - 'su - alice -c "docker load --input=${pkgs.dockerTools.examples.nix}"' - ) - host.succeed( - "cp ${dockerfile} /home/alice/Dockerfile" - ) - host.succeed('su - alice -c "ch-build -t hello ."') - host.succeed('su - alice -c "ch-builder2tar hello /var/tmp"') - host.succeed('su - alice -c "ch-tar2dir /var/tmp/hello.tar.gz /var/tmp"') - host.succeed('su - alice -c "ch-run /var/tmp/hello -- echo Running_From_Container_OK"') - ''; - } -) + testScript = '' + host.start() + host.wait_for_unit("docker.service") + host.succeed( + 'su - alice -c "docker load --input=${pkgs.dockerTools.examples.nix}"' + ) + host.succeed( + "cp ${dockerfile} /home/alice/Dockerfile" + ) + host.succeed('su - alice -c "ch-build -t hello ."') + host.succeed('su - alice -c "ch-builder2tar hello /var/tmp"') + host.succeed('su - alice -c "ch-tar2dir /var/tmp/hello.tar.gz /var/tmp"') + host.succeed('su - alice -c "ch-run /var/tmp/hello -- echo Running_From_Container_OK"') + ''; +} diff --git a/nixos/tests/cinnamon-wayland.nix b/nixos/tests/cinnamon-wayland.nix index 1b472f863099..a80199956cc2 100644 --- a/nixos/tests/cinnamon-wayland.nix +++ b/nixos/tests/cinnamon-wayland.nix @@ -1,84 +1,82 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "cinnamon-wayland"; +{ pkgs, lib, ... }: +{ + name = "cinnamon-wayland"; - meta.maintainers = lib.teams.cinnamon.members; + meta.maintainers = lib.teams.cinnamon.members; - nodes.machine = - { nodes, ... }: - { - imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - services.xserver.desktopManager.cinnamon.enable = true; - services.displayManager = { - autoLogin.enable = true; - autoLogin.user = nodes.machine.users.users.alice.name; - defaultSession = "cinnamon-wayland"; - }; - - # For the sessionPath subtest. - services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gpaste ]; + nodes.machine = + { nodes, ... }: + { + imports = [ ./common/user-account.nix ]; + services.xserver.enable = true; + services.xserver.desktopManager.cinnamon.enable = true; + services.displayManager = { + autoLogin.enable = true; + autoLogin.user = nodes.machine.users.users.alice.name; + defaultSession = "cinnamon-wayland"; }; - enableOCR = true; + # For the sessionPath subtest. + services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gpaste ]; + }; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus"; - su = command: "su - ${user.name} -c '${env} ${command}'"; + enableOCR = true; - # Call javascript in cinnamon (the shell), returns a tuple (success, output), - # where `success` is true if the dbus call was successful and `output` is what - # the javascript evaluates to. - eval = - name: su "gdbus call --session -d org.Cinnamon -o /org/Cinnamon -m org.Cinnamon.Eval ${name}"; - in - '' - machine.wait_for_unit("display-manager.service") + testScript = + { nodes, ... 
}: + let + user = nodes.machine.users.users.alice; + env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus"; + su = command: "su - ${user.name} -c '${env} ${command}'"; - with subtest("Wait for wayland server"): - machine.wait_for_file("/run/user/${toString user.uid}/wayland-0") + # Call javascript in cinnamon (the shell), returns a tuple (success, output), + # where `success` is true if the dbus call was successful and `output` is what + # the javascript evaluates to. + eval = + name: su "gdbus call --session -d org.Cinnamon -o /org/Cinnamon -m org.Cinnamon.Eval ${name}"; + in + '' + machine.wait_for_unit("display-manager.service") - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + with subtest("Wait for wayland server"): + machine.wait_for_file("/run/user/${toString user.uid}/wayland-0") - with subtest("Wait for the Cinnamon shell"): - # Correct output should be (true, '2') - # https://github.com/linuxmint/cinnamon/blob/5.4.0/js/ui/main.js#L183-L187 - machine.wait_until_succeeds("${eval "Main.runState"} | grep -q 'true,..2'") + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - with subtest("Check if Cinnamon components actually start"): - for i in ["csd-media-keys", "xapp-sn-watcher", "nemo-desktop"]: - machine.wait_until_succeeds(f"pgrep -f {i}") - machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'") - machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'") + with subtest("Wait for the Cinnamon shell"): + # Correct output should be (true, '2') + # https://github.com/linuxmint/cinnamon/blob/5.4.0/js/ui/main.js#L183-L187 + machine.wait_until_succeeds("${eval "Main.runState"} | grep -q 'true,..2'") - with subtest("Check if sessionPath option actually works"): - machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste") + with subtest("Check if Cinnamon components actually start"): + for i in ["csd-media-keys", "xapp-sn-watcher", "nemo-desktop"]: + machine.wait_until_succeeds(f"pgrep -f {i}") + machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'") + machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'") - with subtest("Open Cinnamon Settings"): - machine.succeed("${su "cinnamon-settings themes >&2 &"}") - machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'") - machine.wait_for_text('(Style|Appearance|Color)') - machine.sleep(2) - machine.screenshot("cinnamon_settings") + with subtest("Check if sessionPath option actually works"): + machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste") - with subtest("Check if screensaver works"): - # This is not supported at the moment. 
- # https://trello.com/b/HHs01Pab/cinnamon-wayland - machine.execute("${su "cinnamon-screensaver-command -l >&2 &"}") - machine.wait_until_succeeds("journalctl -b --grep 'cinnamon-screensaver is disabled in wayland sessions'") + with subtest("Open Cinnamon Settings"): + machine.succeed("${su "cinnamon-settings themes >&2 &"}") + machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'") + machine.wait_for_text('(Style|Appearance|Color)') + machine.sleep(2) + machine.screenshot("cinnamon_settings") - with subtest("Open GNOME Terminal"): - machine.succeed("${su "dbus-launch gnome-terminal"}") - machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'gnome-terminal'") - machine.sleep(2) + with subtest("Check if screensaver works"): + # This is not supported at the moment. + # https://trello.com/b/HHs01Pab/cinnamon-wayland + machine.execute("${su "cinnamon-screensaver-command -l >&2 &"}") + machine.wait_until_succeeds("journalctl -b --grep 'cinnamon-screensaver is disabled in wayland sessions'") - with subtest("Check if Cinnamon has ever coredumped"): - machine.fail("coredumpctl --json=short | grep -E 'cinnamon|nemo'") - ''; - } -) + with subtest("Open GNOME Terminal"): + machine.succeed("${su "dbus-launch gnome-terminal"}") + machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'gnome-terminal'") + machine.sleep(2) + + with subtest("Check if Cinnamon has ever coredumped"): + machine.fail("coredumpctl --json=short | grep -E 'cinnamon|nemo'") + ''; +} diff --git a/nixos/tests/cinnamon.nix b/nixos/tests/cinnamon.nix index 34777f8ee7b2..3230d7b07358 100644 --- a/nixos/tests/cinnamon.nix +++ b/nixos/tests/cinnamon.nix @@ -1,104 +1,102 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "cinnamon"; +{ pkgs, lib, ... }: +{ + name = "cinnamon"; - meta.maintainers = lib.teams.cinnamon.members; + meta.maintainers = lib.teams.cinnamon.members; - nodes.machine = - { ... }: - { - imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - services.xserver.desktopManager.cinnamon.enable = true; + nodes.machine = + { ... }: + { + imports = [ ./common/user-account.nix ]; + services.xserver.enable = true; + services.xserver.desktopManager.cinnamon.enable = true; - # We don't ship gnome-text-editor in Cinnamon module, we add this line mainly - # to catch eval issues related to this option. - environment.cinnamon.excludePackages = [ pkgs.gnome-text-editor ]; + # We don't ship gnome-text-editor in Cinnamon module, we add this line mainly + # to catch eval issues related to this option. + environment.cinnamon.excludePackages = [ pkgs.gnome-text-editor ]; - # For the sessionPath subtest. - services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gpaste ]; + # For the sessionPath subtest. + services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gpaste ]; - # For OCR test. - services.xserver.displayManager.lightdm.greeters.slick.extraConfig = '' - enable-hidpi = on - ''; - }; - - enableOCR = true; - - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"; - su = command: "su - ${user.name} -c '${env} ${command}'"; - - # Call javascript in cinnamon (the shell), returns a tuple (success, output), - # where `success` is true if the dbus call was successful and `output` is what - # the javascript evaluates to. 
- eval = - name: su "gdbus call --session -d org.Cinnamon -o /org/Cinnamon -m org.Cinnamon.Eval ${name}"; - in - '' - machine.wait_for_unit("display-manager.service") - - with subtest("Test if we can see username in slick-greeter"): - machine.wait_for_text("${user.description}") - machine.screenshot("slick_greeter_lightdm") - - with subtest("Login with slick-greeter"): - machine.send_chars("${user.password}\n") - machine.wait_for_x() - machine.wait_for_file("${user.home}/.Xauthority") - machine.succeed("xauth merge ${user.home}/.Xauthority") - - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - - with subtest("Wait for the Cinnamon shell"): - # Correct output should be (true, '2') - # https://github.com/linuxmint/cinnamon/blob/5.4.0/js/ui/main.js#L183-L187 - machine.wait_until_succeeds("${eval "Main.runState"} | grep -q 'true,..2'") - - with subtest("Check if Cinnamon components actually start"): - for i in ["csd-media-keys", "cinnamon-killer-daemon", "xapp-sn-watcher", "nemo-desktop"]: - machine.wait_until_succeeds(f"pgrep -f {i}") - machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'") - machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'") - - with subtest("Check if sessionPath option actually works"): - machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste") - - with subtest("Open Cinnamon Settings"): - machine.succeed("${su "cinnamon-settings themes >&2 &"}") - machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'") - machine.wait_for_text('(Style|Appearance|Color)') - machine.sleep(2) - machine.screenshot("cinnamon_settings") - - with subtest("Lock the screen"): - machine.succeed("${su "cinnamon-screensaver-command -l >&2 &"}") - machine.wait_until_succeeds("${su "cinnamon-screensaver-command -q"} | grep 'The screensaver is active'") - machine.sleep(2) - machine.screenshot("cinnamon_screensaver") - machine.send_chars("${user.password}\n", delay=0.2) - machine.wait_until_succeeds("${su "cinnamon-screensaver-command -q"} | grep 'The screensaver is inactive'") - machine.sleep(2) - - with subtest("Open GNOME Terminal"): - machine.succeed("${su "gnome-terminal"}") - machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'gnome-terminal'") - machine.sleep(2) - - with subtest("Open virtual keyboard"): - machine.succeed("${su "dbus-send --print-reply --dest=org.Cinnamon /org/Cinnamon org.Cinnamon.ToggleKeyboard"}") - machine.wait_for_text('(Ctrl|Alt)') - machine.sleep(2) - machine.screenshot("cinnamon_virtual_keyboard") - - with subtest("Check if Cinnamon has ever coredumped"): - machine.fail("coredumpctl --json=short | grep -E 'cinnamon|nemo'") + # For OCR test. + services.xserver.displayManager.lightdm.greeters.slick.extraConfig = '' + enable-hidpi = on ''; - } -) + }; + + enableOCR = true; + + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"; + su = command: "su - ${user.name} -c '${env} ${command}'"; + + # Call javascript in cinnamon (the shell), returns a tuple (success, output), + # where `success` is true if the dbus call was successful and `output` is what + # the javascript evaluates to. 
+ eval = + name: su "gdbus call --session -d org.Cinnamon -o /org/Cinnamon -m org.Cinnamon.Eval ${name}"; + in + '' + machine.wait_for_unit("display-manager.service") + + with subtest("Test if we can see username in slick-greeter"): + machine.wait_for_text("${user.description}") + machine.screenshot("slick_greeter_lightdm") + + with subtest("Login with slick-greeter"): + machine.send_chars("${user.password}\n") + machine.wait_for_x() + machine.wait_for_file("${user.home}/.Xauthority") + machine.succeed("xauth merge ${user.home}/.Xauthority") + + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + + with subtest("Wait for the Cinnamon shell"): + # Correct output should be (true, '2') + # https://github.com/linuxmint/cinnamon/blob/5.4.0/js/ui/main.js#L183-L187 + machine.wait_until_succeeds("${eval "Main.runState"} | grep -q 'true,..2'") + + with subtest("Check if Cinnamon components actually start"): + for i in ["csd-media-keys", "cinnamon-killer-daemon", "xapp-sn-watcher", "nemo-desktop"]: + machine.wait_until_succeeds(f"pgrep -f {i}") + machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'") + machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'") + + with subtest("Check if sessionPath option actually works"): + machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste") + + with subtest("Open Cinnamon Settings"): + machine.succeed("${su "cinnamon-settings themes >&2 &"}") + machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'") + machine.wait_for_text('(Style|Appearance|Color)') + machine.sleep(2) + machine.screenshot("cinnamon_settings") + + with subtest("Lock the screen"): + machine.succeed("${su "cinnamon-screensaver-command -l >&2 &"}") + machine.wait_until_succeeds("${su "cinnamon-screensaver-command -q"} | grep 'The screensaver is active'") + machine.sleep(2) + machine.screenshot("cinnamon_screensaver") + machine.send_chars("${user.password}\n", delay=0.2) + machine.wait_until_succeeds("${su "cinnamon-screensaver-command -q"} | grep 'The screensaver is inactive'") + machine.sleep(2) + + with subtest("Open GNOME Terminal"): + machine.succeed("${su "gnome-terminal"}") + machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'gnome-terminal'") + machine.sleep(2) + + with subtest("Open virtual keyboard"): + machine.succeed("${su "dbus-send --print-reply --dest=org.Cinnamon /org/Cinnamon org.Cinnamon.ToggleKeyboard"}") + machine.wait_for_text('(Ctrl|Alt)') + machine.sleep(2) + machine.screenshot("cinnamon_virtual_keyboard") + + with subtest("Check if Cinnamon has ever coredumped"): + machine.fail("coredumpctl --json=short | grep -E 'cinnamon|nemo'") + ''; +} diff --git a/nixos/tests/cjdns.nix b/nixos/tests/cjdns.nix index 7fdb30a0abe6..52762f853e20 100644 --- a/nixos/tests/cjdns.nix +++ b/nixos/tests/cjdns.nix @@ -18,117 +18,115 @@ let in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "cjdns"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ehmry ]; - }; +{ pkgs, ... }: +{ + name = "cjdns"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ehmry ]; + }; - nodes = { - # Alice finds peers over over ETHInterface. - alice = - { ... }: - { - imports = [ basicConfig ]; + nodes = { + # Alice finds peers over over ETHInterface. + alice = + { ... 
}: + { + imports = [ basicConfig ]; - services.cjdns.ETHInterface.bind = "eth1"; + services.cjdns.ETHInterface.bind = "eth1"; - services.httpd.enable = true; - services.httpd.adminAddr = "foo@example.org"; - networking.firewall.allowedTCPPorts = [ 80 ]; - }; + services.httpd.enable = true; + services.httpd.adminAddr = "foo@example.org"; + networking.firewall.allowedTCPPorts = [ 80 ]; + }; - # Bob explicitly connects to Carol over UDPInterface. - bob = - { ... }: + # Bob explicitly connects to Carol over UDPInterface. + bob = + { ... }: - { - imports = [ basicConfig ]; + { + imports = [ basicConfig ]; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "192.168.0.2"; - prefixLength = 24; - } - ]; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "192.168.0.2"; + prefixLength = 24; + } + ]; - services.cjdns = { - UDPInterface = { - bind = "0.0.0.0:1024"; - connectTo."192.168.0.1:1024" = { - password = carolPassword; - publicKey = carolPubKey; - }; + services.cjdns = { + UDPInterface = { + bind = "0.0.0.0:1024"; + connectTo."192.168.0.1:1024" = { + password = carolPassword; + publicKey = carolPubKey; }; }; }; + }; - # Carol listens on ETHInterface and UDPInterface, - # but knows neither Alice or Bob. - carol = - { ... }: - { - imports = [ basicConfig ]; + # Carol listens on ETHInterface and UDPInterface, + # but knows neither Alice or Bob. + carol = + { ... }: + { + imports = [ basicConfig ]; - environment.etc."cjdns.keys".text = '' - CJDNS_PRIVATE_KEY=${carolKey} - CJDNS_ADMIN_PASSWORD=FOOBAR - ''; + environment.etc."cjdns.keys".text = '' + CJDNS_PRIVATE_KEY=${carolKey} + CJDNS_ADMIN_PASSWORD=FOOBAR + ''; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "192.168.0.1"; - prefixLength = 24; - } - ]; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "192.168.0.1"; + prefixLength = 24; + } + ]; - services.cjdns = { - authorizedPasswords = [ carolPassword ]; - ETHInterface.bind = "eth1"; - UDPInterface.bind = "192.168.0.1:1024"; - }; - networking.firewall.allowedUDPPorts = [ 1024 ]; + services.cjdns = { + authorizedPasswords = [ carolPassword ]; + ETHInterface.bind = "eth1"; + UDPInterface.bind = "192.168.0.1:1024"; }; + networking.firewall.allowedUDPPorts = [ 1024 ]; + }; - }; + }; - testScript = '' - import re + testScript = '' + import re - start_all() + start_all() - alice.wait_for_unit("cjdns.service") - bob.wait_for_unit("cjdns.service") - carol.wait_for_unit("cjdns.service") + alice.wait_for_unit("cjdns.service") + bob.wait_for_unit("cjdns.service") + carol.wait_for_unit("cjdns.service") - def cjdns_ip(machine): - res = machine.succeed("ip -o -6 addr show dev tun0") - ip = re.split("\s+|/", res)[3] - machine.log("has ip {}".format(ip)) - return ip + def cjdns_ip(machine): + res = machine.succeed("ip -o -6 addr show dev tun0") + ip = re.split("\s+|/", res)[3] + machine.log("has ip {}".format(ip)) + return ip - alice_ip6 = cjdns_ip(alice) - bob_ip6 = cjdns_ip(bob) - carol_ip6 = cjdns_ip(carol) + alice_ip6 = cjdns_ip(alice) + bob_ip6 = cjdns_ip(bob) + carol_ip6 = cjdns_ip(carol) - # ping a few times each to let the routing table establish itself + # ping a few times each to let the routing table establish itself - alice.succeed("ping -c 4 {}".format(carol_ip6)) - bob.succeed("ping -c 4 {}".format(carol_ip6)) + alice.succeed("ping -c 4 {}".format(carol_ip6)) + bob.succeed("ping -c 4 {}".format(carol_ip6)) - carol.succeed("ping -c 4 {}".format(alice_ip6)) - carol.succeed("ping -c 4 {}".format(bob_ip6)) + carol.succeed("ping -c 4 
{}".format(alice_ip6)) + carol.succeed("ping -c 4 {}".format(bob_ip6)) - alice.succeed("ping -c 4 {}".format(bob_ip6)) - bob.succeed("ping -c 4 {}".format(alice_ip6)) + alice.succeed("ping -c 4 {}".format(bob_ip6)) + bob.succeed("ping -c 4 {}".format(alice_ip6)) - alice.wait_for_unit("httpd.service") + alice.wait_for_unit("httpd.service") - bob.succeed("curl --fail -g http://[{}]".format(alice_ip6)) - ''; - } -) + bob.succeed("curl --fail -g http://[{}]".format(alice_ip6)) + ''; +} diff --git a/nixos/tests/clickhouse.nix b/nixos/tests/clickhouse.nix index 80868c04e9e4..165f00a1ec4e 100644 --- a/nixos/tests/clickhouse.nix +++ b/nixos/tests/clickhouse.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "clickhouse"; - meta.maintainers = with pkgs.lib.maintainers; [ ]; +{ pkgs, ... }: +{ + name = "clickhouse"; + meta.maintainers = with pkgs.lib.maintainers; [ ]; - nodes.machine = { - services.clickhouse.enable = true; - virtualisation.memorySize = 4096; - }; + nodes.machine = { + services.clickhouse.enable = true; + virtualisation.memorySize = 4096; + }; - testScript = - let - # work around quote/substitution complexity by Nix, Perl, bash and SQL. - tableDDL = pkgs.writeText "ddl.sql" "CREATE TABLE `demo` (`value` FixedString(10)) engine = MergeTree PARTITION BY value ORDER BY tuple();"; - insertQuery = pkgs.writeText "insert.sql" "INSERT INTO `demo` (`value`) VALUES ('foo');"; - selectQuery = pkgs.writeText "select.sql" "SELECT * from `demo`"; - in - '' - machine.start() - machine.wait_for_unit("clickhouse.service") - machine.wait_for_open_port(9000) + testScript = + let + # work around quote/substitution complexity by Nix, Perl, bash and SQL. + tableDDL = pkgs.writeText "ddl.sql" "CREATE TABLE `demo` (`value` FixedString(10)) engine = MergeTree PARTITION BY value ORDER BY tuple();"; + insertQuery = pkgs.writeText "insert.sql" "INSERT INTO `demo` (`value`) VALUES ('foo');"; + selectQuery = pkgs.writeText "select.sql" "SELECT * from `demo`"; + in + '' + machine.start() + machine.wait_for_unit("clickhouse.service") + machine.wait_for_open_port(9000) - machine.succeed( - "cat ${tableDDL} | clickhouse-client" - ) - machine.succeed( - "cat ${insertQuery} | clickhouse-client" - ) - machine.succeed( - "cat ${selectQuery} | clickhouse-client | grep foo" - ) - ''; - } -) + machine.succeed( + "cat ${tableDDL} | clickhouse-client" + ) + machine.succeed( + "cat ${insertQuery} | clickhouse-client" + ) + machine.succeed( + "cat ${selectQuery} | clickhouse-client | grep foo" + ) + ''; +} diff --git a/nixos/tests/cloudlog.nix b/nixos/tests/cloudlog.nix index b49d1929f80c..94be4d241f1b 100644 --- a/nixos/tests/cloudlog.nix +++ b/nixos/tests/cloudlog.nix @@ -1,21 +1,19 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "cloudlog"; - meta = { - maintainers = with pkgs.lib.maintainers; [ melling ]; +{ pkgs, ... 
}: +{ + name = "cloudlog"; + meta = { + maintainers = with pkgs.lib.maintainers; [ melling ]; + }; + nodes = { + machine = { + services.mysql.package = pkgs.mariadb; + services.cloudlog.enable = true; }; - nodes = { - machine = { - services.mysql.package = pkgs.mariadb; - services.cloudlog.enable = true; - }; - }; - testScript = '' - start_all() - machine.wait_for_unit("phpfpm-cloudlog") - machine.wait_for_open_port(80); - machine.wait_until_succeeds("curl -s -L --fail http://localhost | grep 'Login - Cloudlog'") - ''; - } -) + }; + testScript = '' + start_all() + machine.wait_for_unit("phpfpm-cloudlog") + machine.wait_for_open_port(80); + machine.wait_until_succeeds("curl -s -L --fail http://localhost | grep 'Login - Cloudlog'") + ''; +} diff --git a/nixos/tests/cockpit.nix b/nixos/tests/cockpit.nix index 29692f10aad2..b05c4b4dc93d 100644 --- a/nixos/tests/cockpit.nix +++ b/nixos/tests/cockpit.nix @@ -1,156 +1,154 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - user = "alice"; # from ./common/user-account.nix - password = "foobar"; # from ./common/user-account.nix - in - { - name = "cockpit"; - meta = { - maintainers = with lib.maintainers; [ lucasew ]; - }; - nodes = { - server = - { config, ... }: - { - imports = [ ./common/user-account.nix ]; - security.polkit.enable = true; - users.users.${user} = { - extraGroups = [ "wheel" ]; - }; - services.cockpit = { - enable = true; - port = 7890; - openFirewall = true; - allowed-origins = [ - "https://server:${toString config.services.cockpit.port}" - ]; - }; +let + user = "alice"; # from ./common/user-account.nix + password = "foobar"; # from ./common/user-account.nix +in +{ + name = "cockpit"; + meta = { + maintainers = with lib.maintainers; [ lucasew ]; + }; + nodes = { + server = + { config, ... }: + { + imports = [ ./common/user-account.nix ]; + security.polkit.enable = true; + users.users.${user} = { + extraGroups = [ "wheel" ]; }; - client = - { config, ... 
}: - { - imports = [ ./common/user-account.nix ]; - environment.systemPackages = - let - seleniumScript = - pkgs.writers.writePython3Bin "selenium-script" - { - libraries = with pkgs.python3Packages; [ selenium ]; - } - '' - from selenium import webdriver - from selenium.webdriver.common.by import By - from selenium.webdriver.firefox.options import Options - from selenium.webdriver.support.ui import WebDriverWait - from selenium.webdriver.support import expected_conditions as EC - from time import sleep - - - def log(msg): - from sys import stderr - print(f"[*] {msg}", file=stderr) - - - log("Initializing") - - options = Options() - options.add_argument("--headless") - - service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}") # noqa: E501 - driver = webdriver.Firefox(options=options, service=service) - - driver.implicitly_wait(10) - - log("Opening homepage") - driver.get("https://server:7890") - - - def wait_elem(by, query, timeout=10): - wait = WebDriverWait(driver, timeout) - wait.until(EC.presence_of_element_located((by, query))) - - - def wait_title_contains(title, timeout=10): - wait = WebDriverWait(driver, timeout) - wait.until(EC.title_contains(title)) - - - def find_element(by, query): - return driver.find_element(by, query) - - - def set_value(elem, value): - script = 'arguments[0].value = arguments[1]' - return driver.execute_script(script, elem, value) - - - log("Waiting for the homepage to load") - - # cockpit sets initial title as hostname - wait_title_contains("server") - wait_elem(By.CSS_SELECTOR, 'input#login-user-input') - - log("Homepage loaded!") - - log("Filling out username") - login_input = find_element(By.CSS_SELECTOR, 'input#login-user-input') - set_value(login_input, "${user}") - - log("Filling out password") - password_input = find_element(By.CSS_SELECTOR, 'input#login-password-input') - set_value(password_input, "${password}") - - log("Submitting credentials for login") - driver.find_element(By.CSS_SELECTOR, 'button#login-button').click() - - # driver.implicitly_wait(1) - # driver.get("https://server:7890/system") - - log("Waiting dashboard to load") - wait_title_contains("${user}@server") - - log("Waiting for the frontend to initialize") - sleep(1) - - log("Looking for that banner that tells about limited access") - container_iframe = find_element(By.CSS_SELECTOR, 'iframe.container-frame') - driver.switch_to.frame(container_iframe) - - assert "Web console is running in limited access mode" in driver.page_source - - log("Clicking the sudo button") - for button in driver.find_elements(By.TAG_NAME, "button"): - if 'admin' in button.text: - button.click() - driver.switch_to.default_content() - - log("Checking that /nonexistent is not a thing") - assert '/nonexistent' not in driver.page_source - assert len(driver.find_elements(By.CSS_SELECTOR, '#machine-reconnect')) == 0 - - driver.close() - ''; - in - with pkgs; - [ - firefox-unwrapped - geckodriver - seleniumScript - ]; + services.cockpit = { + enable = true; + port = 7890; + openFirewall = true; + allowed-origins = [ + "https://server:${toString config.services.cockpit.port}" + ]; }; - }; + }; + client = + { config, ... 
}: + { + imports = [ ./common/user-account.nix ]; + environment.systemPackages = + let + seleniumScript = + pkgs.writers.writePython3Bin "selenium-script" + { + libraries = with pkgs.python3Packages; [ selenium ]; + } + '' + from selenium import webdriver + from selenium.webdriver.common.by import By + from selenium.webdriver.firefox.options import Options + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions as EC + from time import sleep - testScript = '' - start_all() - server.wait_for_unit("sockets.target") - server.wait_for_open_port(7890) + def log(msg): + from sys import stderr + print(f"[*] {msg}", file=stderr) - client.succeed("curl -k https://server:7890 -o /dev/stderr") - print(client.succeed("whoami")) - client.succeed('PYTHONUNBUFFERED=1 selenium-script') - ''; - } -) + + log("Initializing") + + options = Options() + options.add_argument("--headless") + + service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}") # noqa: E501 + driver = webdriver.Firefox(options=options, service=service) + + driver.implicitly_wait(10) + + log("Opening homepage") + driver.get("https://server:7890") + + + def wait_elem(by, query, timeout=10): + wait = WebDriverWait(driver, timeout) + wait.until(EC.presence_of_element_located((by, query))) + + + def wait_title_contains(title, timeout=10): + wait = WebDriverWait(driver, timeout) + wait.until(EC.title_contains(title)) + + + def find_element(by, query): + return driver.find_element(by, query) + + + def set_value(elem, value): + script = 'arguments[0].value = arguments[1]' + return driver.execute_script(script, elem, value) + + + log("Waiting for the homepage to load") + + # cockpit sets initial title as hostname + wait_title_contains("server") + wait_elem(By.CSS_SELECTOR, 'input#login-user-input') + + log("Homepage loaded!") + + log("Filling out username") + login_input = find_element(By.CSS_SELECTOR, 'input#login-user-input') + set_value(login_input, "${user}") + + log("Filling out password") + password_input = find_element(By.CSS_SELECTOR, 'input#login-password-input') + set_value(password_input, "${password}") + + log("Submitting credentials for login") + driver.find_element(By.CSS_SELECTOR, 'button#login-button').click() + + # driver.implicitly_wait(1) + # driver.get("https://server:7890/system") + + log("Waiting dashboard to load") + wait_title_contains("${user}@server") + + log("Waiting for the frontend to initialize") + sleep(1) + + log("Looking for that banner that tells about limited access") + container_iframe = find_element(By.CSS_SELECTOR, 'iframe.container-frame') + driver.switch_to.frame(container_iframe) + + assert "Web console is running in limited access mode" in driver.page_source + + log("Clicking the sudo button") + for button in driver.find_elements(By.TAG_NAME, "button"): + if 'admin' in button.text: + button.click() + driver.switch_to.default_content() + + log("Checking that /nonexistent is not a thing") + assert '/nonexistent' not in driver.page_source + assert len(driver.find_elements(By.CSS_SELECTOR, '#machine-reconnect')) == 0 + + driver.close() + ''; + in + with pkgs; + [ + firefox-unwrapped + geckodriver + seleniumScript + ]; + }; + }; + + testScript = '' + start_all() + + server.wait_for_unit("sockets.target") + server.wait_for_open_port(7890) + + client.succeed("curl -k https://server:7890 -o /dev/stderr") + print(client.succeed("whoami")) + client.succeed('PYTHONUNBUFFERED=1 selenium-script') + ''; +} diff --git 
a/nixos/tests/code-server.nix b/nixos/tests/code-server.nix index d58a0faee104..e4c9c8397740 100644 --- a/nixos/tests/code-server.nix +++ b/nixos/tests/code-server.nix @@ -1,26 +1,24 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "code-server"; +{ pkgs, lib, ... }: +{ + name = "code-server"; - nodes = { - machine = - { pkgs, ... }: - { - services.code-server = { - enable = true; - auth = "none"; - }; + nodes = { + machine = + { pkgs, ... }: + { + services.code-server = { + enable = true; + auth = "none"; }; - }; + }; + }; - testScript = '' - start_all() - machine.wait_for_unit("code-server.service") - machine.wait_for_open_port(4444) - machine.succeed("curl -k --fail http://localhost:4444", timeout=10) - ''; + testScript = '' + start_all() + machine.wait_for_unit("code-server.service") + machine.wait_for_open_port(4444) + machine.succeed("curl -k --fail http://localhost:4444", timeout=10) + ''; - meta.maintainers = [ lib.maintainers.drupol ]; - } -) + meta.maintainers = [ lib.maintainers.drupol ]; +} diff --git a/nixos/tests/coder.nix b/nixos/tests/coder.nix index 8bbba5d900c0..9b0b2c334a0b 100644 --- a/nixos/tests/coder.nix +++ b/nixos/tests/coder.nix @@ -1,25 +1,23 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "coder"; - meta.maintainers = pkgs.coder.meta.maintainers; +{ pkgs, ... }: +{ + name = "coder"; + meta.maintainers = pkgs.coder.meta.maintainers; - nodes.machine = - { pkgs, ... }: - { - services.coder = { - enable = true; - accessUrl = "http://localhost:3000"; - }; + nodes.machine = + { pkgs, ... }: + { + services.coder = { + enable = true; + accessUrl = "http://localhost:3000"; }; + }; - testScript = '' - machine.start() - machine.wait_for_unit("postgresql.service") - machine.wait_for_unit("coder.service") - machine.wait_for_open_port(3000) + testScript = '' + machine.start() + machine.wait_for_unit("postgresql.service") + machine.wait_for_unit("coder.service") + machine.wait_for_open_port(3000) - machine.succeed("curl --fail http://localhost:3000") - ''; - } -) + machine.succeed("curl --fail http://localhost:3000") + ''; +} diff --git a/nixos/tests/collectd.nix b/nixos/tests/collectd.nix index 31d4689f1efe..7f99bff111e5 100644 --- a/nixos/tests/collectd.nix +++ b/nixos/tests/collectd.nix @@ -1,41 +1,39 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "collectd"; - meta = { }; +{ pkgs, ... }: +{ + name = "collectd"; + meta = { }; - nodes.machine = - { pkgs, lib, ... }: + nodes.machine = + { pkgs, lib, ... 
}: - { - services.collectd = { - enable = true; - extraConfig = lib.mkBefore '' - Interval 30 + { + services.collectd = { + enable = true; + extraConfig = lib.mkBefore '' + Interval 30 + ''; + plugins = { + rrdtool = '' + DataDir "/var/lib/collectd/rrd" ''; - plugins = { - rrdtool = '' - DataDir "/var/lib/collectd/rrd" - ''; - load = ""; - }; + load = ""; }; - environment.systemPackages = [ pkgs.rrdtool ]; }; + environment.systemPackages = [ pkgs.rrdtool ]; + }; - testScript = '' - machine.wait_for_unit("collectd.service") - hostname = machine.succeed("hostname").strip() - file = f"/var/lib/collectd/rrd/{hostname}/load/load.rrd" - machine.wait_for_file(file); - machine.succeed(f"rrdinfo {file} | logger") - # check that this file contains a shortterm metric - machine.succeed(f"rrdinfo {file} | grep -F 'ds[shortterm].min = '") - # check that interval was set before the plugins - machine.succeed(f"rrdinfo {file} | grep -F 'step = 30'") - # check that there are frequent updates - machine.succeed(f"cp {file} before") - machine.wait_until_fails(f"cmp before {file}") - ''; - } -) + testScript = '' + machine.wait_for_unit("collectd.service") + hostname = machine.succeed("hostname").strip() + file = f"/var/lib/collectd/rrd/{hostname}/load/load.rrd" + machine.wait_for_file(file); + machine.succeed(f"rrdinfo {file} | logger") + # check that this file contains a shortterm metric + machine.succeed(f"rrdinfo {file} | grep -F 'ds[shortterm].min = '") + # check that interval was set before the plugins + machine.succeed(f"rrdinfo {file} | grep -F 'step = 30'") + # check that there are frequent updates + machine.succeed(f"cp {file} before") + machine.wait_until_fails(f"cmp before {file}") + ''; +} diff --git a/nixos/tests/commafeed.nix b/nixos/tests/commafeed.nix index 7b65720818a9..c93619e46635 100644 --- a/nixos/tests/commafeed.nix +++ b/nixos/tests/commafeed.nix @@ -1,21 +1,19 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "commafeed"; +{ lib, ... }: +{ + name = "commafeed"; - nodes.server = { - services.commafeed = { - enable = true; - }; + nodes.server = { + services.commafeed = { + enable = true; }; + }; - testScript = '' - server.start() - server.wait_for_unit("commafeed.service") - server.wait_for_open_port(8082) - server.succeed("curl --fail --silent http://localhost:8082") - ''; + testScript = '' + server.start() + server.wait_for_unit("commafeed.service") + server.wait_for_open_port(8082) + server.succeed("curl --fail --silent http://localhost:8082") + ''; - meta.maintainers = [ lib.maintainers.raroh73 ]; - } -) + meta.maintainers = [ lib.maintainers.raroh73 ]; +} diff --git a/nixos/tests/connman.nix b/nixos/tests/connman.nix index 8ab00de10601..1902e0638b96 100644 --- a/nixos/tests/connman.nix +++ b/nixos/tests/connman.nix @@ -1,85 +1,83 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "connman"; - meta = with lib.maintainers; { - maintainers = [ rnhmjoj ]; +{ pkgs, lib, ... }: +{ + name = "connman"; + meta = with lib.maintainers; { + maintainers = [ rnhmjoj ]; + }; + + # Router running radvd on VLAN 1 + nodes.router = + { ... 
}: + { + imports = [ ../modules/profiles/minimal.nix ]; + + virtualisation.vlans = [ 1 ]; + + boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = true; + + networking = { + useDHCP = false; + interfaces.eth1.ipv6.addresses = [ + { + address = "fd12::1"; + prefixLength = 64; + } + ]; + }; + + services.radvd = { + enable = true; + config = '' + interface eth1 { + AdvSendAdvert on; + AdvManagedFlag on; + AdvOtherConfigFlag on; + prefix fd12::/64 { + AdvAutonomous off; + }; + }; + ''; + }; }; - # Router running radvd on VLAN 1 - nodes.router = - { ... }: - { - imports = [ ../modules/profiles/minimal.nix ]; + # Client running connman, connected to VLAN 1 + nodes.client = + { ... }: + { + virtualisation.vlans = [ 1 ]; - virtualisation.vlans = [ 1 ]; + # add a virtual wlan interface + boot.kernelModules = [ "mac80211_hwsim" ]; + boot.extraModprobeConfig = '' + options mac80211_hwsim radios=1 + ''; - boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = true; + # Note: the overrides are needed because the wifi is + # disabled with mkVMOverride in qemu-vm.nix. + services.connman.enable = lib.mkOverride 0 true; + services.connman.networkInterfaceBlacklist = [ "eth0" ]; + networking.wireless.enable = lib.mkOverride 0 true; + networking.wireless.interfaces = [ "wlan0" ]; + }; - networking = { - useDHCP = false; - interfaces.eth1.ipv6.addresses = [ - { - address = "fd12::1"; - prefixLength = 64; - } - ]; - }; + testScript = '' + start_all() - services.radvd = { - enable = true; - config = '' - interface eth1 { - AdvSendAdvert on; - AdvManagedFlag on; - AdvOtherConfigFlag on; - prefix fd12::/64 { - AdvAutonomous off; - }; - }; - ''; - }; - }; + with subtest("Router is ready"): + router.wait_for_unit("radvd.service") - # Client running connman, connected to VLAN 1 - nodes.client = - { ... }: - { - virtualisation.vlans = [ 1 ]; + with subtest("Daemons are running"): + client.wait_for_unit("wpa_supplicant-wlan0.service") + client.wait_for_unit("connman.service") + client.wait_until_succeeds("connmanctl state | grep -q ready") - # add a virtual wlan interface - boot.kernelModules = [ "mac80211_hwsim" ]; - boot.extraModprobeConfig = '' - options mac80211_hwsim radios=1 - ''; + with subtest("Wired interface is configured"): + client.wait_until_succeeds("ip -6 route | grep -q fd12::/64") + client.wait_until_succeeds("ping -c 1 fd12::1") - # Note: the overrides are needed because the wifi is - # disabled with mkVMOverride in qemu-vm.nix. 
- services.connman.enable = lib.mkOverride 0 true; - services.connman.networkInterfaceBlacklist = [ "eth0" ]; - networking.wireless.enable = lib.mkOverride 0 true; - networking.wireless.interfaces = [ "wlan0" ]; - }; - - testScript = '' - start_all() - - with subtest("Router is ready"): - router.wait_for_unit("radvd.service") - - with subtest("Daemons are running"): - client.wait_for_unit("wpa_supplicant-wlan0.service") - client.wait_for_unit("connman.service") - client.wait_until_succeeds("connmanctl state | grep -q ready") - - with subtest("Wired interface is configured"): - client.wait_until_succeeds("ip -6 route | grep -q fd12::/64") - client.wait_until_succeeds("ping -c 1 fd12::1") - - with subtest("Can set up a wireless access point"): - client.succeed("connmanctl enable wifi") - client.wait_until_succeeds("connmanctl tether wifi on nixos-test reproducibility | grep -q 'Enabled'") - client.wait_until_succeeds("iw wlan0 info | grep -q nixos-test") - ''; - } -) + with subtest("Can set up a wireless access point"): + client.succeed("connmanctl enable wifi") + client.wait_until_succeeds("connmanctl tether wifi on nixos-test reproducibility | grep -q 'Enabled'") + client.wait_until_succeeds("iw wlan0 info | grep -q nixos-test") + ''; +} diff --git a/nixos/tests/consul-template.nix b/nixos/tests/consul-template.nix index 015e09111b3d..6720cd44e629 100644 --- a/nixos/tests/consul-template.nix +++ b/nixos/tests/consul-template.nix @@ -1,43 +1,41 @@ -import ./make-test-python.nix ( - { ... }: - { - name = "consul-template"; +{ ... }: +{ + name = "consul-template"; - nodes.machine = - { ... }: - { - services.consul-template.instances.example.settings = { - template = [ - { - contents = '' - {{ key "example" }} - ''; - perms = "0600"; - destination = "/example"; - } - ]; - }; - - services.consul = { - enable = true; - extraConfig = { - server = true; - bootstrap_expect = 1; - bind_addr = "127.0.0.1"; - }; - }; + nodes.machine = + { ... }: + { + services.consul-template.instances.example.settings = { + template = [ + { + contents = '' + {{ key "example" }} + ''; + perms = "0600"; + destination = "/example"; + } + ]; }; - testScript = '' - machine.wait_for_unit("consul.service") - machine.wait_for_open_port(8500) + services.consul = { + enable = true; + extraConfig = { + server = true; + bootstrap_expect = 1; + bind_addr = "127.0.0.1"; + }; + }; + }; - machine.wait_for_unit("consul-template-example.service") + testScript = '' + machine.wait_for_unit("consul.service") + machine.wait_for_open_port(8500) - machine.wait_until_succeeds('consul kv put example example') + machine.wait_for_unit("consul-template-example.service") - machine.wait_for_file("/example") - machine.succeed('grep "example" /example') - ''; - } -) + machine.wait_until_succeeds('consul kv put example example') + + machine.wait_for_file("/example") + machine.succeed('grep "example" /example') + ''; +} diff --git a/nixos/tests/consul.nix b/nixos/tests/consul.nix index 253d70f13b59..8dc141b674f7 100644 --- a/nixos/tests/consul.nix +++ b/nixos/tests/consul.nix @@ -1,267 +1,265 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... 
}: - let - # Settings for both servers and agents - webUi = true; - retry_interval = "1s"; - raft_multiplier = 1; +let + # Settings for both servers and agents + webUi = true; + retry_interval = "1s"; + raft_multiplier = 1; - defaultExtraConfig = { - inherit retry_interval; - performance = { - inherit raft_multiplier; + defaultExtraConfig = { + inherit retry_interval; + performance = { + inherit raft_multiplier; + }; + }; + + allConsensusServerHosts = [ + "192.168.1.1" + "192.168.1.2" + "192.168.1.3" + ]; + + allConsensusClientHosts = [ + "192.168.2.1" + "192.168.2.2" + ]; + + firewallSettings = { + # See https://www.consul.io/docs/install/ports.html + allowedTCPPorts = [ + 8301 + 8302 + 8600 + 8500 + 8300 + ]; + allowedUDPPorts = [ + 8301 + 8302 + 8600 + ]; + }; + + client = + index: + { pkgs, ... }: + let + ip = builtins.elemAt allConsensusClientHosts index; + in + { + environment.systemPackages = [ pkgs.consul ]; + + networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [ + { + address = ip; + prefixLength = 16; + } + ]; + networking.firewall = firewallSettings; + + nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "consul" ]; + + services.consul = { + enable = true; + inherit webUi; + extraConfig = defaultExtraConfig // { + server = false; + retry_join = allConsensusServerHosts; + bind_addr = ip; + }; }; }; - allConsensusServerHosts = [ - "192.168.1.1" - "192.168.1.2" - "192.168.1.3" - ]; - - allConsensusClientHosts = [ - "192.168.2.1" - "192.168.2.2" - ]; - - firewallSettings = { - # See https://www.consul.io/docs/install/ports.html - allowedTCPPorts = [ - 8301 - 8302 - 8600 - 8500 - 8300 + server = + index: + { pkgs, ... }: + let + numConsensusServers = builtins.length allConsensusServerHosts; + thisConsensusServerHost = builtins.elemAt allConsensusServerHosts index; + ip = thisConsensusServerHost; # since we already use IPs to identify servers + in + { + networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [ + { + address = ip; + prefixLength = 16; + } ]; - allowedUDPPorts = [ - 8301 - 8302 - 8600 - ]; - }; + networking.firewall = firewallSettings; - client = - index: - { pkgs, ... }: - let - ip = builtins.elemAt allConsensusClientHosts index; - in - { - environment.systemPackages = [ pkgs.consul ]; + nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "consul" ]; - networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [ - { - address = ip; - prefixLength = 16; - } - ]; - networking.firewall = firewallSettings; - - nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "consul" ]; - - services.consul = { + services.consul = + assert builtins.elem thisConsensusServerHost allConsensusServerHosts; + { enable = true; inherit webUi; extraConfig = defaultExtraConfig // { - server = false; - retry_join = allConsensusServerHosts; + server = true; + bootstrap_expect = numConsensusServers; + # Tell Consul that we never intend to drop below this many servers. + # Ensures to not permanently lose consensus after temporary loss. + # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040 + autopilot.min_quorum = numConsensusServers; + retry_join = + # If there's only 1 node in the network, we allow self-join; + # otherwise, the node must not try to join itself, and join only the other servers. 
+ # See https://github.com/hashicorp/consul/issues/2868 + if numConsensusServers == 1 then + allConsensusServerHosts + else + builtins.filter (h: h != thisConsensusServerHost) allConsensusServerHosts; bind_addr = ip; }; }; - }; - - server = - index: - { pkgs, ... }: - let - numConsensusServers = builtins.length allConsensusServerHosts; - thisConsensusServerHost = builtins.elemAt allConsensusServerHosts index; - ip = thisConsensusServerHost; # since we already use IPs to identify servers - in - { - networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [ - { - address = ip; - prefixLength = 16; - } - ]; - networking.firewall = firewallSettings; - - nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "consul" ]; - - services.consul = - assert builtins.elem thisConsensusServerHost allConsensusServerHosts; - { - enable = true; - inherit webUi; - extraConfig = defaultExtraConfig // { - server = true; - bootstrap_expect = numConsensusServers; - # Tell Consul that we never intend to drop below this many servers. - # Ensures to not permanently lose consensus after temporary loss. - # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040 - autopilot.min_quorum = numConsensusServers; - retry_join = - # If there's only 1 node in the network, we allow self-join; - # otherwise, the node must not try to join itself, and join only the other servers. - # See https://github.com/hashicorp/consul/issues/2868 - if numConsensusServers == 1 then - allConsensusServerHosts - else - builtins.filter (h: h != thisConsensusServerHost) allConsensusServerHosts; - bind_addr = ip; - }; - }; - }; - in - { - name = "consul"; - - nodes = { - server1 = server 0; - server2 = server 1; - server3 = server 2; - - client1 = client 0; - client2 = client 1; }; +in +{ + name = "consul"; - testScript = '' - servers = [server1, server2, server3] - machines = [server1, server2, server3, client1, client2] + nodes = { + server1 = server 0; + server2 = server 1; + server3 = server 2; - for m in machines: - m.wait_for_unit("consul.service") + client1 = client 0; + client2 = client 1; + }; + + testScript = '' + servers = [server1, server2, server3] + machines = [server1, server2, server3, client1, client2] + + for m in machines: + m.wait_for_unit("consul.service") - def wait_for_healthy_servers(): - # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040 - # for why the `Voter` column of `list-peers` has that info. - # TODO: The `grep true` relies on the fact that currently in - # the output like - # # consul operator raft list-peers - # Node ID Address State Voter RaftProtocol - # server3 ... 192.168.1.3:8300 leader true 3 - # server2 ... 192.168.1.2:8300 follower true 3 - # server1 ... 192.168.1.1:8300 follower false 3 - # `Voter`is the only boolean column. - # Change this to the more reliable way to be defined by - # https://github.com/hashicorp/consul/issues/8118 - # once that ticket is closed. - for m in machines: - m.wait_until_succeeds( - "[ $(consul operator raft list-peers | grep true | wc -l) == 3 ]" - ) + def wait_for_healthy_servers(): + # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040 + # for why the `Voter` column of `list-peers` has that info. + # TODO: The `grep true` relies on the fact that currently in + # the output like + # # consul operator raft list-peers + # Node ID Address State Voter RaftProtocol + # server3 ... 192.168.1.3:8300 leader true 3 + # server2 ... 192.168.1.2:8300 follower true 3 + # server1 ... 
192.168.1.1:8300 follower false 3 + # `Voter`is the only boolean column. + # Change this to the more reliable way to be defined by + # https://github.com/hashicorp/consul/issues/8118 + # once that ticket is closed. + for m in machines: + m.wait_until_succeeds( + "[ $(consul operator raft list-peers | grep true | wc -l) == 3 ]" + ) - def wait_for_all_machines_alive(): - """ - Note that Serf-"alive" does not mean "Raft"-healthy; - see `wait_for_healthy_servers()` for that instead. - """ - for m in machines: - m.wait_until_succeeds("[ $(consul members | grep -o alive | wc -l) == 5 ]") + def wait_for_all_machines_alive(): + """ + Note that Serf-"alive" does not mean "Raft"-healthy; + see `wait_for_healthy_servers()` for that instead. + """ + for m in machines: + m.wait_until_succeeds("[ $(consul members | grep -o alive | wc -l) == 5 ]") - wait_for_healthy_servers() - # Also wait for clients to be alive. - wait_for_all_machines_alive() + wait_for_healthy_servers() + # Also wait for clients to be alive. + wait_for_all_machines_alive() - client1.succeed("consul kv put testkey 42") - client2.succeed("[ $(consul kv get testkey) == 42 ]") + client1.succeed("consul kv put testkey 42") + client2.succeed("[ $(consul kv get testkey) == 42 ]") - def rolling_restart_test(proper_rolling_procedure=True): - """ - Tests that the cluster can tolearate failures of any single server, - following the recommended rolling upgrade procedure from - https://www.consul.io/docs/upgrading#standard-upgrades. + def rolling_restart_test(proper_rolling_procedure=True): + """ + Tests that the cluster can tolearate failures of any single server, + following the recommended rolling upgrade procedure from + https://www.consul.io/docs/upgrading#standard-upgrades. - Optionally, `proper_rolling_procedure=False` can be given - to wait only for each server to be back `Healthy`, not `Stable` - in the Raft consensus, see Consul setting `ServerStabilizationTime` and - https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040. - """ + Optionally, `proper_rolling_procedure=False` can be given + to wait only for each server to be back `Healthy`, not `Stable` + in the Raft consensus, see Consul setting `ServerStabilizationTime` and + https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040. + """ - for server in servers: - server.block() - server.systemctl("stop consul") + for server in servers: + server.block() + server.systemctl("stop consul") - # Make sure the stopped peer is recognized as being down - client1.wait_until_succeeds( - f"[ $(consul members | grep {server.name} | grep -o -E 'failed|left' | wc -l) == 1 ]" - ) + # Make sure the stopped peer is recognized as being down + client1.wait_until_succeeds( + f"[ $(consul members | grep {server.name} | grep -o -E 'failed|left' | wc -l) == 1 ]" + ) - # For each client, wait until they have connection again - # using `kv get -recurse` before issuing commands. - client1.wait_until_succeeds("consul kv get -recurse") - client2.wait_until_succeeds("consul kv get -recurse") + # For each client, wait until they have connection again + # using `kv get -recurse` before issuing commands. + client1.wait_until_succeeds("consul kv get -recurse") + client2.wait_until_succeeds("consul kv get -recurse") - # Do some consul actions while one server is down. - client1.succeed("consul kv put testkey 43") - client2.succeed("[ $(consul kv get testkey) == 43 ]") - client2.succeed("consul kv delete testkey") + # Do some consul actions while one server is down. 
+ client1.succeed("consul kv put testkey 43") + client2.succeed("[ $(consul kv get testkey) == 43 ]") + client2.succeed("consul kv delete testkey") - server.unblock() - server.systemctl("start consul") + server.unblock() + server.systemctl("start consul") - if proper_rolling_procedure: - # Wait for recovery. - wait_for_healthy_servers() - else: - # NOT proper rolling upgrade procedure, see above. - wait_for_all_machines_alive() + if proper_rolling_procedure: + # Wait for recovery. + wait_for_healthy_servers() + else: + # NOT proper rolling upgrade procedure, see above. + wait_for_all_machines_alive() - # Wait for client connections. - client1.wait_until_succeeds("consul kv get -recurse") - client2.wait_until_succeeds("consul kv get -recurse") + # Wait for client connections. + client1.wait_until_succeeds("consul kv get -recurse") + client2.wait_until_succeeds("consul kv get -recurse") - # Do some consul actions with server back up. - client1.succeed("consul kv put testkey 44") - client2.succeed("[ $(consul kv get testkey) == 44 ]") - client2.succeed("consul kv delete testkey") + # Do some consul actions with server back up. + client1.succeed("consul kv put testkey 44") + client2.succeed("[ $(consul kv get testkey) == 44 ]") + client2.succeed("consul kv delete testkey") - def all_servers_crash_simultaneously_test(): - """ - Tests that the cluster will eventually come back after all - servers crash simultaneously. - """ + def all_servers_crash_simultaneously_test(): + """ + Tests that the cluster will eventually come back after all + servers crash simultaneously. + """ - for server in servers: - server.block() - server.systemctl("stop --no-block consul") + for server in servers: + server.block() + server.systemctl("stop --no-block consul") - for server in servers: - # --no-block is async, so ensure it has been stopped by now - server.wait_until_fails("systemctl is-active --quiet consul") - server.unblock() - server.systemctl("start consul") + for server in servers: + # --no-block is async, so ensure it has been stopped by now + server.wait_until_fails("systemctl is-active --quiet consul") + server.unblock() + server.systemctl("start consul") - # Wait for recovery. - wait_for_healthy_servers() + # Wait for recovery. + wait_for_healthy_servers() - # Wait for client connections. - client1.wait_until_succeeds("consul kv get -recurse") - client2.wait_until_succeeds("consul kv get -recurse") + # Wait for client connections. + client1.wait_until_succeeds("consul kv get -recurse") + client2.wait_until_succeeds("consul kv get -recurse") - # Do some consul actions with servers back up. - client1.succeed("consul kv put testkey 44") - client2.succeed("[ $(consul kv get testkey) == 44 ]") - client2.succeed("consul kv delete testkey") + # Do some consul actions with servers back up. + client1.succeed("consul kv put testkey 44") + client2.succeed("[ $(consul kv get testkey) == 44 ]") + client2.succeed("consul kv delete testkey") - # Run the tests. + # Run the tests. 
- print("rolling_restart_test()") - rolling_restart_test() + print("rolling_restart_test()") + rolling_restart_test() - print("all_servers_crash_simultaneously_test()") - all_servers_crash_simultaneously_test() + print("all_servers_crash_simultaneously_test()") + all_servers_crash_simultaneously_test() - print("rolling_restart_test(proper_rolling_procedure=False)") - rolling_restart_test(proper_rolling_procedure=False) - ''; - } -) + print("rolling_restart_test(proper_rolling_procedure=False)") + rolling_restart_test(proper_rolling_procedure=False) + ''; +} diff --git a/nixos/tests/containers-bridge.nix b/nixos/tests/containers-bridge.nix index a8e9f574924f..1f38b1b48e11 100644 --- a/nixos/tests/containers-bridge.nix +++ b/nixos/tests/containers-bridge.nix @@ -5,110 +5,108 @@ let containerIp6 = "fc00::2/7"; in -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-bridge"; - meta = { - maintainers = with lib.maintainers; [ - aristid - aszlig - kampfschlaefer - ]; - }; +{ pkgs, lib, ... }: +{ + name = "containers-bridge"; + meta = { + maintainers = with lib.maintainers; [ + aristid + aszlig + kampfschlaefer + ]; + }; - nodes.machine = - { pkgs, ... }: - { - imports = [ ../modules/installer/cd-dvd/channel.nix ]; - virtualisation.writableStore = true; + nodes.machine = + { pkgs, ... }: + { + imports = [ ../modules/installer/cd-dvd/channel.nix ]; + virtualisation.writableStore = true; - networking.bridges = { - br0 = { - interfaces = [ ]; - }; + networking.bridges = { + br0 = { + interfaces = [ ]; }; - networking.interfaces = { - br0 = { - ipv4.addresses = [ - { - address = hostIp; - prefixLength = 24; - } - ]; - ipv6.addresses = [ - { - address = hostIp6; - prefixLength = 7; - } - ]; - }; + }; + networking.interfaces = { + br0 = { + ipv4.addresses = [ + { + address = hostIp; + prefixLength = 24; + } + ]; + ipv6.addresses = [ + { + address = hostIp6; + prefixLength = 7; + } + ]; }; - - containers.webserver = { - autoStart = true; - privateNetwork = true; - hostBridge = "br0"; - localAddress = containerIp; - localAddress6 = containerIp6; - config = { - services.httpd.enable = true; - services.httpd.adminAddr = "foo@example.org"; - networking.firewall.allowedTCPPorts = [ 80 ]; - }; - }; - - containers.web-noip = { - autoStart = true; - privateNetwork = true; - hostBridge = "br0"; - config = { - services.httpd.enable = true; - services.httpd.adminAddr = "foo@example.org"; - networking.firewall.allowedTCPPorts = [ 80 ]; - }; - }; - - virtualisation.additionalPaths = [ pkgs.stdenv ]; }; - testScript = '' - machine.wait_for_unit("default.target") - assert "webserver" in machine.succeed("nixos-container list") + containers.webserver = { + autoStart = true; + privateNetwork = true; + hostBridge = "br0"; + localAddress = containerIp; + localAddress6 = containerIp6; + config = { + services.httpd.enable = true; + services.httpd.adminAddr = "foo@example.org"; + networking.firewall.allowedTCPPorts = [ 80 ]; + }; + }; - with subtest("Start the webserver container"): - assert "up" in machine.succeed("nixos-container status webserver") + containers.web-noip = { + autoStart = true; + privateNetwork = true; + hostBridge = "br0"; + config = { + services.httpd.enable = true; + services.httpd.adminAddr = "foo@example.org"; + networking.firewall.allowedTCPPorts = [ 80 ]; + }; + }; - with subtest("Bridges exist inside containers"): - machine.succeed( - "nixos-container run webserver -- ip link show eth0", - "nixos-container run web-noip -- ip link show eth0", - ) + 
virtualisation.additionalPaths = [ pkgs.stdenv ]; + }; - ip = "${containerIp}".split("/")[0] - machine.succeed(f"ping -n -c 1 {ip}") - machine.succeed(f"curl --fail http://{ip}/ > /dev/null") + testScript = '' + machine.wait_for_unit("default.target") + assert "webserver" in machine.succeed("nixos-container list") - ip6 = "${containerIp6}".split("/")[0] - machine.succeed(f"ping -n -c 1 {ip6}") - machine.succeed(f"curl --fail http://[{ip6}]/ > /dev/null") + with subtest("Start the webserver container"): + assert "up" in machine.succeed("nixos-container status webserver") - with subtest( - "nixos-container show-ip works in case of an ipv4 address " - + "with subnetmask in CIDR notation." - ): - result = machine.succeed("nixos-container show-ip webserver").rstrip() - assert result == ip + with subtest("Bridges exist inside containers"): + machine.succeed( + "nixos-container run webserver -- ip link show eth0", + "nixos-container run web-noip -- ip link show eth0", + ) - with subtest("Stop the container"): - machine.succeed("nixos-container stop webserver") - machine.fail( - f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null", - f"curl --fail --connect-timeout 2 http://[{ip6}]/ > /dev/null", - ) + ip = "${containerIp}".split("/")[0] + machine.succeed(f"ping -n -c 1 {ip}") + machine.succeed(f"curl --fail http://{ip}/ > /dev/null") - # Destroying a declarative container should fail. - machine.fail("nixos-container destroy webserver") - ''; - } -) + ip6 = "${containerIp6}".split("/")[0] + machine.succeed(f"ping -n -c 1 {ip6}") + machine.succeed(f"curl --fail http://[{ip6}]/ > /dev/null") + + with subtest( + "nixos-container show-ip works in case of an ipv4 address " + + "with subnetmask in CIDR notation." + ): + result = machine.succeed("nixos-container show-ip webserver").rstrip() + assert result == ip + + with subtest("Stop the container"): + machine.succeed("nixos-container stop webserver") + machine.fail( + f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null", + f"curl --fail --connect-timeout 2 http://[{ip6}]/ > /dev/null", + ) + + # Destroying a declarative container should fail. + machine.fail("nixos-container destroy webserver") + ''; +} diff --git a/nixos/tests/containers-custom-pkgs.nix b/nixos/tests/containers-custom-pkgs.nix index 74b1ea207b68..b341232fd8cd 100644 --- a/nixos/tests/containers-custom-pkgs.nix +++ b/nixos/tests/containers-custom-pkgs.nix @@ -1,48 +1,46 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let +{ pkgs, lib, ... }: +let - customPkgs = pkgs.appendOverlays [ - (self: super: { - hello = super.hello.overrideAttrs (old: { - name = "custom-hello"; - }); - }) - ]; + customPkgs = pkgs.appendOverlays [ + (self: super: { + hello = super.hello.overrideAttrs (old: { + name = "custom-hello"; + }); + }) + ]; - in - { - name = "containers-custom-pkgs"; - meta = { - maintainers = with lib.maintainers; [ erikarvstedt ]; +in +{ + name = "containers-custom-pkgs"; + meta = { + maintainers = with lib.maintainers; [ erikarvstedt ]; + }; + + nodes.machine = + { config, ... }: + { + assertions = + let + helloName = (builtins.head config.containers.test.config.system.extraDependencies).name; + in + [ + { + assertion = helloName == "custom-hello"; + message = "Unexpected value: ${helloName}"; + } + ]; + + containers.test = { + autoStart = true; + config = + { pkgs, config, ... }: + { + nixpkgs.pkgs = customPkgs; + system.extraDependencies = [ pkgs.hello ]; + }; + }; }; - nodes.machine = - { config, ... 
}: - { - assertions = - let - helloName = (builtins.head config.containers.test.config.system.extraDependencies).name; - in - [ - { - assertion = helloName == "custom-hello"; - message = "Unexpected value: ${helloName}"; - } - ]; - - containers.test = { - autoStart = true; - config = - { pkgs, config, ... }: - { - nixpkgs.pkgs = customPkgs; - system.extraDependencies = [ pkgs.hello ]; - }; - }; - }; - - # This test only consists of evaluating the test machine - testScript = "pass"; - } -) + # This test only consists of evaluating the test machine + testScript = "pass"; +} diff --git a/nixos/tests/containers-ephemeral.nix b/nixos/tests/containers-ephemeral.nix index 5204ba67e26b..798962967704 100644 --- a/nixos/tests/containers-ephemeral.nix +++ b/nixos/tests/containers-ephemeral.nix @@ -1,59 +1,57 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-ephemeral"; - meta = { - maintainers = with lib.maintainers; [ patryk27 ]; - }; +{ pkgs, lib, ... }: +{ + name = "containers-ephemeral"; + meta = { + maintainers = with lib.maintainers; [ patryk27 ]; + }; - nodes.machine = - { pkgs, ... }: - { - virtualisation.writableStore = true; + nodes.machine = + { pkgs, ... }: + { + virtualisation.writableStore = true; - containers.webserver = { - ephemeral = true; - privateNetwork = true; - hostAddress = "10.231.136.1"; - localAddress = "10.231.136.2"; - config = { - services.nginx = { - enable = true; - virtualHosts.localhost = { - root = pkgs.runCommand "localhost" { } '' - mkdir "$out" - echo hello world > "$out/index.html" - ''; - }; + containers.webserver = { + ephemeral = true; + privateNetwork = true; + hostAddress = "10.231.136.1"; + localAddress = "10.231.136.2"; + config = { + services.nginx = { + enable = true; + virtualHosts.localhost = { + root = pkgs.runCommand "localhost" { } '' + mkdir "$out" + echo hello world > "$out/index.html" + ''; }; - networking.firewall.allowedTCPPorts = [ 80 ]; }; + networking.firewall.allowedTCPPorts = [ 80 ]; }; }; + }; - testScript = '' - assert "webserver" in machine.succeed("nixos-container list") + testScript = '' + assert "webserver" in machine.succeed("nixos-container list") - machine.succeed("nixos-container start webserver") + machine.succeed("nixos-container start webserver") - with subtest("Container got its own root folder"): - machine.succeed("ls /run/nixos-containers/webserver") + with subtest("Container got its own root folder"): + machine.succeed("ls /run/nixos-containers/webserver") - with subtest("Container persistent directory is not created"): - machine.fail("ls /var/lib/nixos-containers/webserver") + with subtest("Container persistent directory is not created"): + machine.fail("ls /var/lib/nixos-containers/webserver") - # Since "start" returns after the container has reached - # multi-user.target, we should now be able to access it. - ip = machine.succeed("nixos-container show-ip webserver").rstrip() - machine.succeed(f"ping -n -c1 {ip}") - machine.succeed(f"curl --fail http://{ip}/ > /dev/null") + # Since "start" returns after the container has reached + # multi-user.target, we should now be able to access it. 
+ ip = machine.succeed("nixos-container show-ip webserver").rstrip() + machine.succeed(f"ping -n -c1 {ip}") + machine.succeed(f"curl --fail http://{ip}/ > /dev/null") - with subtest("Stop the container"): - machine.succeed("nixos-container stop webserver") - machine.fail(f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null") + with subtest("Stop the container"): + machine.succeed("nixos-container stop webserver") + machine.fail(f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null") - with subtest("Container's root folder was removed"): - machine.fail("ls /run/nixos-containers/webserver") - ''; - } -) + with subtest("Container's root folder was removed"): + machine.fail("ls /run/nixos-containers/webserver") + ''; +} diff --git a/nixos/tests/containers-extra_veth.nix b/nixos/tests/containers-extra_veth.nix index ed3b8099036b..3a50fa824986 100644 --- a/nixos/tests/containers-extra_veth.nix +++ b/nixos/tests/containers-extra_veth.nix @@ -1,115 +1,113 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-extra_veth"; - meta = { - maintainers = with lib.maintainers; [ kampfschlaefer ]; - }; +{ pkgs, lib, ... }: +{ + name = "containers-extra_veth"; + meta = { + maintainers = with lib.maintainers; [ kampfschlaefer ]; + }; - nodes.machine = - { pkgs, ... }: - { - imports = [ ../modules/installer/cd-dvd/channel.nix ]; - virtualisation.writableStore = true; - virtualisation.vlans = [ ]; + nodes.machine = + { pkgs, ... }: + { + imports = [ ../modules/installer/cd-dvd/channel.nix ]; + virtualisation.writableStore = true; + virtualisation.vlans = [ ]; - networking.useDHCP = false; - networking.bridges = { - br0 = { - interfaces = [ ]; - }; - br1 = { - interfaces = [ ]; - }; + networking.useDHCP = false; + networking.bridges = { + br0 = { + interfaces = [ ]; }; - networking.interfaces = { - br0 = { - ipv4.addresses = [ - { - address = "192.168.0.1"; - prefixLength = 24; - } - ]; - ipv6.addresses = [ - { - address = "fc00::1"; - prefixLength = 7; - } - ]; - }; - br1 = { - ipv4.addresses = [ - { - address = "192.168.1.1"; - prefixLength = 24; - } - ]; - }; + br1 = { + interfaces = [ ]; }; - - containers.webserver = { - autoStart = true; - privateNetwork = true; - hostBridge = "br0"; - localAddress = "192.168.0.100/24"; - localAddress6 = "fc00::2/7"; - extraVeths = { - veth1 = { - hostBridge = "br1"; - localAddress = "192.168.1.100/24"; - }; - veth2 = { - hostAddress = "192.168.2.1"; - localAddress = "192.168.2.100"; - }; - }; - config = { - networking.firewall.allowedTCPPorts = [ 80 ]; - }; + }; + networking.interfaces = { + br0 = { + ipv4.addresses = [ + { + address = "192.168.0.1"; + prefixLength = 24; + } + ]; + ipv6.addresses = [ + { + address = "fc00::1"; + prefixLength = 7; + } + ]; + }; + br1 = { + ipv4.addresses = [ + { + address = "192.168.1.1"; + prefixLength = 24; + } + ]; }; - - virtualisation.additionalPaths = [ pkgs.stdenv ]; }; - testScript = '' - machine.wait_for_unit("default.target") - assert "webserver" in machine.succeed("nixos-container list") + containers.webserver = { + autoStart = true; + privateNetwork = true; + hostBridge = "br0"; + localAddress = "192.168.0.100/24"; + localAddress6 = "fc00::2/7"; + extraVeths = { + veth1 = { + hostBridge = "br1"; + localAddress = "192.168.1.100/24"; + }; + veth2 = { + hostAddress = "192.168.2.1"; + localAddress = "192.168.2.100"; + }; + }; + config = { + networking.firewall.allowedTCPPorts = [ 80 ]; + }; + }; - with subtest("Status of the webserver container is up"): - assert "up" in 
machine.succeed("nixos-container status webserver") + virtualisation.additionalPaths = [ pkgs.stdenv ]; + }; - with subtest("Ensure that the veths are inside the container"): - assert "state UP" in machine.succeed( - "nixos-container run webserver -- ip link show veth1" - ) - assert "state UP" in machine.succeed( - "nixos-container run webserver -- ip link show veth2" - ) + testScript = '' + machine.wait_for_unit("default.target") + assert "webserver" in machine.succeed("nixos-container list") - with subtest("Ensure the presence of the extra veths"): - assert "state UP" in machine.succeed("ip link show veth1") - assert "state UP" in machine.succeed("ip link show veth2") + with subtest("Status of the webserver container is up"): + assert "up" in machine.succeed("nixos-container status webserver") - with subtest("Ensure the veth1 is part of br1 on the host"): - assert "master br1" in machine.succeed("ip link show veth1") + with subtest("Ensure that the veths are inside the container"): + assert "state UP" in machine.succeed( + "nixos-container run webserver -- ip link show veth1" + ) + assert "state UP" in machine.succeed( + "nixos-container run webserver -- ip link show veth2" + ) - with subtest("Ping on main veth"): - machine.succeed("ping -n -c 1 192.168.0.100") - machine.succeed("ping -n -c 1 fc00::2") + with subtest("Ensure the presence of the extra veths"): + assert "state UP" in machine.succeed("ip link show veth1") + assert "state UP" in machine.succeed("ip link show veth2") - with subtest("Ping on the first extra veth"): - machine.succeed("ping -n -c 1 192.168.1.100 >&2") + with subtest("Ensure the veth1 is part of br1 on the host"): + assert "master br1" in machine.succeed("ip link show veth1") - with subtest("Ping on the second extra veth"): - machine.succeed("ping -n -c 1 192.168.2.100 >&2") + with subtest("Ping on main veth"): + machine.succeed("ping -n -c 1 192.168.0.100") + machine.succeed("ping -n -c 1 fc00::2") - with subtest("Container can be stopped"): - machine.succeed("nixos-container stop webserver") - machine.fail("ping -n -c 1 192.168.1.100 >&2") - machine.fail("ping -n -c 1 192.168.2.100 >&2") + with subtest("Ping on the first extra veth"): + machine.succeed("ping -n -c 1 192.168.1.100 >&2") - with subtest("Destroying a declarative container should fail"): - machine.fail("nixos-container destroy webserver") - ''; - } -) + with subtest("Ping on the second extra veth"): + machine.succeed("ping -n -c 1 192.168.2.100 >&2") + + with subtest("Container can be stopped"): + machine.succeed("nixos-container stop webserver") + machine.fail("ping -n -c 1 192.168.1.100 >&2") + machine.fail("ping -n -c 1 192.168.2.100 >&2") + + with subtest("Destroying a declarative container should fail"): + machine.fail("nixos-container destroy webserver") + ''; +} diff --git a/nixos/tests/containers-hosts.nix b/nixos/tests/containers-hosts.nix index 0e5f50f5292c..4e827e7c3983 100644 --- a/nixos/tests/containers-hosts.nix +++ b/nixos/tests/containers-hosts.nix @@ -1,55 +1,53 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-hosts"; - meta = { - maintainers = with lib.maintainers; [ montag451 ]; - }; +{ pkgs, lib, ... }: +{ + name = "containers-hosts"; + meta = { + maintainers = with lib.maintainers; [ montag451 ]; + }; - nodes.machine = - { lib, ... }: - { - virtualisation.vlans = [ ]; + nodes.machine = + { lib, ... 
}: + { + virtualisation.vlans = [ ]; - networking.bridges.br0.interfaces = [ ]; - networking.interfaces.br0.ipv4.addresses = [ - { - address = "10.11.0.254"; - prefixLength = 24; - } - ]; + networking.bridges.br0.interfaces = [ ]; + networking.interfaces.br0.ipv4.addresses = [ + { + address = "10.11.0.254"; + prefixLength = 24; + } + ]; - # Force /etc/hosts to be the only source for host name resolution - environment.etc."nsswitch.conf".text = lib.mkForce '' - hosts: files - ''; + # Force /etc/hosts to be the only source for host name resolution + environment.etc."nsswitch.conf".text = lib.mkForce '' + hosts: files + ''; - containers.simple = { - autoStart = true; - privateNetwork = true; - localAddress = "10.10.0.1"; - hostAddress = "10.10.0.254"; + containers.simple = { + autoStart = true; + privateNetwork = true; + localAddress = "10.10.0.1"; + hostAddress = "10.10.0.254"; - config = { }; - }; - - containers.netmask = { - autoStart = true; - privateNetwork = true; - hostBridge = "br0"; - localAddress = "10.11.0.1/24"; - - config = { }; - }; + config = { }; }; - testScript = '' - start_all() - machine.wait_for_unit("default.target") + containers.netmask = { + autoStart = true; + privateNetwork = true; + hostBridge = "br0"; + localAddress = "10.11.0.1/24"; - with subtest("Ping the containers using the entries added in /etc/hosts"): - for host in "simple.containers", "netmask.containers": - machine.succeed(f"ping -n -c 1 {host}") - ''; - } -) + config = { }; + }; + }; + + testScript = '' + start_all() + machine.wait_for_unit("default.target") + + with subtest("Ping the containers using the entries added in /etc/hosts"): + for host in "simple.containers", "netmask.containers": + machine.succeed(f"ping -n -c 1 {host}") + ''; +} diff --git a/nixos/tests/containers-imperative.nix b/nixos/tests/containers-imperative.nix index f1bd56519c6d..746e72f54a32 100644 --- a/nixos/tests/containers-imperative.nix +++ b/nixos/tests/containers-imperative.nix @@ -1,197 +1,195 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-imperative"; - meta = { - maintainers = with lib.maintainers; [ - aristid - aszlig - kampfschlaefer - ]; +{ pkgs, lib, ... }: +{ + name = "containers-imperative"; + meta = { + maintainers = with lib.maintainers; [ + aristid + aszlig + kampfschlaefer + ]; + }; + + nodes.machine = + { + config, + pkgs, + lib, + ... + }: + { + imports = [ ../modules/installer/cd-dvd/channel.nix ]; + + # XXX: Sandbox setup fails while trying to hardlink files from the host's + # store file system into the prepared chroot directory. + nix.settings.sandbox = false; + nix.settings.substituters = [ ]; # don't try to access cache.nixos.org + + virtualisation.memorySize = 2048; + virtualisation.writableStore = true; + # Make sure we always have all the required dependencies for creating a + # container available within the VM, because we don't have network access. + virtualisation.additionalPaths = + let + emptyContainer = import ../lib/eval-config.nix { + modules = lib.singleton { + nixpkgs = { inherit (config.nixpkgs) localSystem; }; + + containers.foo.config = { }; + }; + + # The system is inherited from the host above. + # Set it to null, to remove the "legacy" entrypoint's non-hermetic default. 
+ system = null; + }; + in + with pkgs; + [ + stdenv + stdenvNoCC + emptyContainer.config.containers.foo.path + libxslt + desktop-file-utils + texinfo + docbook5 + libxml2 + docbook_xsl_ns + xorg.lndir + documentation-highlighter + perlPackages.ConfigIniFiles + ]; }; - nodes.machine = - { - config, - pkgs, - lib, - ... - }: - { - imports = [ ../modules/installer/cd-dvd/channel.nix ]; - - # XXX: Sandbox setup fails while trying to hardlink files from the host's - # store file system into the prepared chroot directory. - nix.settings.sandbox = false; - nix.settings.substituters = [ ]; # don't try to access cache.nixos.org - - virtualisation.memorySize = 2048; - virtualisation.writableStore = true; - # Make sure we always have all the required dependencies for creating a - # container available within the VM, because we don't have network access. - virtualisation.additionalPaths = - let - emptyContainer = import ../lib/eval-config.nix { - modules = lib.singleton { - nixpkgs = { inherit (config.nixpkgs) localSystem; }; - - containers.foo.config = { }; - }; - - # The system is inherited from the host above. - # Set it to null, to remove the "legacy" entrypoint's non-hermetic default. - system = null; - }; - in - with pkgs; - [ - stdenv - stdenvNoCC - emptyContainer.config.containers.foo.path - libxslt - desktop-file-utils - texinfo - docbook5 - libxml2 - docbook_xsl_ns - xorg.lndir - documentation-highlighter - perlPackages.ConfigIniFiles - ]; - }; - - testScript = - let - tmpfilesContainerConfig = pkgs.writeText "container-config-tmpfiles" '' - { - systemd.tmpfiles.rules = [ "d /foo - - - - -" ]; - systemd.services.foo = { - serviceConfig.Type = "oneshot"; - script = "ls -al /foo"; - wantedBy = [ "multi-user.target" ]; - }; - } - ''; - brokenCfg = pkgs.writeText "broken.nix" '' - { - assertions = [ - { assertion = false; - message = "I never evaluate"; - } - ]; - } - ''; - in - '' - with subtest("Make sure we have a NixOS tree (required by ‘nixos-container create’)"): - machine.succeed("PAGER=cat nix-env -qa -A nixos.hello >&2") - - id1, id2 = None, None - - with subtest("Create some containers imperatively"): - id1 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip() - machine.log(f"created container {id1}") - - id2 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip() - machine.log(f"created container {id2}") - - assert id1 != id2 - - with subtest(f"Put the root of {id2} into a bind mount"): - machine.succeed( - f"mv /var/lib/nixos-containers/{id2} /id2-bindmount", - f"mount --bind /id2-bindmount /var/lib/nixos-containers/{id1}", - ) - - ip1 = machine.succeed(f"nixos-container show-ip {id1}").rstrip() - ip2 = machine.succeed(f"nixos-container show-ip {id2}").rstrip() - assert ip1 != ip2 - - with subtest( - "Create a directory and a file we can later check if it still exists " - + "after destruction of the container" - ): - machine.succeed("mkdir /nested-bindmount") - machine.succeed("echo important data > /nested-bindmount/dummy") - - with subtest( - "Create a directory with a dummy file and bind-mount it into both containers." 
- ): - for id in id1, id2: - important_path = f"/var/lib/nixos-containers/{id}/very/important/data" - machine.succeed( - f"mkdir -p {important_path}", - f"mount --bind /nested-bindmount {important_path}", - ) - - with subtest("Start one of them"): - machine.succeed(f"nixos-container start {id1}") - - with subtest("Execute commands via the root shell"): - assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname") - - with subtest("Execute a nix command via the root shell. (regression test for #40355)"): - machine.succeed( - f"nixos-container run {id1} -- nix-instantiate -E " - + '\'derivation { name = "empty"; builder = "false"; system = "false"; }\' ' - ) - - with subtest("Stop and start (regression test for #4989)"): - machine.succeed(f"nixos-container stop {id1}") - machine.succeed(f"nixos-container start {id1}") - - # clear serial backlog for next tests - machine.succeed("logger eat console backlog 3ea46eb2-7f82-4f70-b810-3f00e3dd4c4d") - machine.wait_for_console_text( - "eat console backlog 3ea46eb2-7f82-4f70-b810-3f00e3dd4c4d" - ) - - with subtest("Stop a container early"): - machine.succeed(f"nixos-container stop {id1}") - machine.succeed(f"nixos-container start {id1} >&2 &") - machine.wait_for_console_text("Stage 2") - machine.succeed(f"nixos-container stop {id1}") - machine.wait_for_console_text(f"Container {id1} exited successfully") - machine.succeed(f"nixos-container start {id1}") - - with subtest("Stop a container without machined (regression test for #109695)"): - machine.systemctl("stop systemd-machined") - machine.succeed(f"nixos-container stop {id1}") - machine.wait_for_console_text(f"Container {id1} has been shut down") - machine.succeed(f"nixos-container start {id1}") - - with subtest("tmpfiles are present"): - machine.log("creating container tmpfiles") - machine.succeed( - "nixos-container create tmpfiles --config-file ${tmpfilesContainerConfig}" - ) - machine.log("created, starting…") - machine.succeed("nixos-container start tmpfiles") - machine.log("done starting, investigating…") - machine.succeed( - "echo $(nixos-container run tmpfiles -- systemctl is-active foo.service) | grep -q active;" - ) - machine.succeed("nixos-container destroy tmpfiles") - - with subtest("Execute commands via the root shell"): - assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname") - - with subtest("Destroy the containers"): - for id in id1, id2: - machine.succeed(f"nixos-container destroy {id}") - - with subtest("Check whether destruction of any container has killed important data"): - machine.succeed("grep -qF 'important data' /nested-bindmount/dummy") - - with subtest("Ensure that the container path is gone"): - print(machine.succeed("ls -lsa /var/lib/nixos-containers")) - machine.succeed(f"test ! -e /var/lib/nixos-containers/{id1}") - - with subtest("Ensure that a failed container creation doesn'leave any state"): - machine.fail( - "nixos-container create b0rk --config-file ${brokenCfg}" - ) - machine.succeed("test ! 
-e /var/lib/nixos-containers/b0rk") + testScript = + let + tmpfilesContainerConfig = pkgs.writeText "container-config-tmpfiles" '' + { + systemd.tmpfiles.rules = [ "d /foo - - - - -" ]; + systemd.services.foo = { + serviceConfig.Type = "oneshot"; + script = "ls -al /foo"; + wantedBy = [ "multi-user.target" ]; + }; + } ''; - } -) + brokenCfg = pkgs.writeText "broken.nix" '' + { + assertions = [ + { assertion = false; + message = "I never evaluate"; + } + ]; + } + ''; + in + '' + with subtest("Make sure we have a NixOS tree (required by ‘nixos-container create’)"): + machine.succeed("PAGER=cat nix-env -qa -A nixos.hello >&2") + + id1, id2 = None, None + + with subtest("Create some containers imperatively"): + id1 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip() + machine.log(f"created container {id1}") + + id2 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip() + machine.log(f"created container {id2}") + + assert id1 != id2 + + with subtest(f"Put the root of {id2} into a bind mount"): + machine.succeed( + f"mv /var/lib/nixos-containers/{id2} /id2-bindmount", + f"mount --bind /id2-bindmount /var/lib/nixos-containers/{id1}", + ) + + ip1 = machine.succeed(f"nixos-container show-ip {id1}").rstrip() + ip2 = machine.succeed(f"nixos-container show-ip {id2}").rstrip() + assert ip1 != ip2 + + with subtest( + "Create a directory and a file we can later check if it still exists " + + "after destruction of the container" + ): + machine.succeed("mkdir /nested-bindmount") + machine.succeed("echo important data > /nested-bindmount/dummy") + + with subtest( + "Create a directory with a dummy file and bind-mount it into both containers." + ): + for id in id1, id2: + important_path = f"/var/lib/nixos-containers/{id}/very/important/data" + machine.succeed( + f"mkdir -p {important_path}", + f"mount --bind /nested-bindmount {important_path}", + ) + + with subtest("Start one of them"): + machine.succeed(f"nixos-container start {id1}") + + with subtest("Execute commands via the root shell"): + assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname") + + with subtest("Execute a nix command via the root shell. 
(regression test for #40355)"): + machine.succeed( + f"nixos-container run {id1} -- nix-instantiate -E " + + '\'derivation { name = "empty"; builder = "false"; system = "false"; }\' ' + ) + + with subtest("Stop and start (regression test for #4989)"): + machine.succeed(f"nixos-container stop {id1}") + machine.succeed(f"nixos-container start {id1}") + + # clear serial backlog for next tests + machine.succeed("logger eat console backlog 3ea46eb2-7f82-4f70-b810-3f00e3dd4c4d") + machine.wait_for_console_text( + "eat console backlog 3ea46eb2-7f82-4f70-b810-3f00e3dd4c4d" + ) + + with subtest("Stop a container early"): + machine.succeed(f"nixos-container stop {id1}") + machine.succeed(f"nixos-container start {id1} >&2 &") + machine.wait_for_console_text("Stage 2") + machine.succeed(f"nixos-container stop {id1}") + machine.wait_for_console_text(f"Container {id1} exited successfully") + machine.succeed(f"nixos-container start {id1}") + + with subtest("Stop a container without machined (regression test for #109695)"): + machine.systemctl("stop systemd-machined") + machine.succeed(f"nixos-container stop {id1}") + machine.wait_for_console_text(f"Container {id1} has been shut down") + machine.succeed(f"nixos-container start {id1}") + + with subtest("tmpfiles are present"): + machine.log("creating container tmpfiles") + machine.succeed( + "nixos-container create tmpfiles --config-file ${tmpfilesContainerConfig}" + ) + machine.log("created, starting…") + machine.succeed("nixos-container start tmpfiles") + machine.log("done starting, investigating…") + machine.succeed( + "echo $(nixos-container run tmpfiles -- systemctl is-active foo.service) | grep -q active;" + ) + machine.succeed("nixos-container destroy tmpfiles") + + with subtest("Execute commands via the root shell"): + assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname") + + with subtest("Destroy the containers"): + for id in id1, id2: + machine.succeed(f"nixos-container destroy {id}") + + with subtest("Check whether destruction of any container has killed important data"): + machine.succeed("grep -qF 'important data' /nested-bindmount/dummy") + + with subtest("Ensure that the container path is gone"): + print(machine.succeed("ls -lsa /var/lib/nixos-containers")) + machine.succeed(f"test ! -e /var/lib/nixos-containers/{id1}") + + with subtest("Ensure that a failed container creation doesn'leave any state"): + machine.fail( + "nixos-container create b0rk --config-file ${brokenCfg}" + ) + machine.succeed("test ! -e /var/lib/nixos-containers/b0rk") + ''; +} diff --git a/nixos/tests/containers-ip.nix b/nixos/tests/containers-ip.nix index 979fb365126d..44217970d24e 100644 --- a/nixos/tests/containers-ip.nix +++ b/nixos/tests/containers-ip.nix @@ -12,71 +12,69 @@ let }; in -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-ipv4-ipv6"; - meta = { - maintainers = with lib.maintainers; [ - aristid - aszlig - kampfschlaefer - ]; +{ pkgs, lib, ... }: +{ + name = "containers-ipv4-ipv6"; + meta = { + maintainers = with lib.maintainers; [ + aristid + aszlig + kampfschlaefer + ]; + }; + + nodes.machine = + { pkgs, ... }: + { + virtualisation.writableStore = true; + + containers.webserver4 = webserverFor "10.231.136.1" "10.231.136.2"; + containers.webserver6 = webserverFor "fc00::2" "fc00::1"; + virtualisation.additionalPaths = [ pkgs.stdenv ]; }; - nodes.machine = - { pkgs, ... 
}: - { - virtualisation.writableStore = true; - - containers.webserver4 = webserverFor "10.231.136.1" "10.231.136.2"; - containers.webserver6 = webserverFor "fc00::2" "fc00::1"; - virtualisation.additionalPaths = [ pkgs.stdenv ]; - }; - - testScript = - { nodes, ... }: - '' - import time + testScript = + { nodes, ... }: + '' + import time - def curl_host(ip): - # put [] around ipv6 addresses for curl - host = ip if ":" not in ip else f"[{ip}]" - return f"curl --fail --connect-timeout 2 http://{host}/ > /dev/null" + def curl_host(ip): + # put [] around ipv6 addresses for curl + host = ip if ":" not in ip else f"[{ip}]" + return f"curl --fail --connect-timeout 2 http://{host}/ > /dev/null" - def get_ip(container): - # need to distinguish because show-ip won't work for ipv6 - if container == "webserver4": - ip = machine.succeed(f"nixos-container show-ip {container}").rstrip() - assert ip == "${nodes.machine.config.containers.webserver4.localAddress}" - return ip - return "${nodes.machine.config.containers.webserver6.localAddress}" + def get_ip(container): + # need to distinguish because show-ip won't work for ipv6 + if container == "webserver4": + ip = machine.succeed(f"nixos-container show-ip {container}").rstrip() + assert ip == "${nodes.machine.config.containers.webserver4.localAddress}" + return ip + return "${nodes.machine.config.containers.webserver6.localAddress}" - for container in "webserver4", "webserver6": - assert container in machine.succeed("nixos-container list") + for container in "webserver4", "webserver6": + assert container in machine.succeed("nixos-container list") - with subtest(f"Start container {container}"): - machine.succeed(f"nixos-container start {container}") - # wait 2s for container to start and network to be up - time.sleep(2) + with subtest(f"Start container {container}"): + machine.succeed(f"nixos-container start {container}") + # wait 2s for container to start and network to be up + time.sleep(2) - # Since "start" returns after the container has reached - # multi-user.target, we should now be able to access it. + # Since "start" returns after the container has reached + # multi-user.target, we should now be able to access it. - ip = get_ip(container) - with subtest(f"{container} reacts to pings and HTTP requests"): - machine.succeed(f"ping -n -c1 {ip}") - machine.succeed(curl_host(ip)) + ip = get_ip(container) + with subtest(f"{container} reacts to pings and HTTP requests"): + machine.succeed(f"ping -n -c1 {ip}") + machine.succeed(curl_host(ip)) - with subtest(f"Stop container {container}"): - machine.succeed(f"nixos-container stop {container}") - machine.fail(curl_host(ip)) + with subtest(f"Stop container {container}"): + machine.succeed(f"nixos-container stop {container}") + machine.fail(curl_host(ip)) - # Destroying a declarative container should fail. - machine.fail(f"nixos-container destroy {container}") - ''; - } -) + # Destroying a declarative container should fail. + machine.fail(f"nixos-container destroy {container}") + ''; +} diff --git a/nixos/tests/containers-macvlans.nix b/nixos/tests/containers-macvlans.nix index 3ebfef9d0bc8..7b9644d0542f 100644 --- a/nixos/tests/containers-macvlans.nix +++ b/nixos/tests/containers-macvlans.nix @@ -4,97 +4,95 @@ let containerIp2 = "192.168.1.254"; in -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-macvlans"; - meta = { - maintainers = with lib.maintainers; [ montag451 ]; - }; +{ pkgs, lib, ... 
}: +{ + name = "containers-macvlans"; + meta = { + maintainers = with lib.maintainers; [ montag451 ]; + }; - nodes = { + nodes = { - machine1 = - { lib, ... }: - { - virtualisation.vlans = [ 1 ]; + machine1 = + { lib, ... }: + { + virtualisation.vlans = [ 1 ]; - # To be able to ping containers from the host, it is necessary - # to create a macvlan on the host on the VLAN 1 network. - networking.macvlans.mv-eth1-host = { - interface = "eth1"; - mode = "bridge"; - }; - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ ]; - networking.interfaces.mv-eth1-host = { - ipv4.addresses = [ - { - address = "192.168.1.1"; - prefixLength = 24; - } - ]; - }; + # To be able to ping containers from the host, it is necessary + # to create a macvlan on the host on the VLAN 1 network. + networking.macvlans.mv-eth1-host = { + interface = "eth1"; + mode = "bridge"; + }; + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ ]; + networking.interfaces.mv-eth1-host = { + ipv4.addresses = [ + { + address = "192.168.1.1"; + prefixLength = 24; + } + ]; + }; - containers.test1 = { - autoStart = true; - macvlans = [ "eth1" ]; + containers.test1 = { + autoStart = true; + macvlans = [ "eth1" ]; - config = { - networking.interfaces.mv-eth1 = { - ipv4.addresses = [ - { - address = containerIp1; - prefixLength = 24; - } - ]; - }; - }; - }; - - containers.test2 = { - autoStart = true; - macvlans = [ "eth1" ]; - - config = { - networking.interfaces.mv-eth1 = { - ipv4.addresses = [ - { - address = containerIp2; - prefixLength = 24; - } - ]; - }; + config = { + networking.interfaces.mv-eth1 = { + ipv4.addresses = [ + { + address = containerIp1; + prefixLength = 24; + } + ]; }; }; }; - machine2 = - { ... }: - { - virtualisation.vlans = [ 1 ]; + containers.test2 = { + autoStart = true; + macvlans = [ "eth1" ]; + + config = { + networking.interfaces.mv-eth1 = { + ipv4.addresses = [ + { + address = containerIp2; + prefixLength = 24; + } + ]; + }; + }; }; + }; - }; + machine2 = + { ... 
}: + { + virtualisation.vlans = [ 1 ]; + }; - testScript = '' - start_all() - machine1.wait_for_unit("default.target") - machine2.wait_for_unit("default.target") + }; - with subtest( - "Ping between containers to check that macvlans are created in bridge mode" - ): - machine1.succeed("nixos-container run test1 -- ping -n -c 1 ${containerIp2}") + testScript = '' + start_all() + machine1.wait_for_unit("default.target") + machine2.wait_for_unit("default.target") - with subtest("Ping containers from the host (machine1)"): - machine1.succeed("ping -n -c 1 ${containerIp1}") - machine1.succeed("ping -n -c 1 ${containerIp2}") + with subtest( + "Ping between containers to check that macvlans are created in bridge mode" + ): + machine1.succeed("nixos-container run test1 -- ping -n -c 1 ${containerIp2}") - with subtest( - "Ping containers from the second machine to check that containers are reachable from the outside" - ): - machine2.succeed("ping -n -c 1 ${containerIp1}") - machine2.succeed("ping -n -c 1 ${containerIp2}") - ''; - } -) + with subtest("Ping containers from the host (machine1)"): + machine1.succeed("ping -n -c 1 ${containerIp1}") + machine1.succeed("ping -n -c 1 ${containerIp2}") + + with subtest( + "Ping containers from the second machine to check that containers are reachable from the outside" + ): + machine2.succeed("ping -n -c 1 ${containerIp1}") + machine2.succeed("ping -n -c 1 ${containerIp2}") + ''; +} diff --git a/nixos/tests/containers-names.nix b/nixos/tests/containers-names.nix index 2ed047b62fe2..65f2763c50b3 100644 --- a/nixos/tests/containers-names.nix +++ b/nixos/tests/containers-names.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-names"; - meta = { - maintainers = with lib.maintainers; [ patryk27 ]; +{ pkgs, lib, ... }: +{ + name = "containers-names"; + meta = { + maintainers = with lib.maintainers; [ patryk27 ]; + }; + + nodes.machine = + { ... }: + { + # We're using the newest kernel, so that we can test containers with long names. + # Please see https://github.com/NixOS/nixpkgs/issues/38509 for details. + boot.kernelPackages = pkgs.linuxPackages_latest; + + containers = + let + container = subnet: { + autoStart = true; + privateNetwork = true; + hostAddress = "192.168.${subnet}.1"; + localAddress = "192.168.${subnet}.2"; + config = { }; + }; + + in + { + first = container "1"; + second = container "2"; + really-long-name = container "3"; + really-long-long-name-2 = container "4"; + }; }; - nodes.machine = - { ... }: - { - # We're using the newest kernel, so that we can test containers with long names. - # Please see https://github.com/NixOS/nixpkgs/issues/38509 for details. 
- boot.kernelPackages = pkgs.linuxPackages_latest; + testScript = '' + machine.wait_for_unit("default.target") - containers = - let - container = subnet: { - autoStart = true; - privateNetwork = true; - hostAddress = "192.168.${subnet}.1"; - localAddress = "192.168.${subnet}.2"; - config = { }; - }; - - in - { - first = container "1"; - second = container "2"; - really-long-name = container "3"; - really-long-long-name-2 = container "4"; - }; - }; - - testScript = '' - machine.wait_for_unit("default.target") - - machine.succeed("ip link show | grep ve-first") - machine.succeed("ip link show | grep ve-second") - machine.succeed("ip link show | grep ve-really-lFYWO") - machine.succeed("ip link show | grep ve-really-l3QgY") - ''; - } -) + machine.succeed("ip link show | grep ve-first") + machine.succeed("ip link show | grep ve-second") + machine.succeed("ip link show | grep ve-really-lFYWO") + machine.succeed("ip link show | grep ve-really-l3QgY") + ''; +} diff --git a/nixos/tests/containers-nested.nix b/nixos/tests/containers-nested.nix index a274b64c443e..363332dc5853 100644 --- a/nixos/tests/containers-nested.nix +++ b/nixos/tests/containers-nested.nix @@ -1,36 +1,34 @@ # Test for NixOS' container nesting. -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "nested"; +{ pkgs, ... }: +{ + name = "nested"; - meta = with pkgs.lib.maintainers; { - maintainers = [ sorki ]; - }; + meta = with pkgs.lib.maintainers; { + maintainers = [ sorki ]; + }; - nodes.machine = - { lib, ... }: - let - makeNested = subConf: { - containers.nested = { - autoStart = true; - privateNetwork = true; - config = subConf; - }; + nodes.machine = + { lib, ... }: + let + makeNested = subConf: { + containers.nested = { + autoStart = true; + privateNetwork = true; + config = subConf; }; - in - makeNested (makeNested { }); + }; + in + makeNested (makeNested { }); - testScript = '' - machine.start() - machine.wait_for_unit("container@nested.service") - machine.succeed("systemd-run --pty --machine=nested -- machinectl list | grep nested") - print( - machine.succeed( - "systemd-run --pty --machine=nested -- systemd-run --pty --machine=nested -- systemctl status" - ) - ) - ''; - } -) + testScript = '' + machine.start() + machine.wait_for_unit("container@nested.service") + machine.succeed("systemd-run --pty --machine=nested -- machinectl list | grep nested") + print( + machine.succeed( + "systemd-run --pty --machine=nested -- systemd-run --pty --machine=nested -- systemctl status" + ) + ) + ''; +} diff --git a/nixos/tests/containers-physical_interfaces.nix b/nixos/tests/containers-physical_interfaces.nix index efe7982c6762..aa96727985c5 100644 --- a/nixos/tests/containers-physical_interfaces.nix +++ b/nixos/tests/containers-physical_interfaces.nix @@ -1,153 +1,151 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-physical_interfaces"; - meta = { - maintainers = with lib.maintainers; [ kampfschlaefer ]; - }; +{ pkgs, lib, ... }: +{ + name = "containers-physical_interfaces"; + meta = { + maintainers = with lib.maintainers; [ kampfschlaefer ]; + }; - nodes = { - server = - { ... }: - { - virtualisation.vlans = [ 1 ]; + nodes = { + server = + { ... 
}: + { + virtualisation.vlans = [ 1 ]; - containers.server = { - privateNetwork = true; - interfaces = [ "eth1" ]; + containers.server = { + privateNetwork = true; + interfaces = [ "eth1" ]; - config = { - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "10.10.0.1"; - prefixLength = 24; - } - ]; - networking.firewall.enable = false; - }; + config = { + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "10.10.0.1"; + prefixLength = 24; + } + ]; + networking.firewall.enable = false; }; }; - bridged = - { ... }: - { - virtualisation.vlans = [ 1 ]; + }; + bridged = + { ... }: + { + virtualisation.vlans = [ 1 ]; - containers.bridged = { - privateNetwork = true; - interfaces = [ "eth1" ]; + containers.bridged = { + privateNetwork = true; + interfaces = [ "eth1" ]; - config = { - networking.bridges.br0.interfaces = [ "eth1" ]; - networking.interfaces.br0.ipv4.addresses = [ - { - address = "10.10.0.2"; - prefixLength = 24; - } - ]; - networking.firewall.enable = false; - }; + config = { + networking.bridges.br0.interfaces = [ "eth1" ]; + networking.interfaces.br0.ipv4.addresses = [ + { + address = "10.10.0.2"; + prefixLength = 24; + } + ]; + networking.firewall.enable = false; }; }; + }; - bonded = - { ... }: - { - virtualisation.vlans = [ 1 ]; + bonded = + { ... }: + { + virtualisation.vlans = [ 1 ]; - containers.bonded = { - privateNetwork = true; - interfaces = [ "eth1" ]; + containers.bonded = { + privateNetwork = true; + interfaces = [ "eth1" ]; - config = { - networking.bonds.bond0 = { - interfaces = [ "eth1" ]; - driverOptions.mode = "active-backup"; - }; - networking.interfaces.bond0.ipv4.addresses = [ - { - address = "10.10.0.3"; - prefixLength = 24; - } - ]; - networking.firewall.enable = false; + config = { + networking.bonds.bond0 = { + interfaces = [ "eth1" ]; + driverOptions.mode = "active-backup"; }; + networking.interfaces.bond0.ipv4.addresses = [ + { + address = "10.10.0.3"; + prefixLength = 24; + } + ]; + networking.firewall.enable = false; }; }; + }; - bridgedbond = - { ... }: - { - virtualisation.vlans = [ 1 ]; + bridgedbond = + { ... 
}: + { + virtualisation.vlans = [ 1 ]; - containers.bridgedbond = { - privateNetwork = true; - interfaces = [ "eth1" ]; + containers.bridgedbond = { + privateNetwork = true; + interfaces = [ "eth1" ]; - config = { - networking.bonds.bond0 = { - interfaces = [ "eth1" ]; - driverOptions.mode = "active-backup"; - }; - networking.bridges.br0.interfaces = [ "bond0" ]; - networking.interfaces.br0.ipv4.addresses = [ - { - address = "10.10.0.4"; - prefixLength = 24; - } - ]; - networking.firewall.enable = false; + config = { + networking.bonds.bond0 = { + interfaces = [ "eth1" ]; + driverOptions.mode = "active-backup"; }; + networking.bridges.br0.interfaces = [ "bond0" ]; + networking.interfaces.br0.ipv4.addresses = [ + { + address = "10.10.0.4"; + prefixLength = 24; + } + ]; + networking.firewall.enable = false; }; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - with subtest("Prepare server"): - server.wait_for_unit("default.target") - server.succeed("ip link show dev eth1 >&2") + with subtest("Prepare server"): + server.wait_for_unit("default.target") + server.succeed("ip link show dev eth1 >&2") - with subtest("Simple physical interface is up"): - server.succeed("nixos-container start server") - server.wait_for_unit("container@server") - server.succeed( - "systemctl -M server list-dependencies network-addresses-eth1.service >&2" - ) + with subtest("Simple physical interface is up"): + server.succeed("nixos-container start server") + server.wait_for_unit("container@server") + server.succeed( + "systemctl -M server list-dependencies network-addresses-eth1.service >&2" + ) - # The other tests will ping this container on its ip. Here we just check - # that the device is present in the container. - server.succeed("nixos-container run server -- ip a show dev eth1 >&2") + # The other tests will ping this container on its ip. Here we just check + # that the device is present in the container. 
+ server.succeed("nixos-container run server -- ip a show dev eth1 >&2") - with subtest("Physical device in bridge in container can ping server"): - bridged.wait_for_unit("default.target") - bridged.succeed("nixos-container start bridged") - bridged.wait_for_unit("container@bridged") - bridged.succeed( - "systemctl -M bridged list-dependencies network-addresses-br0.service >&2", - "systemctl -M bridged status -n 30 -l network-addresses-br0.service", - "nixos-container run bridged -- ping -w 10 -c 1 -n 10.10.0.1", - ) + with subtest("Physical device in bridge in container can ping server"): + bridged.wait_for_unit("default.target") + bridged.succeed("nixos-container start bridged") + bridged.wait_for_unit("container@bridged") + bridged.succeed( + "systemctl -M bridged list-dependencies network-addresses-br0.service >&2", + "systemctl -M bridged status -n 30 -l network-addresses-br0.service", + "nixos-container run bridged -- ping -w 10 -c 1 -n 10.10.0.1", + ) - with subtest("Physical device in bond in container can ping server"): - bonded.wait_for_unit("default.target") - bonded.succeed("nixos-container start bonded") - bonded.wait_for_unit("container@bonded") - bonded.succeed( - "systemctl -M bonded list-dependencies network-addresses-bond0 >&2", - "systemctl -M bonded status -n 30 -l network-addresses-bond0 >&2", - "nixos-container run bonded -- ping -w 10 -c 1 -n 10.10.0.1", - ) + with subtest("Physical device in bond in container can ping server"): + bonded.wait_for_unit("default.target") + bonded.succeed("nixos-container start bonded") + bonded.wait_for_unit("container@bonded") + bonded.succeed( + "systemctl -M bonded list-dependencies network-addresses-bond0 >&2", + "systemctl -M bonded status -n 30 -l network-addresses-bond0 >&2", + "nixos-container run bonded -- ping -w 10 -c 1 -n 10.10.0.1", + ) - with subtest("Physical device in bond in bridge in container can ping server"): - bridgedbond.wait_for_unit("default.target") - bridgedbond.succeed("nixos-container start bridgedbond") - bridgedbond.wait_for_unit("container@bridgedbond") - bridgedbond.succeed( - "systemctl -M bridgedbond list-dependencies network-addresses-br0.service >&2", - "systemctl -M bridgedbond status -n 30 -l network-addresses-br0.service", - "nixos-container run bridgedbond -- ping -w 10 -c 1 -n 10.10.0.1", - ) - ''; - } -) + with subtest("Physical device in bond in bridge in container can ping server"): + bridgedbond.wait_for_unit("default.target") + bridgedbond.succeed("nixos-container start bridgedbond") + bridgedbond.wait_for_unit("container@bridgedbond") + bridgedbond.succeed( + "systemctl -M bridgedbond list-dependencies network-addresses-br0.service >&2", + "systemctl -M bridgedbond status -n 30 -l network-addresses-br0.service", + "nixos-container run bridgedbond -- ping -w 10 -c 1 -n 10.10.0.1", + ) + ''; +} diff --git a/nixos/tests/containers-portforward.nix b/nixos/tests/containers-portforward.nix index be087dba48f0..21b618b4abd5 100644 --- a/nixos/tests/containers-portforward.nix +++ b/nixos/tests/containers-portforward.nix @@ -5,69 +5,67 @@ let containerPort = 80; in -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-portforward"; - meta = { - maintainers = with lib.maintainers; [ - aristid - aszlig - kampfschlaefer - ianwookim - ]; - }; +{ pkgs, lib, ... }: +{ + name = "containers-portforward"; + meta = { + maintainers = with lib.maintainers; [ + aristid + aszlig + kampfschlaefer + ianwookim + ]; + }; - nodes.machine = - { pkgs, ... 
}: - { - imports = [ ../modules/installer/cd-dvd/channel.nix ]; - virtualisation.writableStore = true; + nodes.machine = + { pkgs, ... }: + { + imports = [ ../modules/installer/cd-dvd/channel.nix ]; + virtualisation.writableStore = true; - containers.webserver = { - privateNetwork = true; - hostAddress = hostIp; - localAddress = containerIp; - forwardPorts = [ - { - protocol = "tcp"; - hostPort = hostPort; - containerPort = containerPort; - } - ]; - config = { - services.httpd.enable = true; - services.httpd.adminAddr = "foo@example.org"; - networking.firewall.allowedTCPPorts = [ 80 ]; - }; + containers.webserver = { + privateNetwork = true; + hostAddress = hostIp; + localAddress = containerIp; + forwardPorts = [ + { + protocol = "tcp"; + hostPort = hostPort; + containerPort = containerPort; + } + ]; + config = { + services.httpd.enable = true; + services.httpd.adminAddr = "foo@example.org"; + networking.firewall.allowedTCPPorts = [ 80 ]; }; - - virtualisation.additionalPaths = [ pkgs.stdenv ]; }; - testScript = '' - container_list = machine.succeed("nixos-container list") - assert "webserver" in container_list + virtualisation.additionalPaths = [ pkgs.stdenv ]; + }; - # Start the webserver container. - machine.succeed("nixos-container start webserver") + testScript = '' + container_list = machine.succeed("nixos-container list") + assert "webserver" in container_list - # wait two seconds for the container to start and the network to be up - machine.sleep(2) + # Start the webserver container. + machine.succeed("nixos-container start webserver") - # Since "start" returns after the container has reached - # multi-user.target, we should now be able to access it. - # ip = machine.succeed("nixos-container show-ip webserver").strip() - machine.succeed("ping -n -c1 ${hostIp}") - machine.succeed("curl --fail http://${hostIp}:${toString hostPort}/ > /dev/null") + # wait two seconds for the container to start and the network to be up + machine.sleep(2) - # Stop the container. - machine.succeed("nixos-container stop webserver") - machine.fail("curl --fail --connect-timeout 2 http://${hostIp}:${toString hostPort}/ > /dev/null") + # Since "start" returns after the container has reached + # multi-user.target, we should now be able to access it. + # ip = machine.succeed("nixos-container show-ip webserver").strip() + machine.succeed("ping -n -c1 ${hostIp}") + machine.succeed("curl --fail http://${hostIp}:${toString hostPort}/ > /dev/null") - # Destroying a declarative container should fail. - machine.fail("nixos-container destroy webserver") - ''; + # Stop the container. + machine.succeed("nixos-container stop webserver") + machine.fail("curl --fail --connect-timeout 2 http://${hostIp}:${toString hostPort}/ > /dev/null") - } -) + # Destroying a declarative container should fail. + machine.fail("nixos-container destroy webserver") + ''; + +} diff --git a/nixos/tests/containers-reloadable.nix b/nixos/tests/containers-reloadable.nix index 74c6003da659..9854db830550 100644 --- a/nixos/tests/containers-reloadable.nix +++ b/nixos/tests/containers-reloadable.nix @@ -1,61 +1,59 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-reloadable"; - meta = { - maintainers = with lib.maintainers; [ danbst ]; - }; +{ pkgs, lib, ... }: +{ + name = "containers-reloadable"; + meta = { + maintainers = with lib.maintainers; [ danbst ]; + }; - nodes = { - machine = - { lib, ... 
}: - { - containers.test1 = { - autoStart = true; - config.environment.etc.check.text = "client_base"; - }; + nodes = { + machine = + { lib, ... }: + { + containers.test1 = { + autoStart = true; + config.environment.etc.check.text = "client_base"; + }; - # prevent make-test-python.nix to change IP - networking.interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [ ]; + # prevent make-test-python.nix to change IP + networking.interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [ ]; - specialisation.c1.configuration = { - containers.test1.config = { - environment.etc.check.text = lib.mkForce "client_c1"; - services.httpd.enable = true; - services.httpd.adminAddr = "nixos@example.com"; - }; - }; - - specialisation.c2.configuration = { - containers.test1.config = { - environment.etc.check.text = lib.mkForce "client_c2"; - services.nginx.enable = true; - }; + specialisation.c1.configuration = { + containers.test1.config = { + environment.etc.check.text = lib.mkForce "client_c1"; + services.httpd.enable = true; + services.httpd.adminAddr = "nixos@example.com"; }; }; - }; - testScript = '' - machine.start() - machine.wait_for_unit("default.target") + specialisation.c2.configuration = { + containers.test1.config = { + environment.etc.check.text = lib.mkForce "client_c2"; + services.nginx.enable = true; + }; + }; + }; + }; - assert "client_base" in machine.succeed("nixos-container run test1 cat /etc/check") + testScript = '' + machine.start() + machine.wait_for_unit("default.target") - with subtest("httpd is available after activating config1"): - machine.succeed( - "/run/booted-system/specialisation/c1/bin/switch-to-configuration test >&2", - "[[ $(nixos-container run test1 cat /etc/check) == client_c1 ]] >&2", - "systemctl status httpd -M test1 >&2", - ) + assert "client_base" in machine.succeed("nixos-container run test1 cat /etc/check") - with subtest("httpd is not available any longer after switching to config2"): - machine.succeed( - "/run/booted-system/specialisation/c2/bin/switch-to-configuration test >&2", - "[[ $(nixos-container run test1 cat /etc/check) == client_c2 ]] >&2", - "systemctl status nginx -M test1 >&2", - ) - machine.fail("systemctl status httpd -M test1 >&2") - ''; + with subtest("httpd is available after activating config1"): + machine.succeed( + "/run/booted-system/specialisation/c1/bin/switch-to-configuration test >&2", + "[[ $(nixos-container run test1 cat /etc/check) == client_c1 ]] >&2", + "systemctl status httpd -M test1 >&2", + ) - } -) + with subtest("httpd is not available any longer after switching to config2"): + machine.succeed( + "/run/booted-system/specialisation/c2/bin/switch-to-configuration test >&2", + "[[ $(nixos-container run test1 cat /etc/check) == client_c2 ]] >&2", + "systemctl status nginx -M test1 >&2", + ) + machine.fail("systemctl status httpd -M test1 >&2") + ''; + +} diff --git a/nixos/tests/containers-require-bind-mounts.nix b/nixos/tests/containers-require-bind-mounts.nix index b5a861beacc9..eb65a619c7e9 100644 --- a/nixos/tests/containers-require-bind-mounts.nix +++ b/nixos/tests/containers-require-bind-mounts.nix @@ -1,40 +1,38 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "containers-require-bind-mounts"; - meta.maintainers = with lib.maintainers; [ kira-bruneau ]; +{ lib, ... 
}: +{ + name = "containers-require-bind-mounts"; + meta.maintainers = with lib.maintainers; [ kira-bruneau ]; - nodes.machine = { - containers.require-bind-mounts = { - bindMounts = { - "/srv/data" = { }; - }; - config = { }; - }; - - virtualisation.fileSystems = { - "/srv/data" = { - fsType = "tmpfs"; - options = [ "noauto" ]; - }; + nodes.machine = { + containers.require-bind-mounts = { + bindMounts = { + "/srv/data" = { }; }; + config = { }; }; - testScript = '' - machine.wait_for_unit("default.target") + virtualisation.fileSystems = { + "/srv/data" = { + fsType = "tmpfs"; + options = [ "noauto" ]; + }; + }; + }; - assert "require-bind-mounts" in machine.succeed("nixos-container list") + testScript = '' + machine.wait_for_unit("default.target") + + assert "require-bind-mounts" in machine.succeed("nixos-container list") + assert "down" in machine.succeed("nixos-container status require-bind-mounts") + assert "inactive" in machine.fail("systemctl is-active srv-data.mount") + + with subtest("bind mount host paths must be mounted to run container"): + machine.succeed("nixos-container start require-bind-mounts") + assert "up" in machine.succeed("nixos-container status require-bind-mounts") + assert "active" in machine.succeed("systemctl status srv-data.mount") + + machine.succeed("systemctl stop srv-data.mount") assert "down" in machine.succeed("nixos-container status require-bind-mounts") assert "inactive" in machine.fail("systemctl is-active srv-data.mount") - - with subtest("bind mount host paths must be mounted to run container"): - machine.succeed("nixos-container start require-bind-mounts") - assert "up" in machine.succeed("nixos-container status require-bind-mounts") - assert "active" in machine.succeed("systemctl status srv-data.mount") - - machine.succeed("systemctl stop srv-data.mount") - assert "down" in machine.succeed("nixos-container status require-bind-mounts") - assert "inactive" in machine.fail("systemctl is-active srv-data.mount") - ''; - } -) + ''; +} diff --git a/nixos/tests/containers-restart_networking.nix b/nixos/tests/containers-restart_networking.nix index d0d77031ea4b..8ce524a3e91c 100644 --- a/nixos/tests/containers-restart_networking.nix +++ b/nixos/tests/containers-restart_networking.nix @@ -1,131 +1,129 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-restart_networking"; - meta = { - maintainers = with lib.maintainers; [ kampfschlaefer ]; - }; +{ pkgs, lib, ... 
}: +{ + name = "containers-restart_networking"; + meta = { + maintainers = with lib.maintainers; [ kampfschlaefer ]; + }; - nodes = { - client = { - virtualisation.vlans = [ 1 ]; + nodes = { + client = { + virtualisation.vlans = [ 1 ]; - networking.firewall.enable = false; + networking.firewall.enable = false; - containers.webserver = { - autoStart = true; - privateNetwork = true; - hostBridge = "br0"; - config = { - networking.firewall.enable = false; - networking.interfaces.eth0.ipv4.addresses = [ - { - address = "192.168.1.122"; - prefixLength = 24; - } - ]; - }; + containers.webserver = { + autoStart = true; + privateNetwork = true; + hostBridge = "br0"; + config = { + networking.firewall.enable = false; + networking.interfaces.eth0.ipv4.addresses = [ + { + address = "192.168.1.122"; + prefixLength = 24; + } + ]; }; + }; + networking.bridges.br0 = { + interfaces = [ ]; + rstp = false; + }; + + networking.interfaces.br0.ipv4.addresses = [ + { + address = "192.168.1.1"; + prefixLength = 24; + } + ]; + + networking.interfaces.eth1 = { + ipv4.addresses = lib.mkForce [ ]; + ipv6.addresses = lib.mkForce [ ]; + }; + + specialisation.eth1.configuration = { + networking.bridges.br0.interfaces = [ "eth1" ]; + networking.interfaces = { + eth1.ipv4.addresses = lib.mkForce [ ]; + eth1.ipv6.addresses = lib.mkForce [ ]; + br0.ipv4.addresses = [ + { + address = "192.168.1.2"; + prefixLength = 24; + } + ]; + }; + }; + + specialisation.eth1-rstp.configuration = { networking.bridges.br0 = { - interfaces = [ ]; - rstp = false; + interfaces = [ "eth1" ]; + rstp = lib.mkForce true; }; - networking.interfaces.br0.ipv4.addresses = [ - { - address = "192.168.1.1"; - prefixLength = 24; - } - ]; - - networking.interfaces.eth1 = { - ipv4.addresses = lib.mkForce [ ]; - ipv6.addresses = lib.mkForce [ ]; - }; - - specialisation.eth1.configuration = { - networking.bridges.br0.interfaces = [ "eth1" ]; - networking.interfaces = { - eth1.ipv4.addresses = lib.mkForce [ ]; - eth1.ipv6.addresses = lib.mkForce [ ]; - br0.ipv4.addresses = [ - { - address = "192.168.1.2"; - prefixLength = 24; - } - ]; - }; - }; - - specialisation.eth1-rstp.configuration = { - networking.bridges.br0 = { - interfaces = [ "eth1" ]; - rstp = lib.mkForce true; - }; - - networking.interfaces = { - eth1.ipv4.addresses = lib.mkForce [ ]; - eth1.ipv6.addresses = lib.mkForce [ ]; - br0.ipv4.addresses = [ - { - address = "192.168.1.2"; - prefixLength = 24; - } - ]; - }; + networking.interfaces = { + eth1.ipv4.addresses = lib.mkForce [ ]; + eth1.ipv6.addresses = lib.mkForce [ ]; + br0.ipv4.addresses = [ + { + address = "192.168.1.2"; + prefixLength = 24; + } + ]; }; }; }; + }; - testScript = '' - client.start() + testScript = '' + client.start() - client.wait_for_unit("default.target") + client.wait_for_unit("default.target") - with subtest("Initial configuration connectivity check"): - client.succeed("ping 192.168.1.122 -c 1 -n >&2") - client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2") + with subtest("Initial configuration connectivity check"): + client.succeed("ping 192.168.1.122 -c 1 -n >&2") + client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2") - client.fail("ip l show eth1 |grep 'master br0' >&2") - client.fail("grep eth1 /run/br0.interfaces >&2") + client.fail("ip l show eth1 |grep 'master br0' >&2") + client.fail("grep eth1 /run/br0.interfaces >&2") - with subtest("Bridged configuration without STP preserves connectivity"): - client.succeed( - 
"/run/booted-system/specialisation/eth1/bin/switch-to-configuration test >&2" - ) + with subtest("Bridged configuration without STP preserves connectivity"): + client.succeed( + "/run/booted-system/specialisation/eth1/bin/switch-to-configuration test >&2" + ) - client.succeed( - "ping 192.168.1.122 -c 1 -n >&2", - "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2", - "ip l show eth1 |grep 'master br0' >&2", - "grep eth1 /run/br0.interfaces >&2", - ) + client.succeed( + "ping 192.168.1.122 -c 1 -n >&2", + "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2", + "ip l show eth1 |grep 'master br0' >&2", + "grep eth1 /run/br0.interfaces >&2", + ) - # activating rstp needs another service, therefore the bridge will restart and the container will lose its connectivity - # with subtest("Bridged configuration with STP"): - # client.succeed("/run/booted-system/specialisation/eth1-rstp/bin/switch-to-configuration test >&2") - # client.execute("ip -4 a >&2") - # client.execute("ip l >&2") - # - # client.succeed( - # "ping 192.168.1.122 -c 1 -n >&2", - # "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2", - # "ip l show eth1 |grep 'master br0' >&2", - # "grep eth1 /run/br0.interfaces >&2", - # ) + # activating rstp needs another service, therefore the bridge will restart and the container will lose its connectivity + # with subtest("Bridged configuration with STP"): + # client.succeed("/run/booted-system/specialisation/eth1-rstp/bin/switch-to-configuration test >&2") + # client.execute("ip -4 a >&2") + # client.execute("ip l >&2") + # + # client.succeed( + # "ping 192.168.1.122 -c 1 -n >&2", + # "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2", + # "ip l show eth1 |grep 'master br0' >&2", + # "grep eth1 /run/br0.interfaces >&2", + # ) - with subtest("Reverting to initial configuration preserves connectivity"): - client.succeed( - "/run/booted-system/bin/switch-to-configuration test >&2" - ) + with subtest("Reverting to initial configuration preserves connectivity"): + client.succeed( + "/run/booted-system/bin/switch-to-configuration test >&2" + ) - client.succeed("ping 192.168.1.122 -c 1 -n >&2") - client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2") + client.succeed("ping 192.168.1.122 -c 1 -n >&2") + client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2") - client.fail("ip l show eth1 |grep 'master br0' >&2") - client.fail("grep eth1 /run/br0.interfaces >&2") - ''; + client.fail("ip l show eth1 |grep 'master br0' >&2") + client.fail("grep eth1 /run/br0.interfaces >&2") + ''; - } -) +} diff --git a/nixos/tests/containers-tmpfs.nix b/nixos/tests/containers-tmpfs.nix index 668417df271e..2ac1795cca06 100644 --- a/nixos/tests/containers-tmpfs.nix +++ b/nixos/tests/containers-tmpfs.nix @@ -1,93 +1,91 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-tmpfs"; - meta = { - maintainers = with lib.maintainers; [ patryk27 ]; - }; +{ pkgs, lib, ... }: +{ + name = "containers-tmpfs"; + meta = { + maintainers = with lib.maintainers; [ patryk27 ]; + }; - nodes.machine = - { pkgs, ... }: - { - imports = [ ../modules/installer/cd-dvd/channel.nix ]; - virtualisation.writableStore = true; + nodes.machine = + { pkgs, ... 
}: + { + imports = [ ../modules/installer/cd-dvd/channel.nix ]; + virtualisation.writableStore = true; - containers.tmpfs = { - autoStart = true; - tmpfs = [ - # Mount var as a tmpfs - "/var" + containers.tmpfs = { + autoStart = true; + tmpfs = [ + # Mount var as a tmpfs + "/var" - # Add a nested mount inside a tmpfs - "/var/log" + # Add a nested mount inside a tmpfs + "/var/log" - # Add a tmpfs on a path that does not exist - "/some/random/path" - ]; - config = { }; - }; - - virtualisation.additionalPaths = [ pkgs.stdenv ]; + # Add a tmpfs on a path that does not exist + "/some/random/path" + ]; + config = { }; }; - testScript = '' - machine.wait_for_unit("default.target") - assert "tmpfs" in machine.succeed("nixos-container list") + virtualisation.additionalPaths = [ pkgs.stdenv ]; + }; - with subtest("tmpfs container is up"): - assert "up" in machine.succeed("nixos-container status tmpfs") + testScript = '' + machine.wait_for_unit("default.target") + assert "tmpfs" in machine.succeed("nixos-container list") + + with subtest("tmpfs container is up"): + assert "up" in machine.succeed("nixos-container status tmpfs") - def tmpfs_cmd(command): - return f"nixos-container run tmpfs -- {command} 2>/dev/null" + def tmpfs_cmd(command): + return f"nixos-container run tmpfs -- {command} 2>/dev/null" - with subtest("/var is mounted as a tmpfs"): - machine.succeed(tmpfs_cmd("mountpoint -q /var")) + with subtest("/var is mounted as a tmpfs"): + machine.succeed(tmpfs_cmd("mountpoint -q /var")) - with subtest("/var/log is mounted as a tmpfs"): - assert "What: tmpfs" in machine.succeed( - tmpfs_cmd("systemctl status var-log.mount --no-pager") - ) - machine.succeed(tmpfs_cmd("mountpoint -q /var/log")) + with subtest("/var/log is mounted as a tmpfs"): + assert "What: tmpfs" in machine.succeed( + tmpfs_cmd("systemctl status var-log.mount --no-pager") + ) + machine.succeed(tmpfs_cmd("mountpoint -q /var/log")) - with subtest("/some/random/path is mounted as a tmpfs"): - assert "What: tmpfs" in machine.succeed( - tmpfs_cmd("systemctl status some-random-path.mount --no-pager") - ) - machine.succeed(tmpfs_cmd("mountpoint -q /some/random/path")) + with subtest("/some/random/path is mounted as a tmpfs"): + assert "What: tmpfs" in machine.succeed( + tmpfs_cmd("systemctl status some-random-path.mount --no-pager") + ) + machine.succeed(tmpfs_cmd("mountpoint -q /some/random/path")) - with subtest( - "files created in the container in a non-tmpfs directory are visible on the host." - ): - # This establishes legitimacy for the following tests - machine.succeed( - tmpfs_cmd("touch /root/test.file"), - tmpfs_cmd("ls -l /root | grep -q test.file"), - "test -e /var/lib/nixos-containers/tmpfs/root/test.file", - ) + with subtest( + "files created in the container in a non-tmpfs directory are visible on the host." 
+ ): + # This establishes legitimacy for the following tests + machine.succeed( + tmpfs_cmd("touch /root/test.file"), + tmpfs_cmd("ls -l /root | grep -q test.file"), + "test -e /var/lib/nixos-containers/tmpfs/root/test.file", + ) - with subtest( - "/some/random/path is writable and that files created there are not " - + "in the hosts container dir but in the tmpfs" - ): - machine.succeed( - tmpfs_cmd("touch /some/random/path/test.file"), - tmpfs_cmd("test -e /some/random/path/test.file"), - ) - machine.fail("test -e /var/lib/nixos-containers/tmpfs/some/random/path/test.file") + with subtest( + "/some/random/path is writable and that files created there are not " + + "in the hosts container dir but in the tmpfs" + ): + machine.succeed( + tmpfs_cmd("touch /some/random/path/test.file"), + tmpfs_cmd("test -e /some/random/path/test.file"), + ) + machine.fail("test -e /var/lib/nixos-containers/tmpfs/some/random/path/test.file") - with subtest( - "files created in the hosts container dir in a path where a tmpfs " - + "file system has been mounted are not visible to the container as " - + "the do not exist in the tmpfs" - ): - machine.succeed( - "touch /var/lib/nixos-containers/tmpfs/var/test.file", - "test -e /var/lib/nixos-containers/tmpfs/var/test.file", - "ls -l /var/lib/nixos-containers/tmpfs/var/ | grep -q test.file 2>/dev/null", - ) - machine.fail(tmpfs_cmd("ls -l /var | grep -q test.file")) - ''; - } -) + with subtest( + "files created in the hosts container dir in a path where a tmpfs " + + "file system has been mounted are not visible to the container as " + + "the do not exist in the tmpfs" + ): + machine.succeed( + "touch /var/lib/nixos-containers/tmpfs/var/test.file", + "test -e /var/lib/nixos-containers/tmpfs/var/test.file", + "ls -l /var/lib/nixos-containers/tmpfs/var/ | grep -q test.file 2>/dev/null", + ) + machine.fail(tmpfs_cmd("ls -l /var | grep -q test.file")) + ''; +} diff --git a/nixos/tests/containers-unified-hierarchy.nix b/nixos/tests/containers-unified-hierarchy.nix index 5ea89d96dcfe..417f68d0d31d 100644 --- a/nixos/tests/containers-unified-hierarchy.nix +++ b/nixos/tests/containers-unified-hierarchy.nix @@ -1,26 +1,24 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "containers-unified-hierarchy"; - meta = { - maintainers = with lib.maintainers; [ farnoy ]; - }; +{ pkgs, lib, ... }: +{ + name = "containers-unified-hierarchy"; + meta = { + maintainers = with lib.maintainers; [ farnoy ]; + }; - nodes.machine = - { ... }: - { - containers = { - test-container = { - autoStart = true; - config = { }; - }; + nodes.machine = + { ... }: + { + containers = { + test-container = { + autoStart = true; + config = { }; }; }; + }; - testScript = '' - machine.wait_for_unit("default.target") + testScript = '' + machine.wait_for_unit("default.target") - machine.succeed("echo 'stat -fc %T /sys/fs/cgroup/ | grep cgroup2fs' | nixos-container root-login test-container") - ''; - } -) + machine.succeed("echo 'stat -fc %T /sys/fs/cgroup/ | grep cgroup2fs' | nixos-container root-login test-container") + ''; +} diff --git a/nixos/tests/convos.nix b/nixos/tests/convos.nix index f92ab49a2988..fd0588290950 100644 --- a/nixos/tests/convos.nix +++ b/nixos/tests/convos.nix @@ -1,28 +1,26 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... 
}: - let - port = 3333; - in - { - name = "convos"; - meta.maintainers = with lib.maintainers; [ sgo ]; +let + port = 3333; +in +{ + name = "convos"; + meta.maintainers = with lib.maintainers; [ sgo ]; - nodes = { - machine = - { pkgs, ... }: - { - services.convos = { - enable = true; - listenPort = port; - }; + nodes = { + machine = + { pkgs, ... }: + { + services.convos = { + enable = true; + listenPort = port; }; - }; + }; + }; - testScript = '' - machine.wait_for_unit("convos") - machine.wait_for_open_port(${toString port}) - machine.succeed("curl -f http://localhost:${toString port}/") - ''; - } -) + testScript = '' + machine.wait_for_unit("convos") + machine.wait_for_open_port(${toString port}) + machine.succeed("curl -f http://localhost:${toString port}/") + ''; +} diff --git a/nixos/tests/coturn.nix b/nixos/tests/coturn.nix index e090b45d4af6..8366ceb0045e 100644 --- a/nixos/tests/coturn.nix +++ b/nixos/tests/coturn.nix @@ -1,38 +1,36 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "coturn"; - nodes = { - default = { - services.coturn.enable = true; - }; - secretsfile = { - boot.postBootCommands = '' - echo "some-very-secret-string" > /run/coturn-secret - ''; - services.coturn = { - enable = true; - static-auth-secret-file = "/run/coturn-secret"; - }; +{ pkgs, ... }: +{ + name = "coturn"; + nodes = { + default = { + services.coturn.enable = true; + }; + secretsfile = { + boot.postBootCommands = '' + echo "some-very-secret-string" > /run/coturn-secret + ''; + services.coturn = { + enable = true; + static-auth-secret-file = "/run/coturn-secret"; }; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - with subtest("by default works without configuration"): - default.wait_for_unit("coturn.service") + with subtest("by default works without configuration"): + default.wait_for_unit("coturn.service") - with subtest("works with static-auth-secret-file"): - secretsfile.wait_for_unit("coturn.service") - secretsfile.wait_for_open_port(3478) - secretsfile.succeed("grep 'some-very-secret-string' /run/coturn/turnserver.cfg") - # Forbidden IP, fails: - secretsfile.fail("${pkgs.coturn}/bin/turnutils_uclient -W some-very-secret-string 127.0.0.1 -DgX -e 127.0.0.1 -n 1 -c -y") - # allowed-peer-ip, should succeed: - secretsfile.succeed("${pkgs.coturn}/bin/turnutils_uclient -W some-very-secret-string 192.168.1.2 -DgX -e 192.168.1.2 -n 1 -c -y") + with subtest("works with static-auth-secret-file"): + secretsfile.wait_for_unit("coturn.service") + secretsfile.wait_for_open_port(3478) + secretsfile.succeed("grep 'some-very-secret-string' /run/coturn/turnserver.cfg") + # Forbidden IP, fails: + secretsfile.fail("${pkgs.coturn}/bin/turnutils_uclient -W some-very-secret-string 127.0.0.1 -DgX -e 127.0.0.1 -n 1 -c -y") + # allowed-peer-ip, should succeed: + secretsfile.succeed("${pkgs.coturn}/bin/turnutils_uclient -W some-very-secret-string 192.168.1.2 -DgX -e 192.168.1.2 -n 1 -c -y") - default.log(default.execute("systemd-analyze security coturn.service | grep -v '✓'")[1]) - ''; - } -) + default.log(default.execute("systemd-analyze security coturn.service | grep -v '✓'")[1]) + ''; +} diff --git a/nixos/tests/couchdb.nix b/nixos/tests/couchdb.nix index a3945915f98b..d11176bdd311 100644 --- a/nixos/tests/couchdb.nix +++ b/nixos/tests/couchdb.nix @@ -14,51 +14,49 @@ let testpass = "cowabunga"; testlogin = "${testuser}:${testpass}@"; in -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "couchdb"; - meta.maintainers = [ ]; +{ pkgs, lib, ... 
}: +{ + name = "couchdb"; + meta.maintainers = [ ]; - nodes = { - couchdb3 = makeNode pkgs.couchdb3 testuser testpass; - }; + nodes = { + couchdb3 = makeNode pkgs.couchdb3 testuser testpass; + }; - testScript = - let - curlJqCheck = - login: action: path: jqexpr: result: - pkgs.writeScript "curl-jq-check-${action}-${path}.sh" '' - RESULT=$(curl -X ${action} http://${login}127.0.0.1:5984/${path} | jq -r '${jqexpr}') - echo $RESULT >&2 - if [ "$RESULT" != "${result}" ]; then - exit 1 - fi - ''; - in - '' - start_all() + testScript = + let + curlJqCheck = + login: action: path: jqexpr: result: + pkgs.writeScript "curl-jq-check-${action}-${path}.sh" '' + RESULT=$(curl -X ${action} http://${login}127.0.0.1:5984/${path} | jq -r '${jqexpr}') + echo $RESULT >&2 + if [ "$RESULT" != "${result}" ]; then + exit 1 + fi + ''; + in + '' + start_all() - couchdb3.wait_for_unit("couchdb.service") - couchdb3.wait_until_succeeds( - "${curlJqCheck testlogin "GET" "" ".couchdb" "Welcome"}" - ) - couchdb3.wait_until_succeeds( - "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "0"}" - ) - couchdb3.succeed("${curlJqCheck testlogin "PUT" "foo" ".ok" "true"}") - couchdb3.succeed( - "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "1"}" - ) - couchdb3.succeed( - "${curlJqCheck testlogin "DELETE" "foo" ".ok" "true"}" - ) - couchdb3.succeed( - "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "0"}" - ) - couchdb3.succeed( - "${curlJqCheck testlogin "GET" "_node/couchdb@127.0.0.1" ".couchdb" "Welcome"}" - ) - ''; - } -) + couchdb3.wait_for_unit("couchdb.service") + couchdb3.wait_until_succeeds( + "${curlJqCheck testlogin "GET" "" ".couchdb" "Welcome"}" + ) + couchdb3.wait_until_succeeds( + "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "0"}" + ) + couchdb3.succeed("${curlJqCheck testlogin "PUT" "foo" ".ok" "true"}") + couchdb3.succeed( + "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "1"}" + ) + couchdb3.succeed( + "${curlJqCheck testlogin "DELETE" "foo" ".ok" "true"}" + ) + couchdb3.succeed( + "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "0"}" + ) + couchdb3.succeed( + "${curlJqCheck testlogin "GET" "_node/couchdb@127.0.0.1" ".couchdb" "Welcome"}" + ) + ''; +} diff --git a/nixos/tests/crabfit.nix b/nixos/tests/crabfit.nix index eb38a0ae0cfc..e22615c5a049 100644 --- a/nixos/tests/crabfit.nix +++ b/nixos/tests/crabfit.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - { - name = "crabfit"; +{ + name = "crabfit"; - meta.maintainers = [ ]; + meta.maintainers = [ ]; - nodes = { - machine = - { pkgs, ... }: - { - services.crabfit = { - enable = true; + nodes = { + machine = + { pkgs, ... 
}: + { + services.crabfit = { + enable = true; - frontend.host = "http://127.0.0.1:3001"; - api.host = "127.0.0.1:3000"; - }; + frontend.host = "http://127.0.0.1:3001"; + api.host = "127.0.0.1:3000"; }; - }; + }; + }; - # TODO: Add a reverse proxy and a dns entry for testing - testScript = '' - machine.wait_for_unit("crabfit-api") - machine.wait_for_unit("crabfit-frontend") + # TODO: Add a reverse proxy and a dns entry for testing + testScript = '' + machine.wait_for_unit("crabfit-api") + machine.wait_for_unit("crabfit-frontend") - machine.wait_for_open_port(3000) - machine.wait_for_open_port(3001) + machine.wait_for_open_port(3000) + machine.wait_for_open_port(3001) - machine.succeed("curl -f http://localhost:3001/") - ''; - } -) + machine.succeed("curl -f http://localhost:3001/") + ''; +} diff --git a/nixos/tests/croc.nix b/nixos/tests/croc.nix index 4b6d3f4d85a7..296d12c4ebe6 100644 --- a/nixos/tests/croc.nix +++ b/nixos/tests/croc.nix @@ -1,59 +1,57 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - client = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.croc ]; - }; - pass = "PassRelay"; - in - { - name = "croc"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - equirosa - SuperSandro2000 - ]; +{ pkgs, ... }: +let + client = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.croc ]; }; + pass = "PassRelay"; +in +{ + name = "croc"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + equirosa + SuperSandro2000 + ]; + }; - nodes = { - relay = { - services.croc = { - enable = true; - pass = pass; - openFirewall = true; - }; + nodes = { + relay = { + services.croc = { + enable = true; + pass = pass; + openFirewall = true; }; - sender = client; - receiver = client; }; + sender = client; + receiver = client; + }; - testScript = '' - start_all() + testScript = '' + start_all() - # wait until relay is up - relay.wait_for_unit("croc") - relay.wait_for_open_port(9009) - relay.wait_for_open_port(9010) - relay.wait_for_open_port(9011) - relay.wait_for_open_port(9012) - relay.wait_for_open_port(9013) + # wait until relay is up + relay.wait_for_unit("croc") + relay.wait_for_open_port(9009) + relay.wait_for_open_port(9010) + relay.wait_for_open_port(9011) + relay.wait_for_open_port(9012) + relay.wait_for_open_port(9013) - # generate testfiles and send them - sender.wait_for_unit("multi-user.target") - sender.execute("echo Hello World > testfile01.txt") - sender.execute("echo Hello Earth > testfile02.txt") - sender.execute( - "env CROC_SECRET=topSecret croc --pass ${pass} --relay relay send testfile01.txt testfile02.txt >&2 &" - ) + # generate testfiles and send them + sender.wait_for_unit("multi-user.target") + sender.execute("echo Hello World > testfile01.txt") + sender.execute("echo Hello Earth > testfile02.txt") + sender.execute( + "env CROC_SECRET=topSecret croc --pass ${pass} --relay relay send testfile01.txt testfile02.txt >&2 &" + ) - # receive the testfiles and check them - receiver.succeed( - "env CROC_SECRET=topSecret croc --pass ${pass} --yes --relay relay" - ) - assert "Hello World" in receiver.succeed("cat testfile01.txt") - assert "Hello Earth" in receiver.succeed("cat testfile02.txt") - ''; - } -) + # receive the testfiles and check them + receiver.succeed( + "env CROC_SECRET=topSecret croc --pass ${pass} --yes --relay relay" + ) + assert "Hello World" in receiver.succeed("cat testfile01.txt") + assert "Hello Earth" in receiver.succeed("cat testfile02.txt") + ''; +} diff --git a/nixos/tests/curl-impersonate.nix 
b/nixos/tests/curl-impersonate.nix index 28e741a2e19f..866bd78ce163 100644 --- a/nixos/tests/curl-impersonate.nix +++ b/nixos/tests/curl-impersonate.nix @@ -24,182 +24,180 @@ uses upstream for its tests. */ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - # Update with domains in TestImpersonate.TEST_URLS if needed from: - # https://github.com/lwthiker/curl-impersonate/blob/main/tests/test_impersonate.py - domains = [ - "www.wikimedia.org" - "www.wikipedia.org" - "www.mozilla.org" - "www.apache.org" - "www.kernel.org" - "git-scm.com" - ]; +{ pkgs, lib, ... }: +let + # Update with domains in TestImpersonate.TEST_URLS if needed from: + # https://github.com/lwthiker/curl-impersonate/blob/main/tests/test_impersonate.py + domains = [ + "www.wikimedia.org" + "www.wikipedia.org" + "www.mozilla.org" + "www.apache.org" + "www.kernel.org" + "git-scm.com" + ]; - tls-certs = - let - # Configure CA with X.509 v3 extensions that would be trusted by curl - ca-cert-conf = pkgs.writeText "curl-impersonate-ca.cnf" '' - basicConstraints = critical, CA:TRUE - subjectKeyIdentifier = hash - authorityKeyIdentifier = keyid:always, issuer:always - keyUsage = critical, cRLSign, digitalSignature, keyCertSign - ''; - - # Configure leaf certificate with X.509 v3 extensions that would be trusted - # by curl and set subject-alternative names for test domains - tls-cert-conf = pkgs.writeText "curl-impersonate-tls.cnf" '' - basicConstraints = critical, CA:FALSE - subjectKeyIdentifier = hash - authorityKeyIdentifier = keyid:always, issuer:always - keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment, keyAgreement - extendedKeyUsage = critical, serverAuth - subjectAltName = @alt_names - - [alt_names] - ${lib.concatStringsSep "\n" (lib.imap0 (idx: domain: "DNS.${toString idx} = ${domain}") domains)} - ''; - in - pkgs.runCommand "curl-impersonate-test-certs" - { - nativeBuildInputs = [ pkgs.openssl ]; - } - '' - # create CA certificate and key - openssl req -newkey rsa:4096 -keyout ca-key.pem -out ca-csr.pem -nodes -subj '/CN=curl-impersonate-ca.nixos.test' - openssl x509 -req -sha512 -in ca-csr.pem -key ca-key.pem -out ca.pem -extfile ${ca-cert-conf} -days 36500 - openssl x509 -in ca.pem -text - - # create server certificate and key - openssl req -newkey rsa:4096 -keyout key.pem -out csr.pem -nodes -subj '/CN=curl-impersonate.nixos.test' - openssl x509 -req -sha512 -in csr.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile ${tls-cert-conf} -days 36500 - openssl x509 -in cert.pem -text - - # output CA cert and server cert and key - mkdir -p $out - cp key.pem cert.pem ca.pem $out - ''; - - # Test script - curl-impersonate-test = - let - # Build miniature libcurl client used by test driver - minicurl = - pkgs.runCommandCC "minicurl" - { - buildInputs = [ pkgs.curl ]; - } - '' - mkdir -p $out/bin - $CC -Wall -Werror -o $out/bin/minicurl ${pkgs.curl-impersonate.src}/tests/minicurl.c `curl-config --libs` - ''; - in - pkgs.writeShellScript "curl-impersonate-test" '' - set -euxo pipefail - - # Test driver requirements - export PATH="${ - with pkgs; - lib.makeBinPath [ - bash - coreutils - python3Packages.pytest - nghttp2 - tcpdump - ] - }" - export PYTHONPATH="${ - with pkgs.python3Packages; - makePythonPath [ - pyyaml - pytest-asyncio - dpkt - ts1-signatures - ] - }" - - # Prepare test root prefix - mkdir -p usr/{bin,lib} - cp -rs ${pkgs.curl-impersonate}/* ${minicurl}/* usr/ - - cp -r ${pkgs.curl-impersonate.src}/tests ./ - - # Run tests - cd tests - pytest . 
--install-dir ../usr --capture-interface eth1 --exitfirst -k 'not test_http2_headers' + tls-certs = + let + # Configure CA with X.509 v3 extensions that would be trusted by curl + ca-cert-conf = pkgs.writeText "curl-impersonate-ca.cnf" '' + basicConstraints = critical, CA:TRUE + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid:always, issuer:always + keyUsage = critical, cRLSign, digitalSignature, keyCertSign ''; - in - { - name = "curl-impersonate"; - meta = with lib.maintainers; { - maintainers = [ ]; - }; + # Configure leaf certificate with X.509 v3 extensions that would be trusted + # by curl and set subject-alternative names for test domains + tls-cert-conf = pkgs.writeText "curl-impersonate-tls.cnf" '' + basicConstraints = critical, CA:FALSE + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid:always, issuer:always + keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment, keyAgreement + extendedKeyUsage = critical, serverAuth + subjectAltName = @alt_names - nodes = { - web = - { - nodes, - pkgs, - lib, - config, - ... - }: - { - networking.firewall.allowedTCPPorts = [ - 80 - 443 - ]; + [alt_names] + ${lib.concatStringsSep "\n" (lib.imap0 (idx: domain: "DNS.${toString idx} = ${domain}") domains)} + ''; + in + pkgs.runCommand "curl-impersonate-test-certs" + { + nativeBuildInputs = [ pkgs.openssl ]; + } + '' + # create CA certificate and key + openssl req -newkey rsa:4096 -keyout ca-key.pem -out ca-csr.pem -nodes -subj '/CN=curl-impersonate-ca.nixos.test' + openssl x509 -req -sha512 -in ca-csr.pem -key ca-key.pem -out ca.pem -extfile ${ca-cert-conf} -days 36500 + openssl x509 -in ca.pem -text - services = { - nginx = { - enable = true; - virtualHosts."curl-impersonate.nixos.test" = { - default = true; - addSSL = true; - sslCertificate = "${tls-certs}/cert.pem"; - sslCertificateKey = "${tls-certs}/key.pem"; - }; + # create server certificate and key + openssl req -newkey rsa:4096 -keyout key.pem -out csr.pem -nodes -subj '/CN=curl-impersonate.nixos.test' + openssl x509 -req -sha512 -in csr.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile ${tls-cert-conf} -days 36500 + openssl x509 -in cert.pem -text + + # output CA cert and server cert and key + mkdir -p $out + cp key.pem cert.pem ca.pem $out + ''; + + # Test script + curl-impersonate-test = + let + # Build miniature libcurl client used by test driver + minicurl = + pkgs.runCommandCC "minicurl" + { + buildInputs = [ pkgs.curl ]; + } + '' + mkdir -p $out/bin + $CC -Wall -Werror -o $out/bin/minicurl ${pkgs.curl-impersonate.src}/tests/minicurl.c `curl-config --libs` + ''; + in + pkgs.writeShellScript "curl-impersonate-test" '' + set -euxo pipefail + + # Test driver requirements + export PATH="${ + with pkgs; + lib.makeBinPath [ + bash + coreutils + python3Packages.pytest + nghttp2 + tcpdump + ] + }" + export PYTHONPATH="${ + with pkgs.python3Packages; + makePythonPath [ + pyyaml + pytest-asyncio + dpkt + ts1-signatures + ] + }" + + # Prepare test root prefix + mkdir -p usr/{bin,lib} + cp -rs ${pkgs.curl-impersonate}/* ${minicurl}/* usr/ + + cp -r ${pkgs.curl-impersonate.src}/tests ./ + + # Run tests + cd tests + pytest . --install-dir ../usr --capture-interface eth1 --exitfirst -k 'not test_http2_headers' + ''; +in +{ + name = "curl-impersonate"; + + meta = with lib.maintainers; { + maintainers = [ ]; + }; + + nodes = { + web = + { + nodes, + pkgs, + lib, + config, + ... 
+ }: + { + networking.firewall.allowedTCPPorts = [ + 80 + 443 + ]; + + services = { + nginx = { + enable = true; + virtualHosts."curl-impersonate.nixos.test" = { + default = true; + addSSL = true; + sslCertificate = "${tls-certs}/cert.pem"; + sslCertificateKey = "${tls-certs}/key.pem"; }; }; }; + }; - curl = - { - nodes, - pkgs, - lib, - config, - ... - }: - { - networking.extraHosts = lib.concatStringsSep "\n" ( - map (domain: "${nodes.web.networking.primaryIPAddress} ${domain}") domains - ); + curl = + { + nodes, + pkgs, + lib, + config, + ... + }: + { + networking.extraHosts = lib.concatStringsSep "\n" ( + map (domain: "${nodes.web.networking.primaryIPAddress} ${domain}") domains + ); - security.pki.certificateFiles = [ "${tls-certs}/ca.pem" ]; - }; - }; + security.pki.certificateFiles = [ "${tls-certs}/ca.pem" ]; + }; + }; - testScript = - { nodes, ... }: - '' - start_all() + testScript = + { nodes, ... }: + '' + start_all() - with subtest("Wait for network"): - web.systemctl("start network-online.target") - curl.systemctl("start network-online.target") - web.wait_for_unit("network-online.target") - curl.wait_for_unit("network-online.target") + with subtest("Wait for network"): + web.systemctl("start network-online.target") + curl.systemctl("start network-online.target") + web.wait_for_unit("network-online.target") + curl.wait_for_unit("network-online.target") - with subtest("Wait for web server"): - web.wait_for_unit("nginx.service") - web.wait_for_open_port(443) + with subtest("Wait for web server"): + web.wait_for_unit("nginx.service") + web.wait_for_open_port(443) - with subtest("Run curl-impersonate tests"): - curl.succeed("${curl-impersonate-test}") - ''; - } -) + with subtest("Run curl-impersonate tests"): + curl.succeed("${curl-impersonate-test}") + ''; +} diff --git a/nixos/tests/dae.nix b/nixos/tests/dae.nix index 127bbcb78813..f3e4edaf8f94 100644 --- a/nixos/tests/dae.nix +++ b/nixos/tests/dae.nix @@ -1,38 +1,36 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { +{ lib, pkgs, ... }: +{ - name = "dae"; + name = "dae"; - meta = { - maintainers = with lib.maintainers; [ oluceps ]; + meta = { + maintainers = with lib.maintainers; [ oluceps ]; + }; + + nodes.machine = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.curl ]; + services.nginx = { + enable = true; + statusPage = true; + }; + services.dae = { + enable = true; + config = '' + global { disable_waiting_network: true } + routing{} + ''; + }; }; - nodes.machine = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.curl ]; - services.nginx = { - enable = true; - statusPage = true; - }; - services.dae = { - enable = true; - config = '' - global { disable_waiting_network: true } - routing{} - ''; - }; - }; + testScript = '' + machine.wait_for_unit("nginx.service") + machine.wait_for_unit("dae.service") - testScript = '' - machine.wait_for_unit("nginx.service") - machine.wait_for_unit("dae.service") + machine.wait_for_open_port(80) - machine.wait_for_open_port(80) + machine.succeed("curl --fail --max-time 10 http://localhost") + ''; - machine.succeed("curl --fail --max-time 10 http://localhost") - ''; - - } -) +} diff --git a/nixos/tests/db-rest.nix b/nixos/tests/db-rest.nix index 18843f330837..9d6cb251a45b 100644 --- a/nixos/tests/db-rest.nix +++ b/nixos/tests/db-rest.nix @@ -1,125 +1,123 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "db-rest"; - meta.maintainers = with pkgs.lib.maintainers; [ marie ]; +{ pkgs, ... 
}: +{ + name = "db-rest"; + meta.maintainers = with pkgs.lib.maintainers; [ marie ]; - nodes = { - database = { - networking = { - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "192.168.2.10"; - prefixLength = 24; - } - ]; - }; - firewall.allowedTCPPorts = [ 31638 ]; - }; - - services.redis.servers.db-rest = { - enable = true; - bind = "0.0.0.0"; - requirePass = "choochoo"; - port = 31638; + nodes = { + database = { + networking = { + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "192.168.2.10"; + prefixLength = 24; + } + ]; }; + firewall.allowedTCPPorts = [ 31638 ]; }; - serverWithTcp = - { pkgs, ... }: - { - environment = { - etc = { - "db-rest/password-redis-db".text = '' - choochoo - ''; - }; - }; - - networking = { - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "192.168.2.11"; - prefixLength = 24; - } - ]; - }; - firewall.allowedTCPPorts = [ 3000 ]; - }; - - services.db-rest = { - enable = true; - host = "0.0.0.0"; - redis = { - enable = true; - createLocally = false; - host = "192.168.2.10"; - port = 31638; - passwordFile = "/etc/db-rest/password-redis-db"; - useSSL = false; - }; - }; - }; - - serverWithUnixSocket = - { pkgs, ... }: - { - networking = { - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "192.168.2.12"; - prefixLength = 24; - } - ]; - }; - firewall.allowedTCPPorts = [ 3000 ]; - }; - - services.db-rest = { - enable = true; - host = "0.0.0.0"; - redis = { - enable = true; - createLocally = true; - }; - }; - }; - - client = { - environment.systemPackages = [ pkgs.jq ]; - networking = { - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "192.168.2.13"; - prefixLength = 24; - } - ]; - }; - }; + services.redis.servers.db-rest = { + enable = true; + bind = "0.0.0.0"; + requirePass = "choochoo"; + port = 31638; }; }; - testScript = '' - start_all() + serverWithTcp = + { pkgs, ... }: + { + environment = { + etc = { + "db-rest/password-redis-db".text = '' + choochoo + ''; + }; + }; - with subtest("db-rest redis with TCP socket"): - database.wait_for_unit("redis-db-rest.service") - database.wait_for_open_port(31638) + networking = { + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "192.168.2.11"; + prefixLength = 24; + } + ]; + }; + firewall.allowedTCPPorts = [ 3000 ]; + }; - serverWithTcp.wait_for_unit("db-rest.service") - serverWithTcp.wait_for_open_port(3000) + services.db-rest = { + enable = true; + host = "0.0.0.0"; + redis = { + enable = true; + createLocally = false; + host = "192.168.2.10"; + port = 31638; + passwordFile = "/etc/db-rest/password-redis-db"; + useSSL = false; + }; + }; + }; - client.succeed("curl --fail --get http://192.168.2.11:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'") + serverWithUnixSocket = + { pkgs, ... 
}: + { + networking = { + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "192.168.2.12"; + prefixLength = 24; + } + ]; + }; + firewall.allowedTCPPorts = [ 3000 ]; + }; - with subtest("db-rest redis with Unix socket"): - serverWithUnixSocket.wait_for_unit("db-rest.service") - serverWithUnixSocket.wait_for_open_port(3000) + services.db-rest = { + enable = true; + host = "0.0.0.0"; + redis = { + enable = true; + createLocally = true; + }; + }; + }; - client.succeed("curl --fail --get http://192.168.2.12:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'") - ''; - } -) + client = { + environment.systemPackages = [ pkgs.jq ]; + networking = { + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "192.168.2.13"; + prefixLength = 24; + } + ]; + }; + }; + }; + }; + + testScript = '' + start_all() + + with subtest("db-rest redis with TCP socket"): + database.wait_for_unit("redis-db-rest.service") + database.wait_for_open_port(31638) + + serverWithTcp.wait_for_unit("db-rest.service") + serverWithTcp.wait_for_open_port(3000) + + client.succeed("curl --fail --get http://192.168.2.11:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'") + + with subtest("db-rest redis with Unix socket"): + serverWithUnixSocket.wait_for_unit("db-rest.service") + serverWithUnixSocket.wait_for_open_port(3000) + + client.succeed("curl --fail --get http://192.168.2.12:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'") + ''; +} diff --git a/nixos/tests/dconf.nix b/nixos/tests/dconf.nix index 933817917104..195c56702e2b 100644 --- a/nixos/tests/dconf.nix +++ b/nixos/tests/dconf.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "dconf"; +{ lib, ... }: +{ + name = "dconf"; - meta.maintainers = with lib.maintainers; [ - linsui - ]; + meta.maintainers = with lib.maintainers; [ + linsui + ]; - nodes.machine = - { - config, - pkgs, - lib, - ... - }: - { - users.extraUsers.alice = { - isNormalUser = true; - }; - programs.dconf = with lib.gvariant; { - enable = true; - profiles.user.databases = [ - { - settings = { - "test/not".locked = mkInt32 1; - "test/is".locked = "locked"; - }; - locks = [ - "/test/is/locked" - ]; - } - ]; - }; + nodes.machine = + { + config, + pkgs, + lib, + ... 
+ }: + { + users.extraUsers.alice = { + isNormalUser = true; }; + programs.dconf = with lib.gvariant; { + enable = true; + profiles.user.databases = [ + { + settings = { + "test/not".locked = mkInt32 1; + "test/is".locked = "locked"; + }; + locks = [ + "/test/is/locked" + ]; + } + ]; + }; + }; - testScript = '' - machine.succeed("test $(dconf read -d /test/not/locked) == 1") - machine.succeed("test $(dconf read -d /test/is/locked) == \"'locked'\"") - machine.fail("sudo -u alice dbus-run-session -- dconf write /test/is/locked \"@s 'unlocked'\"") - machine.succeed("sudo -u alice dbus-run-session -- dconf write /test/not/locked \"@i 2\"") - ''; - } -) + testScript = '' + machine.succeed("test $(dconf read -d /test/not/locked) == 1") + machine.succeed("test $(dconf read -d /test/is/locked) == \"'locked'\"") + machine.fail("sudo -u alice dbus-run-session -- dconf write /test/is/locked \"@s 'unlocked'\"") + machine.succeed("sudo -u alice dbus-run-session -- dconf write /test/not/locked \"@i 2\"") + ''; +} diff --git a/nixos/tests/ddns-updater.nix b/nixos/tests/ddns-updater.nix index caa763e09bba..95e5953e6a11 100644 --- a/nixos/tests/ddns-updater.nix +++ b/nixos/tests/ddns-updater.nix @@ -1,28 +1,26 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - port = 6000; - in - { - name = "ddns-updater"; +{ pkgs, lib, ... }: +let + port = 6000; +in +{ + name = "ddns-updater"; - meta.maintainers = with lib.maintainers; [ delliott ]; + meta.maintainers = with lib.maintainers; [ delliott ]; - nodes.machine = - { pkgs, ... }: - { - services.ddns-updater = { - enable = true; - environment = { - LISTENING_ADDRESS = ":" + (toString port); - }; + nodes.machine = + { pkgs, ... }: + { + services.ddns-updater = { + enable = true; + environment = { + LISTENING_ADDRESS = ":" + (toString port); }; }; + }; - testScript = '' - machine.wait_for_unit("ddns-updater.service") - machine.wait_for_open_port(${toString port}) - machine.succeed("curl --fail http://localhost:${toString port}/") - ''; - } -) + testScript = '' + machine.wait_for_unit("ddns-updater.service") + machine.wait_for_open_port(${toString port}) + machine.succeed("curl --fail http://localhost:${toString port}/") + ''; +} diff --git a/nixos/tests/deconz.nix b/nixos/tests/deconz.nix index ceeabb6c261a..ea0e19a74683 100644 --- a/nixos/tests/deconz.nix +++ b/nixos/tests/deconz.nix @@ -1,37 +1,35 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - httpPort = 800; - in - { - name = "deconz"; +{ pkgs, lib, ... }: +let + httpPort = 800; +in +{ + name = "deconz"; - meta.maintainers = with lib.maintainers; [ - bjornfor - ]; + meta.maintainers = with lib.maintainers; [ + bjornfor + ]; - nodes.machine = - { - config, - pkgs, - lib, - ... - }: - { - nixpkgs.config.allowUnfree = true; - services.deconz = { - enable = true; - inherit httpPort; - extraArgs = [ - "--dbg-err=2" - "--dbg-info=2" - ]; - }; + nodes.machine = + { + config, + pkgs, + lib, + ... 
+ }: + { + nixpkgs.config.allowUnfree = true; + services.deconz = { + enable = true; + inherit httpPort; + extraArgs = [ + "--dbg-err=2" + "--dbg-info=2" + ]; }; + }; - testScript = '' - machine.wait_for_unit("deconz.service") - machine.succeed("curl -sfL http://localhost:${toString httpPort}") - ''; - } -) + testScript = '' + machine.wait_for_unit("deconz.service") + machine.succeed("curl -sfL http://localhost:${toString httpPort}") + ''; +} diff --git a/nixos/tests/deepin.nix b/nixos/tests/deepin.nix index bf545c08f886..677e58942079 100644 --- a/nixos/tests/deepin.nix +++ b/nixos/tests/deepin.nix @@ -1,57 +1,55 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "deepin"; +{ pkgs, lib, ... }: +{ + name = "deepin"; - meta.maintainers = lib.teams.deepin.members; + meta.maintainers = lib.teams.deepin.members; - nodes.machine = - { ... }: - { - imports = [ - ./common/user-account.nix - ]; + nodes.machine = + { ... }: + { + imports = [ + ./common/user-account.nix + ]; - virtualisation.memorySize = 2048; + virtualisation.memorySize = 2048; - services.xserver.enable = true; + services.xserver.enable = true; - services.xserver.displayManager = { - lightdm.enable = true; - autoLogin = { - enable = true; - user = "alice"; - }; + services.xserver.displayManager = { + lightdm.enable = true; + autoLogin = { + enable = true; + user = "alice"; }; - - services.xserver.desktopManager.deepin.enable = true; }; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - in - '' - with subtest("Wait for login"): - machine.wait_for_x() - machine.wait_for_file("${user.home}/.Xauthority") - machine.succeed("xauth merge ${user.home}/.Xauthority") + services.xserver.desktopManager.deepin.enable = true; + }; - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + in + '' + with subtest("Wait for login"): + machine.wait_for_x() + machine.wait_for_file("${user.home}/.Xauthority") + machine.succeed("xauth merge ${user.home}/.Xauthority") - with subtest("Check if Deepin session components actually start"): - machine.wait_until_succeeds("pgrep -f dde-session-daemon") - machine.wait_for_window("dde-session-daemon") - machine.wait_until_succeeds("pgrep -f dde-desktop") - machine.wait_for_window("dde-desktop") + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - with subtest("Open deepin-terminal"): - machine.succeed("su - ${user.name} -c 'DISPLAY=:0 deepin-terminal >&2 &'") - machine.wait_for_window("deepin-terminal") - machine.sleep(20) - machine.screenshot("screen") - ''; - } -) + with subtest("Check if Deepin session components actually start"): + machine.wait_until_succeeds("pgrep -f dde-session-daemon") + machine.wait_for_window("dde-session-daemon") + machine.wait_until_succeeds("pgrep -f dde-desktop") + machine.wait_for_window("dde-desktop") + + with subtest("Open deepin-terminal"): + machine.succeed("su - ${user.name} -c 'DISPLAY=:0 deepin-terminal >&2 &'") + machine.wait_for_window("deepin-terminal") + machine.sleep(20) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/deluge.nix b/nixos/tests/deluge.nix index 58763bab5345..b7385608a86c 100644 --- a/nixos/tests/deluge.nix +++ b/nixos/tests/deluge.nix @@ -1,69 +1,67 @@ -import ./make-test-python.nix ( - { pkgs, ... 
}: - { - name = "deluge"; - meta = with pkgs.lib.maintainers; { - maintainers = [ flokli ]; - }; +{ pkgs, ... }: +{ + name = "deluge"; + meta = with pkgs.lib.maintainers; { + maintainers = [ flokli ]; + }; - nodes = { - simple = { - services.deluge = { + nodes = { + simple = { + services.deluge = { + enable = true; + package = pkgs.deluge-2_x; + web = { enable = true; - package = pkgs.deluge-2_x; - web = { - enable = true; - openFirewall = true; - }; - }; - }; - - declarative = { - services.deluge = { - enable = true; - package = pkgs.deluge-2_x; openFirewall = true; - declarative = true; - config = { - allow_remote = true; - download_location = "/var/lib/deluge/my-download"; - daemon_port = 58846; - listen_ports = [ - 6881 - 6889 - ]; - }; - web = { - enable = true; - port = 3142; - }; - authFile = pkgs.writeText "deluge-auth" '' - localclient:a7bef72a890:10 - andrew:password:10 - user3:anotherpass:5 - ''; }; }; - }; - testScript = '' - start_all() + declarative = { + services.deluge = { + enable = true; + package = pkgs.deluge-2_x; + openFirewall = true; + declarative = true; + config = { + allow_remote = true; + download_location = "/var/lib/deluge/my-download"; + daemon_port = 58846; + listen_ports = [ + 6881 + 6889 + ]; + }; + web = { + enable = true; + port = 3142; + }; + authFile = pkgs.writeText "deluge-auth" '' + localclient:a7bef72a890:10 + andrew:password:10 + user3:anotherpass:5 + ''; + }; + }; - simple.wait_for_unit("deluged") - simple.wait_for_unit("delugeweb") - simple.wait_for_open_port(8112) - declarative.wait_for_unit("network.target") - declarative.wait_until_succeeds("curl --fail http://simple:8112") + }; - declarative.wait_for_unit("deluged") - declarative.wait_for_unit("delugeweb") - declarative.wait_until_succeeds("curl --fail http://declarative:3142") + testScript = '' + start_all() - # deluge-console always exits with 1. https://dev.deluge-torrent.org/ticket/3291 - declarative.succeed( - "(deluge-console 'connect 127.0.0.1:58846 andrew password; help' || true) | grep -q 'rm.*Remove a torrent'" - ) - ''; - } -) + simple.wait_for_unit("deluged") + simple.wait_for_unit("delugeweb") + simple.wait_for_open_port(8112) + declarative.wait_for_unit("network.target") + declarative.wait_until_succeeds("curl --fail http://simple:8112") + + declarative.wait_for_unit("deluged") + declarative.wait_for_unit("delugeweb") + declarative.wait_until_succeeds("curl --fail http://declarative:3142") + + # deluge-console always exits with 1. https://dev.deluge-torrent.org/ticket/3291 + declarative.succeed( + "(deluge-console 'connect 127.0.0.1:58846 andrew password; help' || true) | grep -q 'rm.*Remove a torrent'" + ) + ''; +} diff --git a/nixos/tests/dependency-track.nix b/nixos/tests/dependency-track.nix index baa55e779058..4039339ff8cd 100644 --- a/nixos/tests/dependency-track.nix +++ b/nixos/tests/dependency-track.nix @@ -1,71 +1,69 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - dependencyTrackPort = 8081; - in - { - name = "dependency-track"; - meta = { - maintainers = pkgs.lib.teams.cyberus.members; - }; +{ pkgs, ... }: +let + dependencyTrackPort = 8081; +in +{ + name = "dependency-track"; + meta = { + maintainers = pkgs.lib.teams.cyberus.members; + }; - nodes = { - server = - { pkgs, ... 
}: - { - virtualisation = { - cores = 2; - diskSize = 4096; - memorySize = 1024 * 2; - }; - - environment.systemPackages = with pkgs; [ curl ]; - systemd.services.dependency-track = { - # source: https://github.com/DependencyTrack/dependency-track/blob/37e0ba59e8057c18a87a7a76e247a8f75677a56c/dev/scripts/data-nist-generate-dummy.sh - preStart = '' - set -euo pipefail - - NIST_DIR="$HOME/.dependency-track/nist" - - rm -rf "$NIST_DIR" - mkdir -p "$NIST_DIR" - - for feed in $(seq "2024" "2002"); do - touch "$NIST_DIR/nvdcve-1.1-$feed.json.gz" - echo "9999999999999" > "$NIST_DIR/nvdcve-1.1-$feed.json.gz.ts" - done - ''; - }; - services.dependency-track = { - enable = true; - port = dependencyTrackPort; - nginx.domain = "localhost"; - database.passwordFile = "${pkgs.writeText "dbPassword" ''hunter2'THE'''H''''E''}"; - }; + nodes = { + server = + { pkgs, ... }: + { + virtualisation = { + cores = 2; + diskSize = 4096; + memorySize = 1024 * 2; }; - }; - testScript = - # python - '' - import json + environment.systemPackages = with pkgs; [ curl ]; + systemd.services.dependency-track = { + # source: https://github.com/DependencyTrack/dependency-track/blob/37e0ba59e8057c18a87a7a76e247a8f75677a56c/dev/scripts/data-nist-generate-dummy.sh + preStart = '' + set -euo pipefail - start_all() + NIST_DIR="$HOME/.dependency-track/nist" - server.wait_for_unit("dependency-track.service") - server.wait_until_succeeds( - "journalctl -o cat -u dependency-track.service | grep 'Dependency-Track is ready'" + rm -rf "$NIST_DIR" + mkdir -p "$NIST_DIR" + + for feed in $(seq "2024" "2002"); do + touch "$NIST_DIR/nvdcve-1.1-$feed.json.gz" + echo "9999999999999" > "$NIST_DIR/nvdcve-1.1-$feed.json.gz.ts" + done + ''; + }; + services.dependency-track = { + enable = true; + port = dependencyTrackPort; + nginx.domain = "localhost"; + database.passwordFile = "${pkgs.writeText "dbPassword" ''hunter2'THE'''H''''E''}"; + }; + }; + }; + + testScript = + # python + '' + import json + + start_all() + + server.wait_for_unit("dependency-track.service") + server.wait_until_succeeds( + "journalctl -o cat -u dependency-track.service | grep 'Dependency-Track is ready'" + ) + server.wait_for_open_port(${toString dependencyTrackPort}) + + with subtest("version api returns correct version"): + version = json.loads( + server.succeed("curl http://localhost/api/version") ) - server.wait_for_open_port(${toString dependencyTrackPort}) + assert version["version"] == "${pkgs.dependency-track.version}" - with subtest("version api returns correct version"): - version = json.loads( - server.succeed("curl http://localhost/api/version") - ) - assert version["version"] == "${pkgs.dependency-track.version}" - - with subtest("nginx serves frontend"): - server.succeed("curl http://localhost/ | grep \"Dependency-Track\"") - ''; - } -) + with subtest("nginx serves frontend"): + server.succeed("curl http://localhost/ | grep \"Dependency-Track\"") + ''; +} diff --git a/nixos/tests/devpi-server.nix b/nixos/tests/devpi-server.nix index cec019e9d1c4..723499941654 100644 --- a/nixos/tests/devpi-server.nix +++ b/nixos/tests/devpi-server.nix @@ -1,43 +1,41 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - server-port = 3141; - in - { - name = "devpi-server"; - meta = with pkgs.lib.maintainers; { - maintainers = [ cafkafk ]; - }; +{ pkgs, ... }: +let + server-port = 3141; +in +{ + name = "devpi-server"; + meta = with pkgs.lib.maintainers; { + maintainers = [ cafkafk ]; + }; - nodes = { - devpi = - { ... 
}: - { - services.devpi-server = { - enable = true; - host = "0.0.0.0"; - port = server-port; - openFirewall = true; - secretFile = pkgs.writeText "devpi-secret" "v263P+V3YGDYUyfYL/RBURw+tCPMDw94R/iCuBNJrDhaYrZYjpA6XPFVDDH8ViN20j77y2PHoMM/U0opNkVQ2g=="; - }; + nodes = { + devpi = + { ... }: + { + services.devpi-server = { + enable = true; + host = "0.0.0.0"; + port = server-port; + openFirewall = true; + secretFile = pkgs.writeText "devpi-secret" "v263P+V3YGDYUyfYL/RBURw+tCPMDw94R/iCuBNJrDhaYrZYjpA6XPFVDDH8ViN20j77y2PHoMM/U0opNkVQ2g=="; }; + }; - client1 = - { ... }: - { - environment.systemPackages = with pkgs; [ - devpi-client - jq - ]; - }; - }; + client1 = + { ... }: + { + environment.systemPackages = with pkgs; [ + devpi-client + jq + ]; + }; + }; - testScript = '' - start_all() - devpi.wait_for_unit("devpi-server.service") - devpi.wait_for_open_port(${builtins.toString server-port}) + testScript = '' + start_all() + devpi.wait_for_unit("devpi-server.service") + devpi.wait_for_open_port(${builtins.toString server-port}) - client1.succeed("devpi getjson http://devpi:${builtins.toString server-port}") - ''; - } -) + client1.succeed("devpi getjson http://devpi:${builtins.toString server-port}") + ''; +} diff --git a/nixos/tests/dex-oidc.nix b/nixos/tests/dex-oidc.nix index 37718fdcdbde..bc764cc1c471 100644 --- a/nixos/tests/dex-oidc.nix +++ b/nixos/tests/dex-oidc.nix @@ -1,84 +1,82 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "dex-oidc"; - meta.maintainers = with lib.maintainers; [ Flakebi ]; +{ lib, ... }: +{ + name = "dex-oidc"; + meta.maintainers = with lib.maintainers; [ Flakebi ]; - nodes.machine = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ jq ]; - services.dex = { - enable = true; - settings = { - issuer = "http://127.0.0.1:8080/dex"; - storage = { - type = "postgres"; - config.host = "/var/run/postgresql"; - }; - web.http = "127.0.0.1:8080"; - oauth2.skipApprovalScreen = true; - staticClients = [ - { - id = "oidcclient"; - name = "Client"; - redirectURIs = [ "https://example.com/callback" ]; - secretFile = "/etc/dex/oidcclient"; - } - ]; - connectors = [ - { - type = "mockPassword"; - id = "mock"; - name = "Example"; - config = { - username = "admin"; - password = "password"; - }; - } - ]; + nodes.machine = + { pkgs, ... }: + { + environment.systemPackages = with pkgs; [ jq ]; + services.dex = { + enable = true; + settings = { + issuer = "http://127.0.0.1:8080/dex"; + storage = { + type = "postgres"; + config.host = "/var/run/postgresql"; }; - }; - - # This should not be set from nix but through other means to not leak the secret. 
- environment.etc."dex/oidcclient" = { - mode = "0400"; - user = "dex"; - text = "oidcclientsecret"; - }; - - services.postgresql = { - enable = true; - ensureDatabases = [ "dex" ]; - ensureUsers = [ + web.http = "127.0.0.1:8080"; + oauth2.skipApprovalScreen = true; + staticClients = [ { - name = "dex"; - ensureDBOwnership = true; + id = "oidcclient"; + name = "Client"; + redirectURIs = [ "https://example.com/callback" ]; + secretFile = "/etc/dex/oidcclient"; + } + ]; + connectors = [ + { + type = "mockPassword"; + id = "mock"; + name = "Example"; + config = { + username = "admin"; + password = "password"; + }; } ]; }; }; - testScript = '' - with subtest("Web server gets ready"): - machine.wait_for_unit("dex.service", timeout=120) - # Wait until server accepts connections - machine.wait_until_succeeds("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid'", timeout=120) + # This should not be set from nix but through other means to not leak the secret. + environment.etc."dex/oidcclient" = { + mode = "0400"; + user = "dex"; + text = "oidcclientsecret"; + }; - with subtest("Login"): - state = machine.succeed("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid' | sed -n 's/.*state=\\(.*\\)\">.*/\\1/p'").strip() - print(f"Got state {state}") - # Login request returns 303 with redirect_url that has code as query parameter: - # https://example.com/callback?code=kibsamwdupuy2iwqnlbqei3u6&state= - code = machine.succeed(f"curl -fs 'localhost:8080/dex/auth/mock/login?back=&state={state}' -d 'login=admin&password=password' -w '%{{redirect_url}}' | sed -n 's/.*code=\\(.*\\)&.*/\\1/p'") - print(f"Got approval code {code}") - bearer = machine.succeed(f"curl -fs localhost:8080/dex/token -u oidcclient:oidcclientsecret -d 'grant_type=authorization_code&redirect_uri=https://example.com/callback&code={code}' | jq .access_token -r").strip() - print(f"Got access token {bearer}") + services.postgresql = { + enable = true; + ensureDatabases = [ "dex" ]; + ensureUsers = [ + { + name = "dex"; + ensureDBOwnership = true; + } + ]; + }; + }; - with subtest("Get userinfo"): - assert '"sub"' in machine.succeed( - f"curl -fs localhost:8080/dex/userinfo --oauth2-bearer {bearer}" - ) - ''; - } -) + testScript = '' + with subtest("Web server gets ready"): + machine.wait_for_unit("dex.service", timeout=120) + # Wait until server accepts connections + machine.wait_until_succeeds("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid'", timeout=120) + + with subtest("Login"): + state = machine.succeed("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid' | sed -n 's/.*state=\\(.*\\)\">.*/\\1/p'").strip() + print(f"Got state {state}") + # Login request returns 303 with redirect_url that has code as query parameter: + # https://example.com/callback?code=kibsamwdupuy2iwqnlbqei3u6&state= + code = machine.succeed(f"curl -fs 'localhost:8080/dex/auth/mock/login?back=&state={state}' -d 'login=admin&password=password' -w '%{{redirect_url}}' | sed -n 's/.*code=\\(.*\\)&.*/\\1/p'") + print(f"Got approval code {code}") + bearer = machine.succeed(f"curl -fs localhost:8080/dex/token -u oidcclient:oidcclientsecret -d 'grant_type=authorization_code&redirect_uri=https://example.com/callback&code={code}' | jq .access_token 
-r").strip() + print(f"Got access token {bearer}") + + with subtest("Get userinfo"): + assert '"sub"' in machine.succeed( + f"curl -fs localhost:8080/dex/userinfo --oauth2-bearer {bearer}" + ) + ''; +} diff --git a/nixos/tests/disable-installer-tools.nix b/nixos/tests/disable-installer-tools.nix index d794f7d3f86d..ac8fa4cbf46a 100644 --- a/nixos/tests/disable-installer-tools.nix +++ b/nixos/tests/disable-installer-tools.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { - pkgs, - latestKernel ? false, - ... - }: +{ + pkgs, + latestKernel ? false, + ... +}: - { - name = "disable-installer-tools"; +{ + name = "disable-installer-tools"; - nodes.machine = - { pkgs, lib, ... }: - { - system.disableInstallerTools = true; - boot.enableContainers = false; - environment.defaultPackages = [ ]; - }; + nodes.machine = + { pkgs, lib, ... }: + { + system.disableInstallerTools = true; + boot.enableContainers = false; + environment.defaultPackages = [ ]; + }; - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") - with subtest("nixos installer tools should not be included"): - machine.fail("which nixos-rebuild") - machine.fail("which nixos-install") - machine.fail("which nixos-generate-config") - machine.fail("which nixos-enter") - machine.fail("which nixos-version") - machine.fail("which nixos-build-vms") + with subtest("nixos installer tools should not be included"): + machine.fail("which nixos-rebuild") + machine.fail("which nixos-install") + machine.fail("which nixos-generate-config") + machine.fail("which nixos-enter") + machine.fail("which nixos-version") + machine.fail("which nixos-build-vms") - with subtest("perl should not be included"): - machine.fail("which perl") - ''; - } -) + with subtest("perl should not be included"): + machine.fail("which perl") + ''; +} diff --git a/nixos/tests/discourse.nix b/nixos/tests/discourse.nix index 1f8d1b7bdf4c..8db47f2b1ad1 100644 --- a/nixos/tests/discourse.nix +++ b/nixos/tests/discourse.nix @@ -3,209 +3,207 @@ # 2. sending a private message to the admin user through the API # 3. replying to that message via email. -import ./make-test-python.nix ( - { - pkgs, - lib, - package ? pkgs.discourse, - ... - }: - let - certs = import ./common/acme/server/snakeoil-certs.nix; - clientDomain = "client.fake.domain"; - discourseDomain = certs.domain; - adminPassword = "eYAX85qmMJ5GZIHLaXGDAoszD7HSZp5d"; - secretKeyBase = "381f4ac6d8f5e49d804dae72aa9c046431d2f34c656a705c41cd52fed9b4f6f76f51549f0b55db3b8b0dded7a00d6a381ebe9a4367d2d44f5e743af6628b4d42"; - admin = { - email = "alice@${clientDomain}"; - username = "alice"; - fullName = "Alice Admin"; - passwordFile = "${pkgs.writeText "admin-pass" adminPassword}"; - }; - in - { - name = "discourse"; - meta = with pkgs.lib.maintainers; { - maintainers = [ talyz ]; - }; +{ + pkgs, + lib, + package ? pkgs.discourse, + ... 
+}: +let + certs = import ./common/acme/server/snakeoil-certs.nix; + clientDomain = "client.fake.domain"; + discourseDomain = certs.domain; + adminPassword = "eYAX85qmMJ5GZIHLaXGDAoszD7HSZp5d"; + secretKeyBase = "381f4ac6d8f5e49d804dae72aa9c046431d2f34c656a705c41cd52fed9b4f6f76f51549f0b55db3b8b0dded7a00d6a381ebe9a4367d2d44f5e743af6628b4d42"; + admin = { + email = "alice@${clientDomain}"; + username = "alice"; + fullName = "Alice Admin"; + passwordFile = "${pkgs.writeText "admin-pass" adminPassword}"; + }; +in +{ + name = "discourse"; + meta = with pkgs.lib.maintainers; { + maintainers = [ talyz ]; + }; - nodes.discourse = - { nodes, ... }: - { - virtualisation.memorySize = 2048; - virtualisation.cores = 4; - virtualisation.useNixStoreImage = true; - virtualisation.writableStore = false; + nodes.discourse = + { nodes, ... }: + { + virtualisation.memorySize = 2048; + virtualisation.cores = 4; + virtualisation.useNixStoreImage = true; + virtualisation.writableStore = false; - imports = [ common/user-account.nix ]; + imports = [ common/user-account.nix ]; - security.pki.certificateFiles = [ - certs.ca.cert - ]; + security.pki.certificateFiles = [ + certs.ca.cert + ]; - networking.extraHosts = '' - 127.0.0.1 ${discourseDomain} - ${nodes.client.networking.primaryIPAddress} ${clientDomain} - ''; - - services.postfix = { - enableSubmission = true; - enableSubmissions = true; - submissionsOptions = { - smtpd_sasl_auth_enable = "yes"; - smtpd_client_restrictions = "permit"; - }; - }; - - environment.systemPackages = [ pkgs.jq ]; - - services.postgresql.package = pkgs.postgresql_15; - - services.discourse = { - enable = true; - inherit admin package; - hostname = discourseDomain; - sslCertificate = "${certs.${discourseDomain}.cert}"; - sslCertificateKey = "${certs.${discourseDomain}.key}"; - secretKeyBaseFile = "${pkgs.writeText "secret-key-base" secretKeyBase}"; - enableACME = false; - mail.outgoing.serverAddress = clientDomain; - mail.incoming.enable = true; - siteSettings = { - posting = { - min_post_length = 5; - min_first_post_length = 5; - min_personal_message_post_length = 5; - }; - }; - unicornTimeout = 900; - }; - - networking.firewall.allowedTCPPorts = [ - 25 - 465 - ]; - }; - - nodes.client = - { nodes, ... 
}: - { - imports = [ common/user-account.nix ]; - - security.pki.certificateFiles = [ - certs.ca.cert - ]; - - networking.extraHosts = '' - 127.0.0.1 ${clientDomain} - ${nodes.discourse.networking.primaryIPAddress} ${discourseDomain} - ''; - - services.dovecot2 = { - enable = true; - protocols = [ "imap" ]; - }; - - services.postfix = { - enable = true; - origin = clientDomain; - relayDomains = [ clientDomain ]; - config = { - compatibility_level = "2"; - smtpd_banner = "ESMTP server"; - myhostname = clientDomain; - mydestination = clientDomain; - }; - }; - - environment.systemPackages = - let - replyToEmail = pkgs.writeScriptBin "reply-to-email" '' - #!${pkgs.python3.interpreter} - import imaplib - import smtplib - import ssl - import email.header - from email import message_from_bytes - from email.message import EmailMessage - - with imaplib.IMAP4('localhost') as imap: - imap.login('alice', 'foobar') - imap.select() - status, data = imap.search(None, 'ALL') - assert status == 'OK' - - nums = data[0].split() - assert len(nums) == 1 - - status, msg_data = imap.fetch(nums[0], '(RFC822)') - assert status == 'OK' - - msg = email.message_from_bytes(msg_data[0][1]) - subject = str(email.header.make_header(email.header.decode_header(msg['Subject']))) - reply_to = email.header.decode_header(msg['Reply-To'])[0][0] - message_id = email.header.decode_header(msg['Message-ID'])[0][0] - date = email.header.decode_header(msg['Date'])[0][0] - - ctx = ssl.create_default_context() - with smtplib.SMTP_SSL(host='${discourseDomain}', context=ctx) as smtp: - reply = EmailMessage() - reply['Subject'] = 'Re: ' + subject - reply['To'] = reply_to - reply['From'] = 'alice@${clientDomain}' - reply['In-Reply-To'] = message_id - reply['References'] = message_id - reply['Date'] = date - reply.set_content("Test reply.") - - smtp.send_message(reply) - smtp.quit() - ''; - in - [ replyToEmail ]; - - networking.firewall.allowedTCPPorts = [ 25 ]; - }; - - testScript = - { nodes }: - let - request = builtins.toJSON { - title = "Private message"; - raw = "This is a test message."; - target_recipients = admin.username; - archetype = "private_message"; - }; - in - '' - discourse.start() - client.start() - - discourse.wait_for_unit("discourse.service") - discourse.wait_for_file("/run/discourse/sockets/unicorn.sock") - discourse.wait_until_succeeds("curl -sS -f https://${discourseDomain}") - discourse.succeed( - "curl -sS -f https://${discourseDomain}/session/csrf -c cookie -b cookie -H 'Accept: application/json' | jq -r '\"X-CSRF-Token: \" + .csrf' > csrf_token", - "curl -sS -f https://${discourseDomain}/session -c cookie -b cookie -H @csrf_token -H 'Accept: application/json' -d 'login=${nodes.discourse.services.discourse.admin.username}' -d \"password=${adminPassword}\" | jq -e '.user.username == \"${nodes.discourse.services.discourse.admin.username}\"'", - "curl -sS -f https://${discourseDomain}/login -v -H 'Accept: application/json' -c cookie -b cookie 2>&1 | grep ${nodes.discourse.services.discourse.admin.username}", - ) - - client.wait_for_unit("postfix.service") - client.wait_for_unit("dovecot2.service") - - discourse.succeed( - "sudo -u discourse discourse-rake api_key:create_master[master] >api_key", - 'curl -sS -f https://${discourseDomain}/posts -X POST -H "Content-Type: application/json" -H "Api-Key: $(topic_id' - ) - discourse.succeed( - 'curl -sS -f https://${discourseDomain}/t/$(Test reply.
" then true else null end\' ' - ) + networking.extraHosts = '' + 127.0.0.1 ${discourseDomain} + ${nodes.client.networking.primaryIPAddress} ${clientDomain} ''; - } -) + + services.postfix = { + enableSubmission = true; + enableSubmissions = true; + submissionsOptions = { + smtpd_sasl_auth_enable = "yes"; + smtpd_client_restrictions = "permit"; + }; + }; + + environment.systemPackages = [ pkgs.jq ]; + + services.postgresql.package = pkgs.postgresql_15; + + services.discourse = { + enable = true; + inherit admin package; + hostname = discourseDomain; + sslCertificate = "${certs.${discourseDomain}.cert}"; + sslCertificateKey = "${certs.${discourseDomain}.key}"; + secretKeyBaseFile = "${pkgs.writeText "secret-key-base" secretKeyBase}"; + enableACME = false; + mail.outgoing.serverAddress = clientDomain; + mail.incoming.enable = true; + siteSettings = { + posting = { + min_post_length = 5; + min_first_post_length = 5; + min_personal_message_post_length = 5; + }; + }; + unicornTimeout = 900; + }; + + networking.firewall.allowedTCPPorts = [ + 25 + 465 + ]; + }; + + nodes.client = + { nodes, ... }: + { + imports = [ common/user-account.nix ]; + + security.pki.certificateFiles = [ + certs.ca.cert + ]; + + networking.extraHosts = '' + 127.0.0.1 ${clientDomain} + ${nodes.discourse.networking.primaryIPAddress} ${discourseDomain} + ''; + + services.dovecot2 = { + enable = true; + protocols = [ "imap" ]; + }; + + services.postfix = { + enable = true; + origin = clientDomain; + relayDomains = [ clientDomain ]; + config = { + compatibility_level = "2"; + smtpd_banner = "ESMTP server"; + myhostname = clientDomain; + mydestination = clientDomain; + }; + }; + + environment.systemPackages = + let + replyToEmail = pkgs.writeScriptBin "reply-to-email" '' + #!${pkgs.python3.interpreter} + import imaplib + import smtplib + import ssl + import email.header + from email import message_from_bytes + from email.message import EmailMessage + + with imaplib.IMAP4('localhost') as imap: + imap.login('alice', 'foobar') + imap.select() + status, data = imap.search(None, 'ALL') + assert status == 'OK' + + nums = data[0].split() + assert len(nums) == 1 + + status, msg_data = imap.fetch(nums[0], '(RFC822)') + assert status == 'OK' + + msg = email.message_from_bytes(msg_data[0][1]) + subject = str(email.header.make_header(email.header.decode_header(msg['Subject']))) + reply_to = email.header.decode_header(msg['Reply-To'])[0][0] + message_id = email.header.decode_header(msg['Message-ID'])[0][0] + date = email.header.decode_header(msg['Date'])[0][0] + + ctx = ssl.create_default_context() + with smtplib.SMTP_SSL(host='${discourseDomain}', context=ctx) as smtp: + reply = EmailMessage() + reply['Subject'] = 'Re: ' + subject + reply['To'] = reply_to + reply['From'] = 'alice@${clientDomain}' + reply['In-Reply-To'] = message_id + reply['References'] = message_id + reply['Date'] = date + reply.set_content("Test reply.") + + smtp.send_message(reply) + smtp.quit() + ''; + in + [ replyToEmail ]; + + networking.firewall.allowedTCPPorts = [ 25 ]; + }; + + testScript = + { nodes }: + let + request = builtins.toJSON { + title = "Private message"; + raw = "This is a test message."; + target_recipients = admin.username; + archetype = "private_message"; + }; + in + '' + discourse.start() + client.start() + + discourse.wait_for_unit("discourse.service") + discourse.wait_for_file("/run/discourse/sockets/unicorn.sock") + discourse.wait_until_succeeds("curl -sS -f https://${discourseDomain}") + discourse.succeed( + "curl -sS -f 
https://${discourseDomain}/session/csrf -c cookie -b cookie -H 'Accept: application/json' | jq -r '\"X-CSRF-Token: \" + .csrf' > csrf_token", + "curl -sS -f https://${discourseDomain}/session -c cookie -b cookie -H @csrf_token -H 'Accept: application/json' -d 'login=${nodes.discourse.services.discourse.admin.username}' -d \"password=${adminPassword}\" | jq -e '.user.username == \"${nodes.discourse.services.discourse.admin.username}\"'", + "curl -sS -f https://${discourseDomain}/login -v -H 'Accept: application/json' -c cookie -b cookie 2>&1 | grep ${nodes.discourse.services.discourse.admin.username}", + ) + + client.wait_for_unit("postfix.service") + client.wait_for_unit("dovecot2.service") + + discourse.succeed( + "sudo -u discourse discourse-rake api_key:create_master[master] >api_key", + 'curl -sS -f https://${discourseDomain}/posts -X POST -H "Content-Type: application/json" -H "Api-Key: $(topic_id' + ) + discourse.succeed( + 'curl -sS -f https://${discourseDomain}/t/$(Test reply.
" then true else null end\' ' + ) + ''; +} diff --git a/nixos/tests/documize.nix b/nixos/tests/documize.nix index 1537047ad679..4565033a9349 100644 --- a/nixos/tests/documize.nix +++ b/nixos/tests/documize.nix @@ -1,67 +1,65 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "documize"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; +{ pkgs, lib, ... }: +{ + name = "documize"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; - nodes.machine = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.jq ]; + nodes.machine = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.jq ]; - services.documize = { - enable = true; - port = 3000; - dbtype = "postgresql"; - db = "host=localhost port=5432 sslmode=disable user=documize password=documize dbname=documize"; - }; - - systemd.services.documize-server = { - after = [ "postgresql.service" ]; - requires = [ "postgresql.service" ]; - }; - - services.postgresql = { - enable = true; - initialScript = pkgs.writeText "psql-init" '' - CREATE ROLE documize WITH LOGIN PASSWORD 'documize'; - CREATE DATABASE documize WITH OWNER documize; - ''; - }; + services.documize = { + enable = true; + port = 3000; + dbtype = "postgresql"; + db = "host=localhost port=5432 sslmode=disable user=documize password=documize dbname=documize"; }; - testScript = '' - start_all() + systemd.services.documize-server = { + after = [ "postgresql.service" ]; + requires = [ "postgresql.service" ]; + }; - machine.wait_for_unit("documize-server.service") - machine.wait_for_open_port(3000) + services.postgresql = { + enable = true; + initialScript = pkgs.writeText "psql-init" '' + CREATE ROLE documize WITH LOGIN PASSWORD 'documize'; + CREATE DATABASE documize WITH OWNER documize; + ''; + }; + }; - dbhash = machine.succeed( - "curl -f localhost:3000 | grep 'property=\"dbhash' | grep -Po 'content=\"\\K[^\"]*'" - ) + testScript = '' + start_all() - dbhash = dbhash.strip() + machine.wait_for_unit("documize-server.service") + machine.wait_for_open_port(3000) - machine.succeed( - ( - "curl -X POST" - " --data 'dbname=documize'" - " --data 'dbhash={}'" - " --data 'title=NixOS'" - " --data 'message=Docs'" - " --data 'firstname=Bob'" - " --data 'lastname=Foobar'" - " --data 'email=bob.foobar@nixos.org'" - " --data 'password=verysafe'" - " -f localhost:3000/api/setup" - ).format(dbhash) - ) + dbhash = machine.succeed( + "curl -f localhost:3000 | grep 'property=\"dbhash' | grep -Po 'content=\"\\K[^\"]*'" + ) - machine.succeed( - 'test "$(curl -f localhost:3000/api/public/meta | jq ".title" | xargs echo)" = "NixOS"' - ) - ''; - } -) + dbhash = dbhash.strip() + + machine.succeed( + ( + "curl -X POST" + " --data 'dbname=documize'" + " --data 'dbhash={}'" + " --data 'title=NixOS'" + " --data 'message=Docs'" + " --data 'firstname=Bob'" + " --data 'lastname=Foobar'" + " --data 'email=bob.foobar@nixos.org'" + " --data 'password=verysafe'" + " -f localhost:3000/api/setup" + ).format(dbhash) + ) + + machine.succeed( + 'test "$(curl -f localhost:3000/api/public/meta | jq ".title" | xargs echo)" = "NixOS"' + ) + ''; +} diff --git a/nixos/tests/doh-proxy-rust.nix b/nixos/tests/doh-proxy-rust.nix index f9c73edcb778..26e6ea2c3885 100644 --- a/nixos/tests/doh-proxy-rust.nix +++ b/nixos/tests/doh-proxy-rust.nix @@ -1,48 +1,46 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "doh-proxy-rust"; - meta.maintainers = with lib.maintainers; [ stephank ]; +{ lib, pkgs, ... 
}: +{ + name = "doh-proxy-rust"; + meta.maintainers = with lib.maintainers; [ stephank ]; - nodes = { - machine = - { pkgs, lib, ... }: - { - services.bind = { - enable = true; - extraOptions = "empty-zones-enable no;"; - zones = lib.singleton { - name = "."; - master = true; - file = pkgs.writeText "root.zone" '' - $TTL 3600 - . IN SOA ns.example.org. admin.example.org. ( 1 3h 1h 1w 1d ) - . IN NS ns.example.org. - ns.example.org. IN A 192.168.0.1 - ''; - }; - }; - services.doh-proxy-rust = { - enable = true; - flags = [ - "--server-address=127.0.0.1:53" - ]; + nodes = { + machine = + { pkgs, lib, ... }: + { + services.bind = { + enable = true; + extraOptions = "empty-zones-enable no;"; + zones = lib.singleton { + name = "."; + master = true; + file = pkgs.writeText "root.zone" '' + $TTL 3600 + . IN SOA ns.example.org. admin.example.org. ( 1 3h 1h 1w 1d ) + . IN NS ns.example.org. + ns.example.org. IN A 192.168.0.1 + ''; }; }; - }; + services.doh-proxy-rust = { + enable = true; + flags = [ + "--server-address=127.0.0.1:53" + ]; + }; + }; + }; - testScript = - { nodes, ... }: - '' - url = "http://localhost:3000/dns-query" - query = "AAABAAABAAAAAAAAAm5zB2V4YW1wbGUDb3JnAAABAAE=" # IN A ns.example.org. - bin_ip = r"$'\xC0\xA8\x00\x01'" # 192.168.0.1, as shell binary string + testScript = + { nodes, ... }: + '' + url = "http://localhost:3000/dns-query" + query = "AAABAAABAAAAAAAAAm5zB2V4YW1wbGUDb3JnAAABAAE=" # IN A ns.example.org. + bin_ip = r"$'\xC0\xA8\x00\x01'" # 192.168.0.1, as shell binary string - machine.wait_for_unit("bind.service") - machine.wait_for_unit("doh-proxy-rust.service") - machine.wait_for_open_port(53) - machine.wait_for_open_port(3000) - machine.succeed(f"curl --fail -H 'Accept: application/dns-message' '{url}?dns={query}' | grep -F {bin_ip}") - ''; - } -) + machine.wait_for_unit("bind.service") + machine.wait_for_unit("doh-proxy-rust.service") + machine.wait_for_open_port(53) + machine.wait_for_open_port(3000) + machine.succeed(f"curl --fail -H 'Accept: application/dns-message' '{url}?dns={query}' | grep -F {bin_ip}") + ''; +} diff --git a/nixos/tests/domination.nix b/nixos/tests/domination.nix index d37a7ba98be9..48f5825afc97 100644 --- a/nixos/tests/domination.nix +++ b/nixos/tests/domination.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "domination"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "domination"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = [ pkgs.domination ]; }; - nodes.machine = - { config, pkgs, ... 
}: - { - imports = [ - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - environment.systemPackages = [ pkgs.domination ]; - }; - - enableOCR = true; - - testScript = '' - machine.wait_for_x() - # Add a dummy sound card, or an error reporting popup will appear, - # covering the main window and preventing OCR - machine.execute("modprobe snd-dummy") - machine.execute("domination >&2 &") - machine.wait_for_window("Menu") - machine.wait_for_text(r"(New Game|Start Server|Load Game|Help Manual|Join Game|About|Play Online)") - machine.screenshot("screen") - ''; - } -) + testScript = '' + machine.wait_for_x() + # Add a dummy sound card, or an error reporting popup will appear, + # covering the main window and preventing OCR + machine.execute("modprobe snd-dummy") + machine.execute("domination >&2 &") + machine.wait_for_window("Menu") + machine.wait_for_text(r"(New Game|Start Server|Load Game|Help Manual|Join Game|About|Play Online)") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/drbd-driver.nix b/nixos/tests/drbd-driver.nix index 9ba7c438eabc..9df032bd797c 100644 --- a/nixos/tests/drbd-driver.nix +++ b/nixos/tests/drbd-driver.nix @@ -1,24 +1,22 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "drbd-driver"; - meta.maintainers = with pkgs.lib.maintainers; [ birkb ]; +{ lib, pkgs, ... }: +{ + name = "drbd-driver"; + meta.maintainers = with pkgs.lib.maintainers; [ birkb ]; - nodes = { - machine = - { config, pkgs, ... }: - { - boot = { - kernelModules = [ "drbd" ]; - extraModulePackages = with config.boot.kernelPackages; [ drbd ]; - kernelPackages = pkgs.linuxPackages; - }; + nodes = { + machine = + { config, pkgs, ... }: + { + boot = { + kernelModules = [ "drbd" ]; + extraModulePackages = with config.boot.kernelPackages; [ drbd ]; + kernelPackages = pkgs.linuxPackages; }; - }; + }; + }; - testScript = '' - machine.start(); - machine.succeed("modinfo drbd | grep --extended-regexp '^version:\s+${pkgs.linuxPackages.drbd.version}$'") - ''; - } -) + testScript = '' + machine.start(); + machine.succeed("modinfo drbd | grep --extended-regexp '^version:\s+${pkgs.linuxPackages.drbd.version}$'") + ''; +} diff --git a/nixos/tests/drbd.nix b/nixos/tests/drbd.nix index 9d0a26a3252f..fb88362b10a2 100644 --- a/nixos/tests/drbd.nix +++ b/nixos/tests/drbd.nix @@ -1,93 +1,91 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - drbdPort = 7789; +{ pkgs, lib, ... }: +let + drbdPort = 7789; - drbdConfig = - { nodes, ... }: - { - virtualisation.emptyDiskImages = [ 1 ]; - networking.firewall.allowedTCPPorts = [ drbdPort ]; + drbdConfig = + { nodes, ... 
}: + { + virtualisation.emptyDiskImages = [ 1 ]; + networking.firewall.allowedTCPPorts = [ drbdPort ]; - services.drbd = { - enable = true; - config = '' - global { - usage-count yes; + services.drbd = { + enable = true; + config = '' + global { + usage-count yes; + } + + common { + net { + protocol C; + ping-int 1; + } + } + + resource r0 { + volume 0 { + device /dev/drbd0; + disk /dev/vdb; + meta-disk internal; } - common { - net { - protocol C; - ping-int 1; - } + on drbd1 { + address ${nodes.drbd1.networking.primaryIPAddress}:${toString drbdPort}; } - resource r0 { - volume 0 { - device /dev/drbd0; - disk /dev/vdb; - meta-disk internal; - } - - on drbd1 { - address ${nodes.drbd1.networking.primaryIPAddress}:${toString drbdPort}; - } - - on drbd2 { - address ${nodes.drbd2.networking.primaryIPAddress}:${toString drbdPort}; - } + on drbd2 { + address ${nodes.drbd2.networking.primaryIPAddress}:${toString drbdPort}; } - ''; - }; + } + ''; }; - in - { - name = "drbd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - ryantm - astro - birkb - ]; }; +in +{ + name = "drbd"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + ryantm + astro + birkb + ]; + }; - nodes.drbd1 = drbdConfig; - nodes.drbd2 = drbdConfig; + nodes.drbd1 = drbdConfig; + nodes.drbd2 = drbdConfig; - testScript = - { nodes }: - '' - drbd1.start() - drbd2.start() + testScript = + { nodes }: + '' + drbd1.start() + drbd2.start() - drbd1.wait_for_unit("network.target") - drbd2.wait_for_unit("network.target") + drbd1.wait_for_unit("network.target") + drbd2.wait_for_unit("network.target") - drbd1.succeed( - "drbdadm create-md r0", - "drbdadm up r0", - "drbdadm primary r0 --force", - ) + drbd1.succeed( + "drbdadm create-md r0", + "drbdadm up r0", + "drbdadm primary r0 --force", + ) - drbd2.succeed("drbdadm create-md r0", "drbdadm up r0") + drbd2.succeed("drbdadm create-md r0", "drbdadm up r0") - drbd1.succeed( - "mkfs.ext4 /dev/drbd0", - "mkdir -p /mnt/drbd", - "mount /dev/drbd0 /mnt/drbd", - "touch /mnt/drbd/hello", - "umount /mnt/drbd", - "drbdadm secondary r0", - ) - drbd1.sleep(1) + drbd1.succeed( + "mkfs.ext4 /dev/drbd0", + "mkdir -p /mnt/drbd", + "mount /dev/drbd0 /mnt/drbd", + "touch /mnt/drbd/hello", + "umount /mnt/drbd", + "drbdadm secondary r0", + ) + drbd1.sleep(1) - drbd2.succeed( - "drbdadm primary r0", - "mkdir -p /mnt/drbd", - "mount /dev/drbd0 /mnt/drbd", - "ls /mnt/drbd/hello", - ) - ''; - } -) + drbd2.succeed( + "drbdadm primary r0", + "mkdir -p /mnt/drbd", + "mount /dev/drbd0 /mnt/drbd", + "ls /mnt/drbd/hello", + ) + ''; +} diff --git a/nixos/tests/dublin-traceroute.nix b/nixos/tests/dublin-traceroute.nix index cae8e4f894f3..51532ede15eb 100644 --- a/nixos/tests/dublin-traceroute.nix +++ b/nixos/tests/dublin-traceroute.nix @@ -3,75 +3,73 @@ # client on the inside network, a server on the outside network, and a # router connected to both that performs Network Address Translation # for the client. -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - routerBase = lib.mkMerge [ - { - virtualisation.vlans = [ - 2 - 1 - ]; - networking.nftables.enable = true; - networking.nat.internalIPs = [ "192.168.1.0/24" ]; - networking.nat.externalInterface = "eth1"; - } - ]; - in - { - name = "dublin-traceroute"; - meta = with pkgs.lib.maintainers; { - maintainers = [ baloo ]; +{ pkgs, lib, ... 
}: +let + routerBase = lib.mkMerge [ + { + virtualisation.vlans = [ + 2 + 1 + ]; + networking.nftables.enable = true; + networking.nat.internalIPs = [ "192.168.1.0/24" ]; + networking.nat.externalInterface = "eth1"; + } + ]; +in +{ + name = "dublin-traceroute"; + meta = with pkgs.lib.maintainers; { + maintainers = [ baloo ]; + }; + + nodes.client = + { nodes, ... }: + { + imports = [ ./common/user-account.nix ]; + virtualisation.vlans = [ 1 ]; + + networking.defaultGateway = + (builtins.head nodes.router.networking.interfaces.eth2.ipv4.addresses).address; + networking.nftables.enable = true; + + programs.dublin-traceroute.enable = true; }; - nodes.client = - { nodes, ... }: - { - imports = [ ./common/user-account.nix ]; - virtualisation.vlans = [ 1 ]; + nodes.router = + { ... }: + { + virtualisation.vlans = [ + 2 + 1 + ]; + networking.nftables.enable = true; + networking.nat.internalIPs = [ "192.168.1.0/24" ]; + networking.nat.externalInterface = "eth1"; + networking.nat.enable = true; + }; - networking.defaultGateway = - (builtins.head nodes.router.networking.interfaces.eth2.ipv4.addresses).address; - networking.nftables.enable = true; + nodes.server = + { ... }: + { + virtualisation.vlans = [ 2 ]; + networking.firewall.enable = false; + services.httpd.enable = true; + services.httpd.adminAddr = "foo@example.org"; + services.vsftpd.enable = true; + services.vsftpd.anonymousUser = true; + }; - programs.dublin-traceroute.enable = true; - }; + testScript = '' + client.start() + router.start() + server.start() - nodes.router = - { ... }: - { - virtualisation.vlans = [ - 2 - 1 - ]; - networking.nftables.enable = true; - networking.nat.internalIPs = [ "192.168.1.0/24" ]; - networking.nat.externalInterface = "eth1"; - networking.nat.enable = true; - }; + server.wait_for_unit("network.target") + router.wait_for_unit("network.target") + client.wait_for_unit("network.target") - nodes.server = - { ... }: - { - virtualisation.vlans = [ 2 ]; - networking.firewall.enable = false; - services.httpd.enable = true; - services.httpd.adminAddr = "foo@example.org"; - services.vsftpd.enable = true; - services.vsftpd.anonymousUser = true; - }; - - testScript = '' - client.start() - router.start() - server.start() - - server.wait_for_unit("network.target") - router.wait_for_unit("network.target") - client.wait_for_unit("network.target") - - # Make sure we can trace from an unprivileged user - client.succeed("sudo -u alice dublin-traceroute server") - ''; - } -) + # Make sure we can trace from an unprivileged user + client.succeed("sudo -u alice dublin-traceroute server") + ''; +} diff --git a/nixos/tests/ecryptfs.nix b/nixos/tests/ecryptfs.nix index ceeeb1b15da2..e0deaa7e5013 100644 --- a/nixos/tests/ecryptfs.nix +++ b/nixos/tests/ecryptfs.nix @@ -1,89 +1,87 @@ -import ./make-test-python.nix ( - { ... }: - { - name = "ecryptfs"; +{ ... }: +{ + name = "ecryptfs"; - nodes.machine = - { pkgs, ... }: - { - imports = [ ./common/user-account.nix ]; - boot.kernelModules = [ "ecryptfs" ]; - security.pam.enableEcryptfs = true; - environment.systemPackages = with pkgs; [ keyutils ]; - }; + nodes.machine = + { pkgs, ... 
}: + { + imports = [ ./common/user-account.nix ]; + boot.kernelModules = [ "ecryptfs" ]; + security.pam.enableEcryptfs = true; + environment.systemPackages = with pkgs; [ keyutils ]; + }; - testScript = '' - def login_as_alice(): - machine.wait_until_tty_matches("1", "login: ") - machine.send_chars("alice\n") - machine.wait_until_tty_matches("1", "Password: ") - machine.send_chars("foobar\n") - machine.wait_until_tty_matches("1", "alice\@machine") + testScript = '' + def login_as_alice(): + machine.wait_until_tty_matches("1", "login: ") + machine.send_chars("alice\n") + machine.wait_until_tty_matches("1", "Password: ") + machine.send_chars("foobar\n") + machine.wait_until_tty_matches("1", "alice\@machine") - def logout(): - machine.send_chars("logout\n") - machine.wait_until_tty_matches("1", "login: ") + def logout(): + machine.send_chars("logout\n") + machine.wait_until_tty_matches("1", "login: ") - machine.wait_for_unit("default.target") + machine.wait_for_unit("default.target") - with subtest("Set alice up with a password and a home"): - machine.succeed("(echo foobar; echo foobar) | passwd alice") - machine.succeed("chown -R alice.users ~alice") + with subtest("Set alice up with a password and a home"): + machine.succeed("(echo foobar; echo foobar) | passwd alice") + machine.succeed("chown -R alice.users ~alice") - with subtest("Migrate alice's home"): - out = machine.succeed("echo foobar | ecryptfs-migrate-home -u alice") - machine.log(f"ecryptfs-migrate-home said: {out}") + with subtest("Migrate alice's home"): + out = machine.succeed("echo foobar | ecryptfs-migrate-home -u alice") + machine.log(f"ecryptfs-migrate-home said: {out}") - with subtest("Log alice in (ecryptfs passwhrase is wrapped during first login)"): - login_as_alice() - machine.send_chars("logout\n") - machine.wait_until_tty_matches("1", "login: ") + with subtest("Log alice in (ecryptfs passwhrase is wrapped during first login)"): + login_as_alice() + machine.send_chars("logout\n") + machine.wait_until_tty_matches("1", "login: ") - # Why do I need to do this?? - machine.succeed("su alice -c ecryptfs-umount-private || true") - machine.sleep(1) + # Why do I need to do this?? + machine.succeed("su alice -c ecryptfs-umount-private || true") + machine.sleep(1) - with subtest("check that encrypted home is not mounted"): - machine.fail("mount | grep ecryptfs") + with subtest("check that encrypted home is not mounted"): + machine.fail("mount | grep ecryptfs") - with subtest("Show contents of the user keyring"): - out = machine.succeed("su - alice -c 'keyctl list \@u'") - machine.log(f"keyctl unlink said: {out}") + with subtest("Show contents of the user keyring"): + out = machine.succeed("su - alice -c 'keyctl list \@u'") + machine.log(f"keyctl unlink said: {out}") - with subtest("Log alice again"): - login_as_alice() + with subtest("Log alice again"): + login_as_alice() - with subtest("Create some files in encrypted home"): - machine.succeed("su alice -c 'touch ~alice/a'") - machine.succeed("su alice -c 'echo c > ~alice/b'") + with subtest("Create some files in encrypted home"): + machine.succeed("su alice -c 'touch ~alice/a'") + machine.succeed("su alice -c 'echo c > ~alice/b'") - with subtest("Logout"): - logout() + with subtest("Logout"): + logout() - # Why do I need to do this?? - machine.succeed("su alice -c ecryptfs-umount-private || true") - machine.sleep(1) + # Why do I need to do this?? 
+ machine.succeed("su alice -c ecryptfs-umount-private || true") + machine.sleep(1) - with subtest("Check that the filesystem is not accessible"): - machine.fail("mount | grep ecryptfs") - machine.succeed("su alice -c 'test \! -f ~alice/a'") - machine.succeed("su alice -c 'test \! -f ~alice/b'") + with subtest("Check that the filesystem is not accessible"): + machine.fail("mount | grep ecryptfs") + machine.succeed("su alice -c 'test \! -f ~alice/a'") + machine.succeed("su alice -c 'test \! -f ~alice/b'") - with subtest("Log alice once more"): - login_as_alice() + with subtest("Log alice once more"): + login_as_alice() - with subtest("Check that the files are there"): - machine.sleep(1) - machine.succeed("su alice -c 'test -f ~alice/a'") - machine.succeed("su alice -c 'test -f ~alice/b'") - machine.succeed('test "$(cat ~alice/b)" = "c"') + with subtest("Check that the files are there"): + machine.sleep(1) + machine.succeed("su alice -c 'test -f ~alice/a'") + machine.succeed("su alice -c 'test -f ~alice/b'") + machine.succeed('test "$(cat ~alice/b)" = "c"') - with subtest("Catch https://github.com/NixOS/nixpkgs/issues/16766"): - machine.succeed("su alice -c 'ls -lh ~alice/'") + with subtest("Catch https://github.com/NixOS/nixpkgs/issues/16766"): + machine.succeed("su alice -c 'ls -lh ~alice/'") - logout() - ''; - } -) + logout() + ''; +} diff --git a/nixos/tests/endlessh-go.nix b/nixos/tests/endlessh-go.nix index 0c2402af2568..892d54af6101 100644 --- a/nixos/tests/endlessh-go.nix +++ b/nixos/tests/endlessh-go.nix @@ -1,71 +1,69 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "endlessh-go"; - meta.maintainers = with lib.maintainers; [ azahi ]; +{ lib, pkgs, ... }: +{ + name = "endlessh-go"; + meta.maintainers = with lib.maintainers; [ azahi ]; - nodes = { - server = - { ... }: - { - services.endlessh-go = { - enable = true; - prometheus.enable = true; - openFirewall = true; + nodes = { + server = + { ... }: + { + services.endlessh-go = { + enable = true; + prometheus.enable = true; + openFirewall = true; + }; + + specialisation = { + unprivileged.configuration = { + services.endlessh-go = { + port = 2222; + prometheus.port = 9229; + }; }; - specialisation = { - unprivileged.configuration = { - services.endlessh-go = { - port = 2222; - prometheus.port = 9229; - }; - }; - - privileged.configuration = { - services.endlessh-go = { - port = 22; - prometheus.port = 92; - }; + privileged.configuration = { + services.endlessh-go = { + port = 22; + prometheus.port = 92; }; }; }; + }; - client = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ - curl - netcat - ]; - }; - }; + client = + { pkgs, ... 
}: + { + environment.systemPackages = with pkgs; [ + curl + netcat + ]; + }; + }; - testScript = '' - def activate_specialisation(name: str): - server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2") + testScript = '' + def activate_specialisation(name: str): + server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2") - start_all() + start_all() - with subtest("Unprivileged"): - activate_specialisation("unprivileged") - server.wait_for_unit("endlessh-go.service") - server.wait_for_open_port(2222) - server.wait_for_open_port(9229) - server.fail("curl -sSf server:9229/metrics | grep -q endlessh_client_closed_count_total") - client.succeed("nc -dvW5 server 2222") - server.succeed("curl -sSf server:9229/metrics | grep -q endlessh_client_closed_count_total") - client.fail("curl -sSfm 5 server:9229/metrics") + with subtest("Unprivileged"): + activate_specialisation("unprivileged") + server.wait_for_unit("endlessh-go.service") + server.wait_for_open_port(2222) + server.wait_for_open_port(9229) + server.fail("curl -sSf server:9229/metrics | grep -q endlessh_client_closed_count_total") + client.succeed("nc -dvW5 server 2222") + server.succeed("curl -sSf server:9229/metrics | grep -q endlessh_client_closed_count_total") + client.fail("curl -sSfm 5 server:9229/metrics") - with subtest("Privileged"): - activate_specialisation("privileged") - server.wait_for_unit("endlessh-go.service") - server.wait_for_open_port(22) - server.wait_for_open_port(92) - server.fail("curl -sSf server:92/metrics | grep -q endlessh_client_closed_count_total") - client.succeed("nc -dvW5 server 22") - server.succeed("curl -sSf server:92/metrics | grep -q endlessh_client_closed_count_total") - client.fail("curl -sSfm 5 server:92/metrics") - ''; - } -) + with subtest("Privileged"): + activate_specialisation("privileged") + server.wait_for_unit("endlessh-go.service") + server.wait_for_open_port(22) + server.wait_for_open_port(92) + server.fail("curl -sSf server:92/metrics | grep -q endlessh_client_closed_count_total") + client.succeed("nc -dvW5 server 22") + server.succeed("curl -sSf server:92/metrics | grep -q endlessh_client_closed_count_total") + client.fail("curl -sSfm 5 server:92/metrics") + ''; +} diff --git a/nixos/tests/endlessh.nix b/nixos/tests/endlessh.nix index 696ef1b6013f..95cdbf8586c2 100644 --- a/nixos/tests/endlessh.nix +++ b/nixos/tests/endlessh.nix @@ -1,52 +1,50 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "endlessh"; - meta.maintainers = with lib.maintainers; [ azahi ]; +{ lib, pkgs, ... }: +{ + name = "endlessh"; + meta.maintainers = with lib.maintainers; [ azahi ]; - nodes = { - server = - { ... }: - { - services.endlessh = { - enable = true; - openFirewall = true; - }; - - specialisation = { - unprivileged.configuration.services.endlessh.port = 2222; - - privileged.configuration.services.endlessh.port = 22; - }; + nodes = { + server = + { ... }: + { + services.endlessh = { + enable = true; + openFirewall = true; }; - client = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ - curl - netcat - ]; + specialisation = { + unprivileged.configuration.services.endlessh.port = 2222; + + privileged.configuration.services.endlessh.port = 22; }; - }; + }; - testScript = '' - def activate_specialisation(name: str): - server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2") + client = + { pkgs, ... 
}: + { + environment.systemPackages = with pkgs; [ + curl + netcat + ]; + }; + }; - start_all() + testScript = '' + def activate_specialisation(name: str): + server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2") - with subtest("Unprivileged"): - activate_specialisation("unprivileged") - server.wait_for_unit("endlessh.service") - server.wait_for_open_port(2222) - client.succeed("nc -dvW5 server 2222") + start_all() - with subtest("Privileged"): - activate_specialisation("privileged") - server.wait_for_unit("endlessh.service") - server.wait_for_open_port(22) - client.succeed("nc -dvW5 server 22") - ''; - } -) + with subtest("Unprivileged"): + activate_specialisation("unprivileged") + server.wait_for_unit("endlessh.service") + server.wait_for_open_port(2222) + client.succeed("nc -dvW5 server 2222") + + with subtest("Privileged"): + activate_specialisation("privileged") + server.wait_for_unit("endlessh.service") + server.wait_for_open_port(22) + client.succeed("nc -dvW5 server 22") + ''; +} diff --git a/nixos/tests/engelsystem.nix b/nixos/tests/engelsystem.nix index 69187973ada9..a0eaff6bb1fe 100644 --- a/nixos/tests/engelsystem.nix +++ b/nixos/tests/engelsystem.nix @@ -1,45 +1,43 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "engelsystem"; - meta = with pkgs.lib.maintainers; { - maintainers = [ talyz ]; +{ pkgs, lib, ... }: +{ + name = "engelsystem"; + meta = with pkgs.lib.maintainers; { + maintainers = [ talyz ]; + }; + + nodes.engelsystem = + { ... }: + { + services.engelsystem = { + enable = true; + domain = "engelsystem"; + createDatabase = true; + }; + networking.firewall.allowedTCPPorts = [ + 80 + 443 + ]; + environment.systemPackages = with pkgs; [ + xmlstarlet + libxml2 + ]; }; - nodes.engelsystem = - { ... }: - { - services.engelsystem = { - enable = true; - domain = "engelsystem"; - createDatabase = true; - }; - networking.firewall.allowedTCPPorts = [ - 80 - 443 - ]; - environment.systemPackages = with pkgs; [ - xmlstarlet - libxml2 - ]; - }; - - testScript = '' - engelsystem.start() - engelsystem.wait_for_unit("phpfpm-engelsystem.service") - engelsystem.wait_until_succeeds("curl engelsystem/login -sS -f") - engelsystem.succeed( - "curl engelsystem/login -sS -f -c cookie | xmllint -html -xmlout - >login" - ) - engelsystem.succeed( - "xml sel -T -t -m \"html/head/meta[@name='csrf-token']\" -v @content login >token" - ) - engelsystem.succeed( - "curl engelsystem/login -sS -f -b cookie -F 'login=admin' -F 'password=asdfasdf' -F '_token=news" - ) - engelsystem.succeed( - "test 'News - Engelsystem' = \"$(xml sel -T -t -c html/head/title news)\"" - ) - ''; - } -) + testScript = '' + engelsystem.start() + engelsystem.wait_for_unit("phpfpm-engelsystem.service") + engelsystem.wait_until_succeeds("curl engelsystem/login -sS -f") + engelsystem.succeed( + "curl engelsystem/login -sS -f -c cookie | xmllint -html -xmlout - >login" + ) + engelsystem.succeed( + "xml sel -T -t -m \"html/head/meta[@name='csrf-token']\" -v @content login >token" + ) + engelsystem.succeed( + "curl engelsystem/login -sS -f -b cookie -F 'login=admin' -F 'password=asdfasdf' -F '_token=news" + ) + engelsystem.succeed( + "test 'News - Engelsystem' = \"$(xml sel -T -t -c html/head/title news)\"" + ) + ''; +} diff --git a/nixos/tests/enlightenment.nix b/nixos/tests/enlightenment.nix index f2fc23685dd6..77e731feddbf 100644 --- a/nixos/tests/enlightenment.nix +++ b/nixos/tests/enlightenment.nix @@ -1,104 +1,102 @@ -import ./make-test-python.nix ( - { pkgs, ... 
}: - { - name = "enlightenment"; +{ pkgs, ... }: +{ + name = "enlightenment"; - meta = with pkgs.lib.maintainers; { - maintainers = [ romildo ]; - timeout = 600; - # OCR tests are flaky - broken = true; + meta = with pkgs.lib.maintainers; { + maintainers = [ romildo ]; + timeout = 600; + # OCR tests are flaky + broken = true; + }; + + nodes.machine = + { ... }: + { + imports = [ ./common/user-account.nix ]; + services.xserver.enable = true; + services.xserver.desktopManager.enlightenment.enable = true; + services.xserver.displayManager = { + lightdm.enable = true; + autoLogin = { + enable = true; + user = "alice"; + }; + }; + environment.systemPackages = [ pkgs.xdotool ]; + services.acpid.enable = true; + services.connman.enable = true; + services.connman.package = pkgs.connmanMinimal; }; - nodes.machine = - { ... }: - { - imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - services.xserver.desktopManager.enlightenment.enable = true; - services.xserver.displayManager = { - lightdm.enable = true; - autoLogin = { - enable = true; - user = "alice"; - }; - }; - environment.systemPackages = [ pkgs.xdotool ]; - services.acpid.enable = true; - services.connman.enable = true; - services.connman.package = pkgs.connmanMinimal; - }; + enableOCR = true; - enableOCR = true; + testScript = + { nodes, ... }: + let + user = nodes.machine.config.users.users.alice; + in + '' + with subtest("Ensure x starts"): + machine.wait_for_x() + machine.wait_for_file("${user.home}/.Xauthority") + machine.succeed("xauth merge ${user.home}/.Xauthority") - testScript = - { nodes, ... }: - let - user = nodes.machine.config.users.users.alice; - in - '' - with subtest("Ensure x starts"): - machine.wait_for_x() - machine.wait_for_file("${user.home}/.Xauthority") - machine.succeed("xauth merge ${user.home}/.Xauthority") + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + with subtest("First time wizard"): + machine.wait_for_text("Default") # Language + machine.screenshot("wizard1") + machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.screenshot("wizard2") - with subtest("First time wizard"): - machine.wait_for_text("Default") # Language - machine.screenshot("wizard1") - machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.screenshot("wizard2") + machine.wait_for_text("English") # Keyboard (default) + machine.screenshot("wizard3") + machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.wait_for_text("English") # Keyboard (default) - machine.screenshot("wizard3") - machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.wait_for_text("Standard") # Profile (default) + machine.screenshot("wizard4") + machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.wait_for_text("Standard") # Profile (default) - machine.screenshot("wizard4") - machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.wait_for_text("Title") # Sizing (default) + machine.screenshot("wizard5") + machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.wait_for_text("Title") # Sizing (default) - machine.screenshot("wizard5") - machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.wait_for_text("clicked") # Windows Focus + machine.succeed("xdotool mousemove 512 370 click 
1") # Click + machine.screenshot("wizard6") + machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.wait_for_text("clicked") # Windows Focus - machine.succeed("xdotool mousemove 512 370 click 1") # Click - machine.screenshot("wizard6") - machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.wait_for_text("Connman") # Network Management (default) + machine.screenshot("wizard7") + machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.wait_for_text("Connman") # Network Management (default) - machine.screenshot("wizard7") - machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.wait_for_text("BlusZ") # Bluetooth Management (default) + machine.screenshot("wizard8") + machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.wait_for_text("BlusZ") # Bluetooth Management (default) - machine.screenshot("wizard8") - machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.wait_for_text("OpenGL") # Compositing (default) + machine.screenshot("wizard9") + machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.wait_for_text("OpenGL") # Compositing (default) - machine.screenshot("wizard9") - machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.wait_for_text("update") # Updates + machine.succeed("xdotool mousemove 512 495 click 1") # Disable + machine.screenshot("wizard10") + machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.wait_for_text("update") # Updates - machine.succeed("xdotool mousemove 512 495 click 1") # Disable - machine.screenshot("wizard10") - machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.wait_for_text("taskbar") # Taskbar + machine.succeed("xdotool mousemove 480 410 click 1") # Enable + machine.screenshot("wizard11") + machine.succeed("xdotool mousemove 512 740 click 1") # Next - machine.wait_for_text("taskbar") # Taskbar - machine.succeed("xdotool mousemove 480 410 click 1") # Enable - machine.screenshot("wizard11") - machine.succeed("xdotool mousemove 512 740 click 1") # Next + machine.wait_for_text("Home") # The desktop + machine.screenshot("wizard12") - machine.wait_for_text("Home") # The desktop - machine.screenshot("wizard12") - - with subtest("Run Terminology"): - machine.succeed("terminology >&2 &") - machine.sleep(5) - machine.send_chars("ls --color -alF\n") - machine.sleep(2) - machine.screenshot("terminology") - ''; - } -) + with subtest("Run Terminology"): + machine.succeed("terminology >&2 &") + machine.sleep(5) + machine.send_chars("ls --color -alF\n") + machine.sleep(2) + machine.screenshot("terminology") + ''; +} diff --git a/nixos/tests/env.nix b/nixos/tests/env.nix index 5636a423469a..6e94771b1ff1 100644 --- a/nixos/tests/env.nix +++ b/nixos/tests/env.nix @@ -1,49 +1,47 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "environment"; - meta = with pkgs.lib.maintainers; { - maintainers = [ nequissimus ]; - }; +{ pkgs, ... }: +{ + name = "environment"; + meta = with pkgs.lib.maintainers; { + maintainers = [ nequissimus ]; + }; - nodes.machine = - { pkgs, lib, ... }: - lib.mkMerge [ - { - boot.kernelPackages = pkgs.linuxPackages; - environment.etc.plainFile.text = '' - Hello World - ''; - environment.etc."folder/with/file".text = '' - Foo Bar! - ''; + nodes.machine = + { pkgs, lib, ... }: + lib.mkMerge [ + { + boot.kernelPackages = pkgs.linuxPackages; + environment.etc.plainFile.text = '' + Hello World + ''; + environment.etc."folder/with/file".text = '' + Foo Bar! 
+ ''; - environment.sessionVariables = { - TERMINFO_DIRS = "/run/current-system/sw/share/terminfo"; - NIXCON = "awesome"; - SHOULD_NOT_BE_SET = "oops"; - }; - } - { - environment.sessionVariables = { - SHOULD_NOT_BE_SET = lib.mkForce null; - }; - } - ]; + environment.sessionVariables = { + TERMINFO_DIRS = "/run/current-system/sw/share/terminfo"; + NIXCON = "awesome"; + SHOULD_NOT_BE_SET = "oops"; + }; + } + { + environment.sessionVariables = { + SHOULD_NOT_BE_SET = lib.mkForce null; + }; + } + ]; - testScript = '' - machine.succeed('[ -L "/etc/plainFile" ]') - assert "Hello World" in machine.succeed('cat "/etc/plainFile"') - machine.succeed('[ -d "/etc/folder" ]') - machine.succeed('[ -d "/etc/folder/with" ]') - machine.succeed('[ -L "/etc/folder/with/file" ]') - assert "Hello World" in machine.succeed('cat "/etc/plainFile"') + testScript = '' + machine.succeed('[ -L "/etc/plainFile" ]') + assert "Hello World" in machine.succeed('cat "/etc/plainFile"') + machine.succeed('[ -d "/etc/folder" ]') + machine.succeed('[ -d "/etc/folder/with" ]') + machine.succeed('[ -L "/etc/folder/with/file" ]') + assert "Hello World" in machine.succeed('cat "/etc/plainFile"') - assert "/run/current-system/sw/share/terminfo" in machine.succeed( - "echo ''${TERMINFO_DIRS}" - ) - assert "awesome" in machine.succeed("echo ''${NIXCON}") - machine.fail("printenv SHOULD_NOT_BE_SET") - ''; - } -) + assert "/run/current-system/sw/share/terminfo" in machine.succeed( + "echo ''${TERMINFO_DIRS}" + ) + assert "awesome" in machine.succeed("echo ''${NIXCON}") + machine.fail("printenv SHOULD_NOT_BE_SET") + ''; +} diff --git a/nixos/tests/envfs.nix b/nixos/tests/envfs.nix index b9067467eb2b..6e8ee7e55aea 100644 --- a/nixos/tests/envfs.nix +++ b/nixos/tests/envfs.nix @@ -1,42 +1,40 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let - pythonShebang = pkgs.writeScript "python-shebang" '' - #!/usr/bin/python - print("OK") - ''; +{ lib, pkgs, ... }: +let + pythonShebang = pkgs.writeScript "python-shebang" '' + #!/usr/bin/python + print("OK") + ''; - bashShebang = pkgs.writeScript "bash-shebang" '' - #!/usr/bin/bash - echo "OK" - ''; - in - { - name = "envfs"; - nodes.machine.services.envfs.enable = true; + bashShebang = pkgs.writeScript "bash-shebang" '' + #!/usr/bin/bash + echo "OK" + ''; +in +{ + name = "envfs"; + nodes.machine.services.envfs.enable = true; - testScript = '' - start_all() - machine.wait_until_succeeds("mountpoint -q /usr/bin/") - machine.succeed( - "PATH=${pkgs.coreutils}/bin /usr/bin/cp --version", - # check fallback paths - "PATH= /usr/bin/sh --version", - "PATH= /usr/bin/env --version", - "PATH= test -e /usr/bin/sh", - "PATH= test -e /usr/bin/env", - # also picks up PATH that was set after execve - "! /usr/bin/hello", - "PATH=${pkgs.hello}/bin /usr/bin/hello", - ) + testScript = '' + start_all() + machine.wait_until_succeeds("mountpoint -q /usr/bin/") + machine.succeed( + "PATH=${pkgs.coreutils}/bin /usr/bin/cp --version", + # check fallback paths + "PATH= /usr/bin/sh --version", + "PATH= /usr/bin/env --version", + "PATH= test -e /usr/bin/sh", + "PATH= test -e /usr/bin/env", + # also picks up PATH that was set after execve + "! 
/usr/bin/hello", + "PATH=${pkgs.hello}/bin /usr/bin/hello", + ) - out = machine.succeed("PATH=${pkgs.python3}/bin ${pythonShebang}") - print(out) - assert out == "OK\n" + out = machine.succeed("PATH=${pkgs.python3}/bin ${pythonShebang}") + print(out) + assert out == "OK\n" - out = machine.succeed("PATH=${pkgs.bash}/bin ${bashShebang}") - print(out) - assert out == "OK\n" - ''; - } -) + out = machine.succeed("PATH=${pkgs.bash}/bin ${bashShebang}") + print(out) + assert out == "OK\n" + ''; +} diff --git a/nixos/tests/ergo.nix b/nixos/tests/ergo.nix index fadf44589906..816bec0d3ac4 100644 --- a/nixos/tests/ergo.nix +++ b/nixos/tests/ergo.nix @@ -1,23 +1,21 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "ergo"; - meta = with pkgs.lib.maintainers; { - maintainers = [ mmahut ]; - }; +{ pkgs, ... }: +{ + name = "ergo"; + meta = with pkgs.lib.maintainers; { + maintainers = [ mmahut ]; + }; - nodes = { - machine = - { ... }: - { - services.ergo.enable = true; - services.ergo.api.keyHash = "324dcf027dd4a30a932c441f365a25e86b173defa4b8e58948253471b81b72cf"; - }; - }; + nodes = { + machine = + { ... }: + { + services.ergo.enable = true; + services.ergo.api.keyHash = "324dcf027dd4a30a932c441f365a25e86b173defa4b8e58948253471b81b72cf"; + }; + }; - testScript = '' - start_all() - machine.wait_for_unit("ergo.service") - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("ergo.service") + ''; +} diff --git a/nixos/tests/ergochat.nix b/nixos/tests/ergochat.nix index 6dd9efbb2ccf..30f262bf5569 100644 --- a/nixos/tests/ergochat.nix +++ b/nixos/tests/ergochat.nix @@ -9,100 +9,98 @@ let iiDir = "/tmp/irc"; in -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "ergochat"; - nodes = - { - "${server}" = { - networking.firewall.allowedTCPPorts = [ ircPort ]; - services.ergochat = { - enable = true; - settings.server.motd = pkgs.writeText "ergo.motd" '' - The default MOTD doesn't contain the word "nixos" in it. - This one does. - ''; - }; +{ pkgs, lib, ... }: +{ + name = "ergochat"; + nodes = + { + "${server}" = { + networking.firewall.allowedTCPPorts = [ ircPort ]; + services.ergochat = { + enable = true; + settings.server.motd = pkgs.writeText "ergo.motd" '' + The default MOTD doesn't contain the word "nixos" in it. + This one does. + ''; }; - } - // lib.listToAttrs ( - builtins.map ( - client: - lib.nameValuePair client { - imports = [ - ./common/user-account.nix - ]; + }; + } + // lib.listToAttrs ( + builtins.map ( + client: + lib.nameValuePair client { + imports = [ + ./common/user-account.nix + ]; - systemd.services.ii = { - requires = [ "network.target" ]; - wantedBy = [ "default.target" ]; + systemd.services.ii = { + requires = [ "network.target" ]; + wantedBy = [ "default.target" ]; - serviceConfig = { - Type = "simple"; - ExecPreStartPre = "mkdir -p ${iiDir}"; - ExecStart = '' - ${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir} - ''; - User = "alice"; - }; + serviceConfig = { + Type = "simple"; + ExecPreStartPre = "mkdir -p ${iiDir}"; + ExecStart = '' + ${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir} + ''; + User = "alice"; }; - } - ) clients - ); + }; + } + ) clients + ); - testScript = - let - msg = client: "Hello, my name is ${client}"; - clientScript = - client: - [ - '' - ${client}.wait_for_unit("network.target") - ${client}.systemctl("start ii") - ${client}.wait_for_unit("ii") - ${client}.wait_for_file("${iiDir}/${server}/out") - '' - # look for the custom text in the MOTD. 
- '' - ${client}.wait_until_succeeds("grep 'nixos' ${iiDir}/${server}/out") - '' - # wait until first PING from server arrives before joining, - # so we don't try it too early - '' - ${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out") - '' - # join ${channel} - '' - ${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in") - ${client}.wait_for_file("${iiDir}/${server}/#${channel}/in") - '' - # send a greeting - '' - ${client}.succeed( - "echo '${msg client}' > ${iiDir}/${server}/#${channel}/in" - ) - '' - # check that all greetings arrived on all clients - ] - ++ builtins.map (other: '' + testScript = + let + msg = client: "Hello, my name is ${client}"; + clientScript = + client: + [ + '' + ${client}.wait_for_unit("network.target") + ${client}.systemctl("start ii") + ${client}.wait_for_unit("ii") + ${client}.wait_for_file("${iiDir}/${server}/out") + '' + # look for the custom text in the MOTD. + '' + ${client}.wait_until_succeeds("grep 'nixos' ${iiDir}/${server}/out") + '' + # wait until first PING from server arrives before joining, + # so we don't try it too early + '' + ${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out") + '' + # join ${channel} + '' + ${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in") + ${client}.wait_for_file("${iiDir}/${server}/#${channel}/in") + '' + # send a greeting + '' ${client}.succeed( - "grep '${msg other}$' ${iiDir}/${server}/#${channel}/out" + "echo '${msg client}' > ${iiDir}/${server}/#${channel}/in" ) - '') clients; + '' + # check that all greetings arrived on all clients + ] + ++ builtins.map (other: '' + ${client}.succeed( + "grep '${msg other}$' ${iiDir}/${server}/#${channel}/out" + ) + '') clients; - # foldl', but requires a non-empty list instead of a start value - reduce = f: list: builtins.foldl' f (builtins.head list) (builtins.tail list); - in - '' - start_all() - ${server}.systemctl("status ergochat") - ${server}.wait_for_open_port(${toString ircPort}) + # foldl', but requires a non-empty list instead of a start value + reduce = f: list: builtins.foldl' f (builtins.head list) (builtins.tail list); + in + '' + start_all() + ${server}.systemctl("status ergochat") + ${server}.wait_for_open_port(${toString ircPort}) - # run clientScript for all clients so that every list - # entry is executed by every client before advancing - # to the next one. - '' - + lib.concatStrings (reduce (lib.zipListsWith (cs: c: cs + c)) (builtins.map clientScript clients)); - } -) + # run clientScript for all clients so that every list + # entry is executed by every client before advancing + # to the next one. + '' + + lib.concatStrings (reduce (lib.zipListsWith (cs: c: cs + c)) (builtins.map clientScript clients)); +} diff --git a/nixos/tests/eris-server.nix b/nixos/tests/eris-server.nix index 795be7f5086c..55d02c1a9803 100644 --- a/nixos/tests/eris-server.nix +++ b/nixos/tests/eris-server.nix @@ -1,29 +1,27 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "eris-server"; - meta.maintainers = with lib.maintainers; [ ehmry ]; +{ pkgs, lib, ... 
}: +{ + name = "eris-server"; + meta.maintainers = with lib.maintainers; [ ehmry ]; - nodes.server = { - environment.systemPackages = [ - pkgs.eris-go - pkgs.eriscmd - ]; - services.eris-server = { - enable = true; - decode = true; - listenHttp = "[::1]:80"; - backends = [ "badger+file:///var/cache/eris.badger?get&put" ]; - mountpoint = "/eris"; - }; + nodes.server = { + environment.systemPackages = [ + pkgs.eris-go + pkgs.eriscmd + ]; + services.eris-server = { + enable = true; + decode = true; + listenHttp = "[::1]:80"; + backends = [ "badger+file:///var/cache/eris.badger?get&put" ]; + mountpoint = "/eris"; }; + }; - testScript = '' - start_all() - server.wait_for_unit("eris-server.service") - server.wait_for_open_port(5683) - server.wait_for_open_port(80) - server.succeed("eriscmd get http://[::1] $(echo 'Hail ERIS!' | eriscmd put coap+tcp://[::1]:5683)") - ''; - } -) + testScript = '' + start_all() + server.wait_for_unit("eris-server.service") + server.wait_for_open_port(5683) + server.wait_for_open_port(80) + server.succeed("eriscmd get http://[::1] $(echo 'Hail ERIS!' | eriscmd put coap+tcp://[::1]:5683)") + ''; +} diff --git a/nixos/tests/esphome.nix b/nixos/tests/esphome.nix index 4fe0a9303ce8..8a84213f971d 100644 --- a/nixos/tests/esphome.nix +++ b/nixos/tests/esphome.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - testPort = 6052; - unixSocket = "/run/esphome/esphome.sock"; - in - { - name = "esphome"; - meta.maintainers = with lib.maintainers; [ oddlama ]; +let + testPort = 6052; + unixSocket = "/run/esphome/esphome.sock"; +in +{ + name = "esphome"; + meta.maintainers = with lib.maintainers; [ oddlama ]; - nodes = { - esphomeTcp = - { ... }: - { - services.esphome = { - enable = true; - port = testPort; - address = "0.0.0.0"; - openFirewall = true; - }; + nodes = { + esphomeTcp = + { ... }: + { + services.esphome = { + enable = true; + port = testPort; + address = "0.0.0.0"; + openFirewall = true; }; + }; - esphomeUnix = - { ... }: - { - services.esphome = { - enable = true; - enableUnixSocket = true; - }; + esphomeUnix = + { ... }: + { + services.esphome = { + enable = true; + enableUnixSocket = true; }; - }; + }; + }; - testScript = '' - esphomeTcp.wait_for_unit("esphome.service") - esphomeTcp.wait_for_open_port(${toString testPort}) - esphomeTcp.succeed("curl --fail http://localhost:${toString testPort}/") + testScript = '' + esphomeTcp.wait_for_unit("esphome.service") + esphomeTcp.wait_for_open_port(${toString testPort}) + esphomeTcp.succeed("curl --fail http://localhost:${toString testPort}/") - esphomeUnix.wait_for_unit("esphome.service") - esphomeUnix.wait_for_file("${unixSocket}") - esphomeUnix.succeed("curl --fail --unix-socket ${unixSocket} http://localhost/") - ''; - } -) + esphomeUnix.wait_for_unit("esphome.service") + esphomeUnix.wait_for_file("${unixSocket}") + esphomeUnix.succeed("curl --fail --unix-socket ${unixSocket} http://localhost/") + ''; +} diff --git a/nixos/tests/etebase-server.nix b/nixos/tests/etebase-server.nix index 110338ef8915..4d848b8b0a06 100644 --- a/nixos/tests/etebase-server.nix +++ b/nixos/tests/etebase-server.nix @@ -1,52 +1,50 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - let - dataDir = "/var/lib/foobar"; +let + dataDir = "/var/lib/foobar"; - in - { - name = "etebase-server"; - meta = with pkgs.lib.maintainers; { - maintainers = [ felschr ]; +in +{ + name = "etebase-server"; + meta = with pkgs.lib.maintainers; { + maintainers = [ felschr ]; + }; + + nodes.machine = + { pkgs, ... }: + { + services.etebase-server = { + inherit dataDir; + enable = true; + settings.global.secret_file = toString (pkgs.writeText "secret" "123456"); + }; }; - nodes.machine = - { pkgs, ... }: - { - services.etebase-server = { - inherit dataDir; - enable = true; - settings.global.secret_file = toString (pkgs.writeText "secret" "123456"); - }; - }; + testScript = '' + machine.wait_for_unit("etebase-server.service") + machine.wait_for_open_port(8001) - testScript = '' - machine.wait_for_unit("etebase-server.service") - machine.wait_for_open_port(8001) + with subtest("Database & src-version were created"): + machine.wait_for_file("${dataDir}/src-version") + assert ( + "${pkgs.etebase-server}" + in machine.succeed("cat ${dataDir}/src-version") + ) + machine.wait_for_file("${dataDir}/db.sqlite3") + machine.wait_for_file("${dataDir}/static") - with subtest("Database & src-version were created"): - machine.wait_for_file("${dataDir}/src-version") - assert ( - "${pkgs.etebase-server}" - in machine.succeed("cat ${dataDir}/src-version") - ) - machine.wait_for_file("${dataDir}/db.sqlite3") - machine.wait_for_file("${dataDir}/static") + with subtest("Only allow access from allowed_hosts"): + machine.succeed("curl -sSfL http://0.0.0.0:8001/") + machine.fail("curl -sSfL http://127.0.0.1:8001/") + machine.fail("curl -sSfL http://localhost:8001/") - with subtest("Only allow access from allowed_hosts"): - machine.succeed("curl -sSfL http://0.0.0.0:8001/") - machine.fail("curl -sSfL http://127.0.0.1:8001/") - machine.fail("curl -sSfL http://localhost:8001/") + with subtest("Run tests"): + machine.succeed("etebase-server check") + machine.succeed("etebase-server test") - with subtest("Run tests"): - machine.succeed("etebase-server check") - machine.succeed("etebase-server test") - - with subtest("Create superuser"): - machine.succeed( - "etebase-server createsuperuser --no-input --username admin --email root@localhost" - ) - ''; - } -) + with subtest("Create superuser"): + machine.succeed( + "etebase-server createsuperuser --no-input --username admin --email root@localhost" + ) + ''; +} diff --git a/nixos/tests/etesync-dav.nix b/nixos/tests/etesync-dav.nix index c8f86a84e371..feff9030f2a9 100644 --- a/nixos/tests/etesync-dav.nix +++ b/nixos/tests/etesync-dav.nix @@ -1,28 +1,26 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { +{ pkgs, ... }: +{ - name = "etesync-dav"; - meta = with pkgs.lib.maintainers; { - maintainers = [ _3699n ]; + name = "etesync-dav"; + meta = with pkgs.lib.maintainers; { + maintainers = [ _3699n ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + environment.systemPackages = [ + pkgs.curl + pkgs.etesync-dav + ]; }; - nodes.machine = - { config, pkgs, ... 
}: - { - environment.systemPackages = [ - pkgs.curl - pkgs.etesync-dav - ]; - }; - - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.succeed("etesync-dav --version") - machine.execute("etesync-dav >&2 &") - machine.wait_for_open_port(37358) - with subtest("Check that the web interface is accessible"): - assert "Add User" in machine.succeed("curl -s http://localhost:37358/.web/add/") - ''; - } -) + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.succeed("etesync-dav --version") + machine.execute("etesync-dav >&2 &") + machine.wait_for_open_port(37358) + with subtest("Check that the web interface is accessible"): + assert "Add User" in machine.succeed("curl -s http://localhost:37358/.web/add/") + ''; +} diff --git a/nixos/tests/fakeroute.nix b/nixos/tests/fakeroute.nix index 24919d6b242c..3886b976f1c7 100644 --- a/nixos/tests/fakeroute.nix +++ b/nixos/tests/fakeroute.nix @@ -1,27 +1,25 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "fakeroute"; - meta.maintainers = with lib.maintainers; [ rnhmjoj ]; +{ lib, pkgs, ... }: +{ + name = "fakeroute"; + meta.maintainers = with lib.maintainers; [ rnhmjoj ]; - nodes.machine = - { ... }: - { - imports = [ ../modules/profiles/minimal.nix ]; - services.fakeroute.enable = true; - services.fakeroute.route = [ - "216.102.187.130" - "4.0.1.122" - "198.116.142.34" - "63.199.8.242" - ]; - environment.systemPackages = [ pkgs.traceroute ]; - }; + nodes.machine = + { ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; + services.fakeroute.enable = true; + services.fakeroute.route = [ + "216.102.187.130" + "4.0.1.122" + "198.116.142.34" + "63.199.8.242" + ]; + environment.systemPackages = [ pkgs.traceroute ]; + }; - testScript = '' - start_all() - machine.wait_for_unit("fakeroute.service") - machine.succeed("traceroute 127.0.0.1 | grep -q 216.102.187.130") - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("fakeroute.service") + machine.succeed("traceroute 127.0.0.1 | grep -q 216.102.187.130") + ''; +} diff --git a/nixos/tests/fanout.nix b/nixos/tests/fanout.nix index a6ee1b8120b6..8a75d51e07a6 100644 --- a/nixos/tests/fanout.nix +++ b/nixos/tests/fanout.nix @@ -3,36 +3,34 @@ config ? { }, pkgs ? import ../.. { inherit system config; }, }: -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "fanout"; - meta.maintainers = [ lib.maintainers.therishidesai ]; +{ lib, pkgs, ... }: +{ + name = "fanout"; + meta.maintainers = [ lib.maintainers.therishidesai ]; - nodes = - let - cfg = - { ... }: - { - services.fanout = { - enable = true; - fanoutDevices = 2; - bufferSize = 8192; - }; + nodes = + let + cfg = + { ... }: + { + services.fanout = { + enable = true; + fanoutDevices = 2; + bufferSize = 8192; }; - in - { - machine = cfg; - }; + }; + in + { + machine = cfg; + }; - testScript = '' - start_all() + testScript = '' + start_all() - # mDNS. - machine.wait_for_unit("multi-user.target") + # mDNS. + machine.wait_for_unit("multi-user.target") - machine.succeed("test -c /dev/fanout0") - machine.succeed("test -c /dev/fanout1") - ''; - } -) + machine.succeed("test -c /dev/fanout0") + machine.succeed("test -c /dev/fanout1") + ''; +} diff --git a/nixos/tests/fenics.nix b/nixos/tests/fenics.nix index bf991fed9b0c..ebfff402627f 100644 --- a/nixos/tests/fenics.nix +++ b/nixos/tests/fenics.nix @@ -1,53 +1,51 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - let - fenicsScript = pkgs.writeScript "poisson.py" '' - #!/usr/bin/env python - from dolfin import * +let + fenicsScript = pkgs.writeScript "poisson.py" '' + #!/usr/bin/env python + from dolfin import * - mesh = UnitSquareMesh(4, 4) - V = FunctionSpace(mesh, "Lagrange", 1) + mesh = UnitSquareMesh(4, 4) + V = FunctionSpace(mesh, "Lagrange", 1) - def boundary(x): - return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS + def boundary(x): + return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS - u0 = Constant(0.0) - bc = DirichletBC(V, u0, boundary) + u0 = Constant(0.0) + bc = DirichletBC(V, u0, boundary) - u = TrialFunction(V) - v = TestFunction(V) - f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)", degree=2) - g = Expression("sin(5*x[0])", degree=2) - a = inner(grad(u), grad(v))*dx - L = f*v*dx + g*v*ds + u = TrialFunction(V) + v = TestFunction(V) + f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)", degree=2) + g = Expression("sin(5*x[0])", degree=2) + a = inner(grad(u), grad(v))*dx + L = f*v*dx + g*v*ds - u = Function(V) - solve(a == L, u, bc) - print(u) + u = Function(V) + solve(a == L, u, bc) + print(u) + ''; +in +{ + name = "fenics"; + meta = { + maintainers = with pkgs.lib.maintainers; [ ]; + }; + + nodes = { + fenicsnode = + { pkgs, ... }: + { + environment.systemPackages = with pkgs; [ + gcc + (python3.withPackages (ps: with ps; [ fenics ])) + ]; + }; + }; + testScript = + { nodes, ... }: + '' + start_all() + fenicsnode.succeed("${fenicsScript}") ''; - in - { - name = "fenics"; - meta = { - maintainers = with pkgs.lib.maintainers; [ ]; - }; - - nodes = { - fenicsnode = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ - gcc - (python3.withPackages (ps: with ps; [ fenics ])) - ]; - }; - }; - testScript = - { nodes, ... }: - '' - start_all() - fenicsnode.succeed("${fenicsScript}") - ''; - } -) +} diff --git a/nixos/tests/ferm.nix b/nixos/tests/ferm.nix index b0a3a828384b..55781a3b5c05 100644 --- a/nixos/tests/ferm.nix +++ b/nixos/tests/ferm.nix @@ -1,98 +1,96 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "ferm"; - meta = with pkgs.lib.maintainers; { - maintainers = [ mic92 ]; - }; +{ pkgs, ... }: +{ + name = "ferm"; + meta = with pkgs.lib.maintainers; { + maintainers = [ mic92 ]; + }; - nodes = { - client = - { pkgs, ... }: - with pkgs.lib; - { - networking = { - dhcpcd.enable = false; - interfaces.eth1.ipv6.addresses = mkOverride 0 [ - { - address = "fd00::2"; - prefixLength = 64; - } - ]; - interfaces.eth1.ipv4.addresses = mkOverride 0 [ - { - address = "192.168.1.2"; - prefixLength = 24; - } - ]; - }; + nodes = { + client = + { pkgs, ... }: + with pkgs.lib; + { + networking = { + dhcpcd.enable = false; + interfaces.eth1.ipv6.addresses = mkOverride 0 [ + { + address = "fd00::2"; + prefixLength = 64; + } + ]; + interfaces.eth1.ipv4.addresses = mkOverride 0 [ + { + address = "192.168.1.2"; + prefixLength = 24; + } + ]; }; - server = - { pkgs, ... 
}: - with pkgs.lib; - { - networking = { - dhcpcd.enable = false; - useNetworkd = true; - useDHCP = false; - interfaces.eth1.ipv6.addresses = mkOverride 0 [ - { - address = "fd00::1"; - prefixLength = 64; - } - ]; - interfaces.eth1.ipv4.addresses = mkOverride 0 [ - { - address = "192.168.1.1"; - prefixLength = 24; - } - ]; - }; - - services = { - ferm.enable = true; - ferm.config = '' - domain (ip ip6) table filter chain INPUT { - interface lo ACCEPT; - proto tcp dport 8080 REJECT reject-with tcp-reset; - } - ''; - nginx.enable = true; - nginx.httpConfig = '' - server { - listen 80; - listen [::]:80; - listen 8080; - listen [::]:8080; - - location /status { stub_status on; } - } - ''; - }; + }; + server = + { pkgs, ... }: + with pkgs.lib; + { + networking = { + dhcpcd.enable = false; + useNetworkd = true; + useDHCP = false; + interfaces.eth1.ipv6.addresses = mkOverride 0 [ + { + address = "fd00::1"; + prefixLength = 64; + } + ]; + interfaces.eth1.ipv4.addresses = mkOverride 0 [ + { + address = "192.168.1.1"; + prefixLength = 24; + } + ]; }; - }; - testScript = '' - start_all() + services = { + ferm.enable = true; + ferm.config = '' + domain (ip ip6) table filter chain INPUT { + interface lo ACCEPT; + proto tcp dport 8080 REJECT reject-with tcp-reset; + } + ''; + nginx.enable = true; + nginx.httpConfig = '' + server { + listen 80; + listen [::]:80; + listen 8080; + listen [::]:8080; - client.systemctl("start network-online.target") - server.systemctl("start network-online.target") - client.wait_for_unit("network-online.target") - server.wait_for_unit("network-online.target") - server.wait_for_unit("ferm.service") - server.wait_for_unit("nginx.service") - server.wait_until_succeeds("ss -ntl | grep -q 80") + location /status { stub_status on; } + } + ''; + }; + }; + }; - with subtest("port 80 is allowed"): - client.succeed("curl --fail -g http://192.168.1.1:80/status") - client.succeed("curl --fail -g http://[fd00::1]:80/status") + testScript = '' + start_all() - with subtest("port 8080 is not allowed"): - server.succeed("curl --fail -g http://192.168.1.1:8080/status") - server.succeed("curl --fail -g http://[fd00::1]:8080/status") + client.systemctl("start network-online.target") + server.systemctl("start network-online.target") + client.wait_for_unit("network-online.target") + server.wait_for_unit("network-online.target") + server.wait_for_unit("ferm.service") + server.wait_for_unit("nginx.service") + server.wait_until_succeeds("ss -ntl | grep -q 80") - client.fail("curl --fail -g http://192.168.1.1:8080/status") - client.fail("curl --fail -g http://[fd00::1]:8080/status") - ''; - } -) + with subtest("port 80 is allowed"): + client.succeed("curl --fail -g http://192.168.1.1:80/status") + client.succeed("curl --fail -g http://[fd00::1]:80/status") + + with subtest("port 8080 is not allowed"): + server.succeed("curl --fail -g http://192.168.1.1:8080/status") + server.succeed("curl --fail -g http://[fd00::1]:8080/status") + + client.fail("curl --fail -g http://192.168.1.1:8080/status") + client.fail("curl --fail -g http://[fd00::1]:8080/status") + ''; +} diff --git a/nixos/tests/filesender.nix b/nixos/tests/filesender.nix index 5c4c1a738243..b81b2ab04af3 100644 --- a/nixos/tests/filesender.nix +++ b/nixos/tests/filesender.nix @@ -1,148 +1,146 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... 
}: - { - name = "filesender"; - meta = { - maintainers = with lib.maintainers; [ nhnn ]; - broken = pkgs.stdenv.hostPlatform.isAarch64; # selenium.common.exceptions.WebDriverException: Message: Unsupported platform/architecture combination: linux/aarch64 - }; +{ pkgs, lib, ... }: +{ + name = "filesender"; + meta = { + maintainers = with lib.maintainers; [ nhnn ]; + broken = pkgs.stdenv.hostPlatform.isAarch64; # selenium.common.exceptions.WebDriverException: Message: Unsupported platform/architecture combination: linux/aarch64 + }; - nodes.filesender = - { ... }: - let - format = pkgs.formats.php { }; - in - { - networking.firewall.allowedTCPPorts = [ 80 ]; + nodes.filesender = + { ... }: + let + format = pkgs.formats.php { }; + in + { + networking.firewall.allowedTCPPorts = [ 80 ]; - services.filesender.enable = true; - services.filesender.localDomain = "filesender"; - services.filesender.settings = { - auth_sp_saml_authentication_source = "default"; - auth_sp_saml_uid_attribute = "uid"; - storage_filesystem_path = "/tmp"; - site_url = "http://filesender"; - force_ssl = false; - admin = ""; - admin_email = "admin@localhost"; - email_reply_to = "noreply@localhost"; + services.filesender.enable = true; + services.filesender.localDomain = "filesender"; + services.filesender.settings = { + auth_sp_saml_authentication_source = "default"; + auth_sp_saml_uid_attribute = "uid"; + storage_filesystem_path = "/tmp"; + site_url = "http://filesender"; + force_ssl = false; + admin = ""; + admin_email = "admin@localhost"; + email_reply_to = "noreply@localhost"; + }; + services.simplesamlphp.filesender = { + settings = { + baseurlpath = "http://filesender/saml"; + "module.enable".exampleauth = true; }; - services.simplesamlphp.filesender = { - settings = { - baseurlpath = "http://filesender/saml"; - "module.enable".exampleauth = true; - }; - authSources = { - admin = [ "core:AdminPassword" ]; - default = format.lib.mkMixedArray [ "exampleauth:UserPass" ] { - "user:password" = { - uid = [ "user" ]; - cn = [ "user" ]; - mail = [ "user@nixos.org" ]; - }; + authSources = { + admin = [ "core:AdminPassword" ]; + default = format.lib.mkMixedArray [ "exampleauth:UserPass" ] { + "user:password" = { + uid = [ "user" ]; + cn = [ "user" ]; + mail = [ "user@nixos.org" ]; }; }; }; }; + }; - nodes.client = - { - pkgs, - nodes, - ... - }: - let - filesenderIP = (builtins.head (nodes.filesender.networking.interfaces.eth1.ipv4.addresses)).address; - in - { - networking.hosts.${filesenderIP} = [ "filesender" ]; + nodes.client = + { + pkgs, + nodes, + ... 
+ }: + let + filesenderIP = (builtins.head (nodes.filesender.networking.interfaces.eth1.ipv4.addresses)).address; + in + { + networking.hosts.${filesenderIP} = [ "filesender" ]; - environment.systemPackages = - let - username = "user"; - password = "password"; - browser-test = - pkgs.writers.writePython3Bin "browser-test" - { - libraries = [ pkgs.python3Packages.selenium ]; - flakeIgnore = [ - "E124" - "E501" - ]; - } - '' - from selenium.webdriver.common.by import By - from selenium.webdriver import Firefox - from selenium.webdriver.firefox.options import Options - from selenium.webdriver.firefox.firefox_profile import FirefoxProfile - from selenium.webdriver.firefox.service import Service - from selenium.webdriver.support.ui import WebDriverWait - from selenium.webdriver.support import expected_conditions as EC - from subprocess import STDOUT - import string - import random - import logging - import time - selenium_logger = logging.getLogger("selenium") - selenium_logger.setLevel(logging.DEBUG) - selenium_logger.addHandler(logging.StreamHandler()) - profile = FirefoxProfile() - profile.set_preference("browser.download.folderList", 2) - profile.set_preference("browser.download.manager.showWhenStarting", False) - profile.set_preference("browser.download.dir", "/tmp/firefox") - profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain;text/txt") - options = Options() - options.profile = profile - options.add_argument('--headless') - service = Service(log_output=STDOUT) - driver = Firefox(options=options) - driver.set_window_size(1024, 768) - driver.implicitly_wait(30) - driver.get('http://filesender/') - wait = WebDriverWait(driver, 20) - wait.until(EC.title_contains("FileSender")) - driver.find_element(By.ID, "btn_logon").click() - wait.until(EC.title_contains("Enter your username and password")) - driver.find_element(By.ID, 'username').send_keys( - '${username}' - ) - driver.find_element(By.ID, 'password').send_keys( - '${password}' - ) - driver.find_element(By.ID, "submit_button").click() - wait.until(EC.title_contains("FileSender")) - wait.until(EC.presence_of_element_located((By.ID, "topmenu_logoff"))) - test_string = "".join(random.choices(string.ascii_uppercase + string.digits, k=20)) - with open("/tmp/test_file.txt", "w") as file: - file.write(test_string) - driver.find_element(By.ID, "files").send_keys("/tmp/test_file.txt") - time.sleep(2) - driver.find_element(By.CSS_SELECTOR, '.start').click() - wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".download_link"))) - download_link = driver.find_element(By.CSS_SELECTOR, '.download_link > textarea').get_attribute('value').strip() - driver.get(download_link) - wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".download"))) - driver.find_element(By.CSS_SELECTOR, '.download').click() - wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".ui-dialog-buttonset > button:nth-child(2)"))) - driver.find_element(By.CSS_SELECTOR, ".ui-dialog-buttonset > button:nth-child(2)").click() - driver.close() - driver.quit() - ''; - in - [ - pkgs.firefox-unwrapped - pkgs.geckodriver - browser-test - ]; - }; + environment.systemPackages = + let + username = "user"; + password = "password"; + browser-test = + pkgs.writers.writePython3Bin "browser-test" + { + libraries = [ pkgs.python3Packages.selenium ]; + flakeIgnore = [ + "E124" + "E501" + ]; + } + '' + from selenium.webdriver.common.by import By + from selenium.webdriver import Firefox + from selenium.webdriver.firefox.options import Options + from 
selenium.webdriver.firefox.firefox_profile import FirefoxProfile + from selenium.webdriver.firefox.service import Service + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions as EC + from subprocess import STDOUT + import string + import random + import logging + import time + selenium_logger = logging.getLogger("selenium") + selenium_logger.setLevel(logging.DEBUG) + selenium_logger.addHandler(logging.StreamHandler()) + profile = FirefoxProfile() + profile.set_preference("browser.download.folderList", 2) + profile.set_preference("browser.download.manager.showWhenStarting", False) + profile.set_preference("browser.download.dir", "/tmp/firefox") + profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain;text/txt") + options = Options() + options.profile = profile + options.add_argument('--headless') + service = Service(log_output=STDOUT) + driver = Firefox(options=options) + driver.set_window_size(1024, 768) + driver.implicitly_wait(30) + driver.get('http://filesender/') + wait = WebDriverWait(driver, 20) + wait.until(EC.title_contains("FileSender")) + driver.find_element(By.ID, "btn_logon").click() + wait.until(EC.title_contains("Enter your username and password")) + driver.find_element(By.ID, 'username').send_keys( + '${username}' + ) + driver.find_element(By.ID, 'password').send_keys( + '${password}' + ) + driver.find_element(By.ID, "submit_button").click() + wait.until(EC.title_contains("FileSender")) + wait.until(EC.presence_of_element_located((By.ID, "topmenu_logoff"))) + test_string = "".join(random.choices(string.ascii_uppercase + string.digits, k=20)) + with open("/tmp/test_file.txt", "w") as file: + file.write(test_string) + driver.find_element(By.ID, "files").send_keys("/tmp/test_file.txt") + time.sleep(2) + driver.find_element(By.CSS_SELECTOR, '.start').click() + wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".download_link"))) + download_link = driver.find_element(By.CSS_SELECTOR, '.download_link > textarea').get_attribute('value').strip() + driver.get(download_link) + wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".download"))) + driver.find_element(By.CSS_SELECTOR, '.download').click() + wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".ui-dialog-buttonset > button:nth-child(2)"))) + driver.find_element(By.CSS_SELECTOR, ".ui-dialog-buttonset > button:nth-child(2)").click() + driver.close() + driver.quit() + ''; + in + [ + pkgs.firefox-unwrapped + pkgs.geckodriver + browser-test + ]; + }; - testScript = '' - start_all() - filesender.wait_for_file("/run/phpfpm/filesender.sock") - filesender.wait_for_open_port(80) - if "If you have received an invitation to access this site as a guest" not in client.wait_until_succeeds("curl -sS -f http://filesender"): - raise Exception("filesender returned invalid html") - client.succeed("browser-test") - ''; - } -) + testScript = '' + start_all() + filesender.wait_for_file("/run/phpfpm/filesender.sock") + filesender.wait_for_open_port(80) + if "If you have received an invitation to access this site as a guest" not in client.wait_until_succeeds("curl -sS -f http://filesender"): + raise Exception("filesender returned invalid html") + client.succeed("browser-test") + ''; +} diff --git a/nixos/tests/firefoxpwa.nix b/nixos/tests/firefoxpwa.nix index 546534d1f05e..0e664dfc6e49 100644 --- a/nixos/tests/firefoxpwa.nix +++ b/nixos/tests/firefoxpwa.nix @@ -1,41 +1,39 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... 
}: - { - name = "firefoxpwa"; - meta.maintainers = with lib.maintainers; [ camillemndn ]; +{ + name = "firefoxpwa"; + meta.maintainers = with lib.maintainers; [ camillemndn ]; - nodes.machine = - { pkgs, ... }: - { - imports = [ ./common/x11.nix ]; - environment.systemPackages = with pkgs; [ - firefoxpwa - jq - ]; + nodes.machine = + { pkgs, ... }: + { + imports = [ ./common/x11.nix ]; + environment.systemPackages = with pkgs; [ + firefoxpwa + jq + ]; - programs.firefox = { - enable = true; - nativeMessagingHosts.packages = [ pkgs.firefoxpwa ]; - }; - - services.jellyfin.enable = true; + programs.firefox = { + enable = true; + nativeMessagingHosts.packages = [ pkgs.firefoxpwa ]; }; - enableOCR = true; + services.jellyfin.enable = true; + }; - testScript = '' - machine.start() + enableOCR = true; - with subtest("Install a progressive web app"): - machine.wait_for_unit("jellyfin.service") - machine.wait_for_open_port(8096) - machine.succeed("firefoxpwa site install http://localhost:8096/web/manifest.json >&2") + testScript = '' + machine.start() - with subtest("Launch the progressive web app"): - machine.succeed("firefoxpwa site launch $(jq -r < ~/.local/share/firefoxpwa/config.json '.sites | keys[0]') >&2") - machine.wait_for_window("Jellyfin") - machine.wait_for_text("Jellyfin") - ''; - } -) + with subtest("Install a progressive web app"): + machine.wait_for_unit("jellyfin.service") + machine.wait_for_open_port(8096) + machine.succeed("firefoxpwa site install http://localhost:8096/web/manifest.json >&2") + + with subtest("Launch the progressive web app"): + machine.succeed("firefoxpwa site launch $(jq -r < ~/.local/share/firefoxpwa/config.json '.sites | keys[0]') >&2") + machine.wait_for_window("Jellyfin") + machine.wait_for_text("Jellyfin") + ''; +} diff --git a/nixos/tests/firejail.nix b/nixos/tests/firejail.nix index 24ec4dbe2a7e..6ac163ba0678 100644 --- a/nixos/tests/firejail.nix +++ b/nixos/tests/firejail.nix @@ -1,95 +1,93 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "firejail"; - meta = with pkgs.lib.maintainers; { - maintainers = [ sgo ]; - }; +{ pkgs, ... }: +{ + name = "firejail"; + meta = with pkgs.lib.maintainers; { + maintainers = [ sgo ]; + }; - nodes.machine = - { ... }: - { - imports = [ ./common/user-account.nix ]; + nodes.machine = + { ... 
}: + { + imports = [ ./common/user-account.nix ]; - programs.firejail = { - enable = true; - wrappedBinaries = { - bash-jailed = "${pkgs.bash}/bin/bash"; - bash-jailed2 = { - executable = "${pkgs.bash}/bin/bash"; - extraArgs = [ "--private=~/firejail-home" ]; - }; + programs.firejail = { + enable = true; + wrappedBinaries = { + bash-jailed = "${pkgs.bash}/bin/bash"; + bash-jailed2 = { + executable = "${pkgs.bash}/bin/bash"; + extraArgs = [ "--private=~/firejail-home" ]; }; }; - - systemd.services.setupFirejailTest = { - wantedBy = [ "multi-user.target" ]; - before = [ "multi-user.target" ]; - - environment = { - HOME = "/home/alice"; - }; - - unitConfig = { - type = "oneshot"; - RemainAfterExit = true; - user = "alice"; - }; - - script = '' - cd $HOME - - mkdir .password-store && echo s3cret > .password-store/secret - mkdir my-secrets && echo s3cret > my-secrets/secret - - echo publ1c > public - - mkdir -p .config/firejail - echo 'blacklist ''${HOME}/my-secrets' > .config/firejail/globals.local - ''; - }; }; - testScript = '' - start_all() - machine.wait_for_unit("multi-user.target") + systemd.services.setupFirejailTest = { + wantedBy = [ "multi-user.target" ]; + before = [ "multi-user.target" ]; - # Test path acl with wrapper - machine.succeed("sudo -u alice bash-jailed -c 'cat ~/public' | grep -q publ1c") - machine.fail( - "sudo -u alice bash-jailed -c 'cat ~/.password-store/secret' | grep -q s3cret" - ) - machine.fail("sudo -u alice bash-jailed -c 'cat ~/my-secrets/secret' | grep -q s3cret") + environment = { + HOME = "/home/alice"; + }; - # Test extraArgs - machine.succeed("sudo -u alice mkdir /home/alice/firejail-home") - machine.succeed("sudo -u alice bash-jailed2 -c 'echo test > /home/alice/foo'") - machine.fail("sudo -u alice cat /home/alice/foo") - machine.succeed("sudo -u alice cat /home/alice/firejail-home/foo | grep test") + unitConfig = { + type = "oneshot"; + RemainAfterExit = true; + user = "alice"; + }; - # Test path acl with firejail executable - machine.succeed("sudo -u alice firejail -- bash -c 'cat ~/public' | grep -q publ1c") - machine.fail( - "sudo -u alice firejail -- bash -c 'cat ~/.password-store/secret' | grep -q s3cret" - ) - machine.fail( - "sudo -u alice firejail -- bash -c 'cat ~/my-secrets/secret' | grep -q s3cret" - ) + script = '' + cd $HOME - # Disabling profiles - machine.succeed( - "sudo -u alice bash -c 'firejail --noprofile -- cat ~/.password-store/secret' | grep -q s3cret" - ) + mkdir .password-store && echo s3cret > .password-store/secret + mkdir my-secrets && echo s3cret > my-secrets/secret - # CVE-2020-17367 - machine.fail( - "sudo -u alice firejail --private-tmp id --output=/tmp/vuln1 && cat /tmp/vuln1" - ) + echo publ1c > public - # CVE-2020-17368 - machine.fail( - "sudo -u alice firejail --private-tmp --output=/tmp/foo 'bash -c $(id>/tmp/vuln2;echo id)' && cat /tmp/vuln2" - ) - ''; - } -) + mkdir -p .config/firejail + echo 'blacklist ''${HOME}/my-secrets' > .config/firejail/globals.local + ''; + }; + }; + + testScript = '' + start_all() + machine.wait_for_unit("multi-user.target") + + # Test path acl with wrapper + machine.succeed("sudo -u alice bash-jailed -c 'cat ~/public' | grep -q publ1c") + machine.fail( + "sudo -u alice bash-jailed -c 'cat ~/.password-store/secret' | grep -q s3cret" + ) + machine.fail("sudo -u alice bash-jailed -c 'cat ~/my-secrets/secret' | grep -q s3cret") + + # Test extraArgs + machine.succeed("sudo -u alice mkdir /home/alice/firejail-home") + machine.succeed("sudo -u alice bash-jailed2 -c 'echo test > 
/home/alice/foo'") + machine.fail("sudo -u alice cat /home/alice/foo") + machine.succeed("sudo -u alice cat /home/alice/firejail-home/foo | grep test") + + # Test path acl with firejail executable + machine.succeed("sudo -u alice firejail -- bash -c 'cat ~/public' | grep -q publ1c") + machine.fail( + "sudo -u alice firejail -- bash -c 'cat ~/.password-store/secret' | grep -q s3cret" + ) + machine.fail( + "sudo -u alice firejail -- bash -c 'cat ~/my-secrets/secret' | grep -q s3cret" + ) + + # Disabling profiles + machine.succeed( + "sudo -u alice bash -c 'firejail --noprofile -- cat ~/.password-store/secret' | grep -q s3cret" + ) + + # CVE-2020-17367 + machine.fail( + "sudo -u alice firejail --private-tmp id --output=/tmp/vuln1 && cat /tmp/vuln1" + ) + + # CVE-2020-17368 + machine.fail( + "sudo -u alice firejail --private-tmp --output=/tmp/foo 'bash -c $(id>/tmp/vuln2;echo id)' && cat /tmp/vuln2" + ) + ''; +} diff --git a/nixos/tests/firezone/firezone.nix b/nixos/tests/firezone/firezone.nix index 10d89778a4c6..82b05b47086c 100644 --- a/nixos/tests/firezone/firezone.nix +++ b/nixos/tests/firezone/firezone.nix @@ -1,349 +1,347 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - let - certs = import ../common/acme/server/snakeoil-certs.nix; - domain = certs.domain; - in - { - name = "firezone"; - meta.maintainers = with pkgs.lib.maintainers; [ oddlama ]; +{ pkgs, ... }: +let + certs = import ../common/acme/server/snakeoil-certs.nix; + domain = certs.domain; +in +{ + name = "firezone"; + meta.maintainers = with pkgs.lib.maintainers; [ oddlama ]; - nodes = { - server = - { - config, - lib, - pkgs, - ... - }: - { - security.pki.certificateFiles = [ certs.ca.cert ]; + nodes = { + server = + { + config, + lib, + pkgs, + ... + }: + { + security.pki.certificateFiles = [ certs.ca.cert ]; - networking.extraHosts = '' - ${config.networking.primaryIPAddress} ${domain} - ${config.networking.primaryIPv6Address} ${domain} - ''; + networking.extraHosts = '' + ${config.networking.primaryIPAddress} ${domain} + ${config.networking.primaryIPv6Address} ${domain} + ''; - networking.firewall.allowedTCPPorts = [ - 80 - 443 - ]; + networking.firewall.allowedTCPPorts = [ + 80 + 443 + ]; - services.nginx = { - enable = true; - virtualHosts.${domain} = { - sslCertificate = certs.${domain}.cert; - sslCertificateKey = certs.${domain}.key; - }; + services.nginx = { + enable = true; + virtualHosts.${domain} = { + sslCertificate = certs.${domain}.cert; + sslCertificateKey = certs.${domain}.key; + }; + }; + + services.firezone.server = { + enable = true; + enableLocalDB = true; + nginx.enable = true; + + # Doesn't need to work for this test, but needs to be configured + # otherwise the server will not start. + smtp = { + from = "firezone@example.com"; + host = "mail.localhost"; + port = 465; + implicitTls = true; + username = "firezone@example.com"; + passwordFile = pkgs.writeText "tmpmailpasswd" "supermailpassword"; }; - services.firezone.server = { + provision = { enable = true; - enableLocalDB = true; - nginx.enable = true; - - # Doesn't need to work for this test, but needs to be configured - # otherwise the server will not start. 
- smtp = { - from = "firezone@example.com"; - host = "mail.localhost"; - port = 465; - implicitTls = true; - username = "firezone@example.com"; - passwordFile = pkgs.writeText "tmpmailpasswd" "supermailpassword"; - }; - - provision = { - enable = true; - accounts.main = { - name = "My Account"; - relayGroups.my-relays.name = "Relays"; - gatewayGroups.site.name = "Site"; - actors = { - admin = { - type = "account_admin_user"; - name = "Admin"; - email = "admin@example.com"; - }; - client = { - type = "service_account"; - name = "A client"; - email = "client@example.com"; - }; + accounts.main = { + name = "My Account"; + relayGroups.my-relays.name = "Relays"; + gatewayGroups.site.name = "Site"; + actors = { + admin = { + type = "account_admin_user"; + name = "Admin"; + email = "admin@example.com"; }; - resources.res1 = { - type = "dns"; - name = "Dns Resource"; - address = "resource.example.com"; - gatewayGroups = [ "site" ]; - filters = [ - { protocol = "icmp"; } - { - protocol = "tcp"; - ports = [ 80 ]; - } - ]; - }; - resources.res2 = { - type = "ip"; - name = "Ip Resource"; - address = "172.20.2.1"; - gatewayGroups = [ "site" ]; - }; - resources.res3 = { - type = "cidr"; - name = "Cidr Resource"; - address = "172.20.1.0/24"; - gatewayGroups = [ "site" ]; - }; - policies.pol1 = { - description = "Allow anyone res1 access"; - group = "everyone"; - resource = "res1"; - }; - policies.pol2 = { - description = "Allow anyone res2 access"; - group = "everyone"; - resource = "res2"; - }; - policies.pol3 = { - description = "Allow anyone res3 access"; - group = "everyone"; - resource = "res3"; + client = { + type = "service_account"; + name = "A client"; + email = "client@example.com"; }; }; + resources.res1 = { + type = "dns"; + name = "Dns Resource"; + address = "resource.example.com"; + gatewayGroups = [ "site" ]; + filters = [ + { protocol = "icmp"; } + { + protocol = "tcp"; + ports = [ 80 ]; + } + ]; + }; + resources.res2 = { + type = "ip"; + name = "Ip Resource"; + address = "172.20.2.1"; + gatewayGroups = [ "site" ]; + }; + resources.res3 = { + type = "cidr"; + name = "Cidr Resource"; + address = "172.20.1.0/24"; + gatewayGroups = [ "site" ]; + }; + policies.pol1 = { + description = "Allow anyone res1 access"; + group = "everyone"; + resource = "res1"; + }; + policies.pol2 = { + description = "Allow anyone res2 access"; + group = "everyone"; + resource = "res2"; + }; + policies.pol3 = { + description = "Allow anyone res3 access"; + group = "everyone"; + resource = "res3"; + }; }; - - api.externalUrl = "https://${domain}/api/"; - web.externalUrl = "https://${domain}/"; }; - systemd.services.firezone-server-domain.postStart = lib.mkAfter '' - ${lib.getExe config.services.firezone.server.domain.package} rpc 'Code.eval_file("${./create-tokens.exs}")' - ''; + api.externalUrl = "https://${domain}/api/"; + web.externalUrl = "https://${domain}/"; }; - relay = - { - nodes, - config, - lib, - ... 
- }: - { - security.pki.certificateFiles = [ certs.ca.cert ]; - networking.extraHosts = '' - ${nodes.server.networking.primaryIPAddress} ${domain} - ${nodes.server.networking.primaryIPv6Address} ${domain} - ''; + systemd.services.firezone-server-domain.postStart = lib.mkAfter '' + ${lib.getExe config.services.firezone.server.domain.package} rpc 'Code.eval_file("${./create-tokens.exs}")' + ''; + }; - services.firezone.relay = { - enable = true; - logLevel = "debug"; - name = "test-relay"; - apiUrl = "wss://${domain}/api/"; - tokenFile = "/tmp/shared/relay_token.txt"; - publicIpv4 = config.networking.primaryIPAddress; - publicIpv6 = config.networking.primaryIPv6Address; - openFirewall = true; + relay = + { + nodes, + config, + lib, + ... + }: + { + security.pki.certificateFiles = [ certs.ca.cert ]; + networking.extraHosts = '' + ${nodes.server.networking.primaryIPAddress} ${domain} + ${nodes.server.networking.primaryIPv6Address} ${domain} + ''; + + services.firezone.relay = { + enable = true; + logLevel = "debug"; + name = "test-relay"; + apiUrl = "wss://${domain}/api/"; + tokenFile = "/tmp/shared/relay_token.txt"; + publicIpv4 = config.networking.primaryIPAddress; + publicIpv6 = config.networking.primaryIPv6Address; + openFirewall = true; + }; + + # Don't auto-start so we can wait until the token was provisioned + systemd.services.firezone-relay.wantedBy = lib.mkForce [ ]; + }; + + # A resource that is only connected to the gateway, + # allowing us to confirm the VPN works + resource = { + virtualisation.vlans = [ + 1 + 2 + ]; + + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "172.20.1.1"; + prefixLength = 24; + } + ]; + + networking.interfaces.eth2.ipv4.addresses = [ + { + address = "172.20.2.1"; + prefixLength = 24; + } + ]; + + networking.firewall.allowedTCPPorts = [ + 80 + ]; + + services.nginx = { + enable = true; + virtualHosts = { + "localhost" = { + default = true; + locations."/".extraConfig = '' + return 200 'greetings from the resource'; + add_header Content-Type text/plain; + ''; }; - - # Don't auto-start so we can wait until the token was provisioned - systemd.services.firezone-relay.wantedBy = lib.mkForce [ ]; }; + }; + }; - # A resource that is only connected to the gateway, - # allowing us to confirm the VPN works - resource = { + gateway = + { + nodes, + lib, + ... + }: + { virtualisation.vlans = [ 1 2 ]; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "172.20.1.1"; - prefixLength = 24; - } - ]; - - networking.interfaces.eth2.ipv4.addresses = [ - { - address = "172.20.2.1"; - prefixLength = 24; - } - ]; - - networking.firewall.allowedTCPPorts = [ - 80 - ]; - - services.nginx = { - enable = true; - virtualHosts = { - "localhost" = { - default = true; - locations."/".extraConfig = '' - return 200 'greetings from the resource'; - add_header Content-Type text/plain; - ''; - }; - }; - }; - }; - - gateway = - { - nodes, - lib, - ... 
- }: - { - virtualisation.vlans = [ - 1 - 2 + networking = { + interfaces.eth1.ipv4.addresses = [ + { + address = "172.20.1.2"; + prefixLength = 24; + } ]; - networking = { - interfaces.eth1.ipv4.addresses = [ - { - address = "172.20.1.2"; - prefixLength = 24; - } - ]; + interfaces.eth2.ipv4.addresses = [ + { + address = "172.20.2.2"; + prefixLength = 24; + } + ]; - interfaces.eth2.ipv4.addresses = [ - { - address = "172.20.2.2"; - prefixLength = 24; - } - ]; + firewall.enable = false; + nftables.enable = true; + nftables.tables."filter".family = "inet"; + nftables.tables."filter".content = '' + chain incoming { + type filter hook input priority 0; policy accept; + } - firewall.enable = false; - nftables.enable = true; - nftables.tables."filter".family = "inet"; - nftables.tables."filter".content = '' - chain incoming { - type filter hook input priority 0; policy accept; - } + chain postrouting { + type nat hook postrouting priority srcnat; policy accept; + meta protocol ip iifname "tun-firezone" oifname { "eth1", "eth2" } masquerade random + } - chain postrouting { - type nat hook postrouting priority srcnat; policy accept; - meta protocol ip iifname "tun-firezone" oifname { "eth1", "eth2" } masquerade random - } + chain forward { + type filter hook forward priority 0; policy drop; + iifname "tun-firezone" accept + oifname "tun-firezone" accept + } - chain forward { - type filter hook forward priority 0; policy drop; - iifname "tun-firezone" accept - oifname "tun-firezone" accept - } - - chain output { - type filter hook output priority 0; policy accept; - } - ''; - }; - - boot.kernel.sysctl."net.ipv4.ip_forward" = "1"; - # boot.kernel.sysctl."net.ipv4.conf.all.src_valid_mark" = "1"; - boot.kernel.sysctl."net.ipv6.conf.default.forwarding" = "1"; - boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = "1"; - - security.pki.certificateFiles = [ certs.ca.cert ]; - networking.extraHosts = '' - ${nodes.server.networking.primaryIPAddress} ${domain} - ${nodes.server.networking.primaryIPv6Address} ${domain} - 172.20.1.1 resource.example.com + chain output { + type filter hook output priority 0; policy accept; + } ''; - - services.firezone.gateway = { - enable = true; - logLevel = "debug"; - name = "test-gateway"; - apiUrl = "wss://${domain}/api/"; - tokenFile = "/tmp/shared/gateway_token.txt"; - }; - - # Don't auto-start so we can wait until the token was provisioned - systemd.services.firezone-gateway.wantedBy = lib.mkForce [ ]; }; - client = - { - nodes, - lib, - ... 
- }: - { - security.pki.certificateFiles = [ certs.ca.cert ]; - networking.useNetworkd = true; - networking.extraHosts = '' - ${nodes.server.networking.primaryIPAddress} ${domain} - ${nodes.server.networking.primaryIPv6Address} ${domain} - ''; + boot.kernel.sysctl."net.ipv4.ip_forward" = "1"; + # boot.kernel.sysctl."net.ipv4.conf.all.src_valid_mark" = "1"; + boot.kernel.sysctl."net.ipv6.conf.default.forwarding" = "1"; + boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = "1"; - services.firezone.headless-client = { - enable = true; - logLevel = "debug"; - name = "test-client-somebody"; - apiUrl = "wss://${domain}/api/"; - tokenFile = "/tmp/shared/client_token.txt"; - }; + security.pki.certificateFiles = [ certs.ca.cert ]; + networking.extraHosts = '' + ${nodes.server.networking.primaryIPAddress} ${domain} + ${nodes.server.networking.primaryIPv6Address} ${domain} + 172.20.1.1 resource.example.com + ''; - # Don't auto-start so we can wait until the token was provisioned - systemd.services.firezone-headless-client.wantedBy = lib.mkForce [ ]; + services.firezone.gateway = { + enable = true; + logLevel = "debug"; + name = "test-gateway"; + apiUrl = "wss://${domain}/api/"; + tokenFile = "/tmp/shared/gateway_token.txt"; }; - }; - testScript = - { ... }: - '' - start_all() + # Don't auto-start so we can wait until the token was provisioned + systemd.services.firezone-gateway.wantedBy = lib.mkForce [ ]; + }; - with subtest("Start server"): - server.wait_for_unit("firezone.target") - server.wait_until_succeeds("curl -Lsf https://${domain} | grep 'Welcome to Firezone'") - server.wait_until_succeeds("curl -Ls https://${domain}/api | grep 'Not Found'") + client = + { + nodes, + lib, + ... + }: + { + security.pki.certificateFiles = [ certs.ca.cert ]; + networking.useNetworkd = true; + networking.extraHosts = '' + ${nodes.server.networking.primaryIPAddress} ${domain} + ${nodes.server.networking.primaryIPv6Address} ${domain} + ''; - # Wait for tokens and copy them to shared folder - server.wait_for_file("/var/lib/private/firezone/relay_token.txt") - server.wait_for_file("/var/lib/private/firezone/gateway_token.txt") - server.wait_for_file("/var/lib/private/firezone/client_token.txt") - server.succeed("cp /var/lib/private/firezone/*_token.txt /tmp/shared") + services.firezone.headless-client = { + enable = true; + logLevel = "debug"; + name = "test-client-somebody"; + apiUrl = "wss://${domain}/api/"; + tokenFile = "/tmp/shared/client_token.txt"; + }; - with subtest("Connect relay"): - relay.succeed("systemctl start firezone-relay") - relay.wait_for_unit("firezone-relay.service") - relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Connected to portal.*${domain}'", timeout=30) + # Don't auto-start so we can wait until the token was provisioned + systemd.services.firezone-headless-client.wantedBy = lib.mkForce [ ]; + }; + }; - with subtest("Connect gateway"): - gateway.succeed("systemctl start firezone-gateway") - gateway.wait_for_unit("firezone-gateway.service") - gateway.wait_until_succeeds("journalctl --since -2m --unit firezone-gateway.service --grep 'Connected to portal.*${domain}'", timeout=30) - relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Created allocation.*IPv4'", timeout=30) - relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Created allocation.*IPv6'", timeout=30) + testScript = + { ... 
}: + '' + start_all() - # Assert both relay ips are known - gateway.wait_until_succeeds("journalctl --since -2m --unit firezone-gateway.service --grep 'Updated allocation.*relay_ip4.*Some.*relay_ip6.*Some'", timeout=30) + with subtest("Start server"): + server.wait_for_unit("firezone.target") + server.wait_until_succeeds("curl -Lsf https://${domain} | grep 'Welcome to Firezone'") + server.wait_until_succeeds("curl -Ls https://${domain}/api | grep 'Not Found'") - with subtest("Connect headless-client"): - client.succeed("systemctl start firezone-headless-client") - client.wait_for_unit("firezone-headless-client.service") - client.wait_until_succeeds("journalctl --since -2m --unit firezone-headless-client.service --grep 'Connected to portal.*${domain}'", timeout=30) - client.wait_until_succeeds("journalctl --since -2m --unit firezone-headless-client.service --grep 'Tunnel ready'", timeout=30) + # Wait for tokens and copy them to shared folder + server.wait_for_file("/var/lib/private/firezone/relay_token.txt") + server.wait_for_file("/var/lib/private/firezone/gateway_token.txt") + server.wait_for_file("/var/lib/private/firezone/client_token.txt") + server.succeed("cp /var/lib/private/firezone/*_token.txt /tmp/shared") - with subtest("Check DNS based access"): - # Check that we can access the resource through the VPN via DNS - client.wait_until_succeeds("curl -4 -Lsf http://resource.example.com | grep 'greetings from the resource'") - client.wait_until_succeeds("curl -6 -Lsf http://resource.example.com | grep 'greetings from the resource'") + with subtest("Connect relay"): + relay.succeed("systemctl start firezone-relay") + relay.wait_for_unit("firezone-relay.service") + relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Connected to portal.*${domain}'", timeout=30) - with subtest("Check CIDR based access"): - # Check that we can access the resource through the VPN via CIDR - client.wait_until_succeeds("ping -c1 -W1 172.20.1.1") + with subtest("Connect gateway"): + gateway.succeed("systemctl start firezone-gateway") + gateway.wait_for_unit("firezone-gateway.service") + gateway.wait_until_succeeds("journalctl --since -2m --unit firezone-gateway.service --grep 'Connected to portal.*${domain}'", timeout=30) + relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Created allocation.*IPv4'", timeout=30) + relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Created allocation.*IPv6'", timeout=30) - with subtest("Check IP based access"): - # Check that we can access the resource through the VPN via IP - client.wait_until_succeeds("ping -c1 -W1 172.20.2.1") - ''; - } -) + # Assert both relay ips are known + gateway.wait_until_succeeds("journalctl --since -2m --unit firezone-gateway.service --grep 'Updated allocation.*relay_ip4.*Some.*relay_ip6.*Some'", timeout=30) + + with subtest("Connect headless-client"): + client.succeed("systemctl start firezone-headless-client") + client.wait_for_unit("firezone-headless-client.service") + client.wait_until_succeeds("journalctl --since -2m --unit firezone-headless-client.service --grep 'Connected to portal.*${domain}'", timeout=30) + client.wait_until_succeeds("journalctl --since -2m --unit firezone-headless-client.service --grep 'Tunnel ready'", timeout=30) + + with subtest("Check DNS based access"): + # Check that we can access the resource through the VPN via DNS + client.wait_until_succeeds("curl -4 -Lsf http://resource.example.com | grep 'greetings from 
the resource'") + client.wait_until_succeeds("curl -6 -Lsf http://resource.example.com | grep 'greetings from the resource'") + + with subtest("Check CIDR based access"): + # Check that we can access the resource through the VPN via CIDR + client.wait_until_succeeds("ping -c1 -W1 172.20.1.1") + + with subtest("Check IP based access"): + # Check that we can access the resource through the VPN via IP + client.wait_until_succeeds("ping -c1 -W1 172.20.2.1") + ''; +} diff --git a/nixos/tests/flaresolverr.nix b/nixos/tests/flaresolverr.nix index 0cec7adf6d6b..dc92e03a0a60 100644 --- a/nixos/tests/flaresolverr.nix +++ b/nixos/tests/flaresolverr.nix @@ -1,22 +1,20 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "flaresolverr"; - meta.maintainers = with lib.maintainers; [ paveloom ]; +{ lib, ... }: +{ + name = "flaresolverr"; + meta.maintainers = with lib.maintainers; [ paveloom ]; - nodes.machine = - { pkgs, ... }: - { - services.flaresolverr = { - enable = true; - port = 8888; - }; + nodes.machine = + { pkgs, ... }: + { + services.flaresolverr = { + enable = true; + port = 8888; }; + }; - testScript = '' - machine.wait_for_unit("flaresolverr.service") - machine.wait_for_open_port(8888) - machine.succeed("curl --fail http://localhost:8888/") - ''; - } -) + testScript = '' + machine.wait_for_unit("flaresolverr.service") + machine.wait_for_open_port(8888) + machine.succeed("curl --fail http://localhost:8888/") + ''; +} diff --git a/nixos/tests/flood.nix b/nixos/tests/flood.nix index ca3ef1d919a5..c474f5efeefb 100644 --- a/nixos/tests/flood.nix +++ b/nixos/tests/flood.nix @@ -1,32 +1,30 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - port = 3001; - in - { - name = "flood"; - meta = { - maintainers = with pkgs.lib.maintainers; [ thiagokokada ]; +{ pkgs, ... }: +let + port = 3001; +in +{ + name = "flood"; + meta = { + maintainers = with pkgs.lib.maintainers; [ thiagokokada ]; + }; + + nodes.machine = + { pkgs, ... }: + { + services.flood = { + inherit port; + enable = true; + openFirewall = true; + extraArgs = [ "--baseuri=/" ]; + }; }; - nodes.machine = - { pkgs, ... }: - { - services.flood = { - inherit port; - enable = true; - openFirewall = true; - extraArgs = [ "--baseuri=/" ]; - }; - }; + testScript = # python + '' + machine.start() + machine.wait_for_unit("flood.service") + machine.wait_for_open_port(${toString port}) - testScript = # python - '' - machine.start() - machine.wait_for_unit("flood.service") - machine.wait_for_open_port(${toString port}) - - machine.succeed("curl --fail http://localhost:${toString port}") - ''; - } -) + machine.succeed("curl --fail http://localhost:${toString port}") + ''; +} diff --git a/nixos/tests/fluentd.nix b/nixos/tests/fluentd.nix index aa56db49dc53..8f4fb6e37f0e 100644 --- a/nixos/tests/fluentd.nix +++ b/nixos/tests/fluentd.nix @@ -1,58 +1,56 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "fluentd"; +{ pkgs, lib, ... }: +{ + name = "fluentd"; - nodes.machine = - { pkgs, ... }: - { - services.fluentd = { - enable = true; - config = '' - - @type http - port 9880 - + nodes.machine = + { pkgs, ... 
}: + { + services.fluentd = { + enable = true; + config = '' + + @type http + port 9880 + - - type copy - - @type file - format json - path /tmp/fluentd - symlink_path /tmp/current-log - - - @type stdout - - - ''; - }; + + type copy + + @type file + format json + path /tmp/fluentd + symlink_path /tmp/current-log + + + @type stdout + + + ''; }; + }; - testScript = - let - testMessage = "an example log message"; + testScript = + let + testMessage = "an example log message"; - payload = pkgs.writeText "test-message.json" ( - builtins.toJSON { - inherit testMessage; - } - ); - in - '' - machine.start() - machine.wait_for_unit("fluentd.service") - machine.wait_for_open_port(9880) + payload = pkgs.writeText "test-message.json" ( + builtins.toJSON { + inherit testMessage; + } + ); + in + '' + machine.start() + machine.wait_for_unit("fluentd.service") + machine.wait_for_open_port(9880) - machine.succeed( - "curl -fsSL -X POST -H 'Content-type: application/json' -d @${payload} http://localhost:9880/test.tag" - ) + machine.succeed( + "curl -fsSL -X POST -H 'Content-type: application/json' -d @${payload} http://localhost:9880/test.tag" + ) - # blocking flush - machine.succeed("systemctl stop fluentd") + # blocking flush + machine.succeed("systemctl stop fluentd") - machine.succeed("grep '${testMessage}' /tmp/current-log") - ''; - } -) + machine.succeed("grep '${testMessage}' /tmp/current-log") + ''; +} diff --git a/nixos/tests/fluidd.nix b/nixos/tests/fluidd.nix index 94463fc74e49..edfc1e8d9a49 100644 --- a/nixos/tests/fluidd.nix +++ b/nixos/tests/fluidd.nix @@ -1,23 +1,21 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "fluidd"; - meta.maintainers = with lib.maintainers; [ vtuan10 ]; +{ + name = "fluidd"; + meta.maintainers = with lib.maintainers; [ vtuan10 ]; - nodes.machine = - { pkgs, ... }: - { - services.fluidd = { - enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.fluidd = { + enable = true; }; + }; - testScript = '' - machine.start() - machine.wait_for_unit("nginx.service") - machine.wait_for_open_port(80) - machine.succeed("curl -sSfL http://localhost/ | grep 'fluidd'") - ''; - } -) + testScript = '' + machine.start() + machine.wait_for_unit("nginx.service") + machine.wait_for_open_port(80) + machine.succeed("curl -sSfL http://localhost/ | grep 'fluidd'") + ''; +} diff --git a/nixos/tests/fontconfig-default-fonts.nix b/nixos/tests/fontconfig-default-fonts.nix index ce7f33201638..674192e7bba2 100644 --- a/nixos/tests/fontconfig-default-fonts.nix +++ b/nixos/tests/fontconfig-default-fonts.nix @@ -1,36 +1,34 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "fontconfig-default-fonts"; +{ lib, ... }: +{ + name = "fontconfig-default-fonts"; - meta.maintainers = with lib.maintainers; [ - jtojnar - ]; + meta.maintainers = with lib.maintainers; [ + jtojnar + ]; - nodes.machine = - { config, pkgs, ... }: - { - fonts.enableDefaultPackages = true; # Background fonts - fonts.packages = with pkgs; [ - noto-fonts-color-emoji - cantarell-fonts - twitter-color-emoji - source-code-pro - gentium - ]; - fonts.fontconfig.defaultFonts = { - serif = [ "Gentium Plus" ]; - sansSerif = [ "Cantarell" ]; - monospace = [ "Source Code Pro" ]; - emoji = [ "Twitter Color Emoji" ]; - }; + nodes.machine = + { config, pkgs, ... 
}: + { + fonts.enableDefaultPackages = true; # Background fonts + fonts.packages = with pkgs; [ + noto-fonts-color-emoji + cantarell-fonts + twitter-color-emoji + source-code-pro + gentium + ]; + fonts.fontconfig.defaultFonts = { + serif = [ "Gentium Plus" ]; + sansSerif = [ "Cantarell" ]; + monospace = [ "Source Code Pro" ]; + emoji = [ "Twitter Color Emoji" ]; }; + }; - testScript = '' - machine.succeed("fc-match serif | grep '\"Gentium Plus\"'") - machine.succeed("fc-match sans-serif | grep '\"Cantarell\"'") - machine.succeed("fc-match monospace | grep '\"Source Code Pro\"'") - machine.succeed("fc-match emoji | grep '\"Twitter Color Emoji\"'") - ''; - } -) + testScript = '' + machine.succeed("fc-match serif | grep '\"Gentium Plus\"'") + machine.succeed("fc-match sans-serif | grep '\"Cantarell\"'") + machine.succeed("fc-match monospace | grep '\"Source Code Pro\"'") + machine.succeed("fc-match emoji | grep '\"Twitter Color Emoji\"'") + ''; +} diff --git a/nixos/tests/freeswitch.nix b/nixos/tests/freeswitch.nix index 91f25244c9a4..8dbb28efe90d 100644 --- a/nixos/tests/freeswitch.nix +++ b/nixos/tests/freeswitch.nix @@ -1,34 +1,32 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "freeswitch"; - meta = with pkgs.lib.maintainers; { - maintainers = [ misuzu ]; - }; - nodes = { - node0 = - { config, lib, ... }: - { - networking.useDHCP = false; - networking.interfaces.eth1 = { - ipv4.addresses = [ - { - address = "192.168.0.1"; - prefixLength = 24; - } - ]; - }; - services.freeswitch = { - enable = true; - enableReload = true; - configTemplate = "${config.services.freeswitch.package}/share/freeswitch/conf/minimal"; - }; +{ pkgs, ... }: +{ + name = "freeswitch"; + meta = with pkgs.lib.maintainers; { + maintainers = [ misuzu ]; + }; + nodes = { + node0 = + { config, lib, ... }: + { + networking.useDHCP = false; + networking.interfaces.eth1 = { + ipv4.addresses = [ + { + address = "192.168.0.1"; + prefixLength = 24; + } + ]; }; - }; - testScript = '' - node0.wait_for_unit("freeswitch.service") - # Wait for SIP port to be open - node0.wait_for_open_port(5060) - ''; - } -) + services.freeswitch = { + enable = true; + enableReload = true; + configTemplate = "${config.services.freeswitch.package}/share/freeswitch/conf/minimal"; + }; + }; + }; + testScript = '' + node0.wait_for_unit("freeswitch.service") + # Wait for SIP port to be open + node0.wait_for_open_port(5060) + ''; +} diff --git a/nixos/tests/frp.nix b/nixos/tests/frp.nix index 8771ab8ad224..14db56308edc 100644 --- a/nixos/tests/frp.nix +++ b/nixos/tests/frp.nix @@ -1,87 +1,85 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "frp"; - meta.maintainers = with lib.maintainers; [ zaldnoay ]; - nodes = { - frps = { - networking = { - useNetworkd = true; - useDHCP = false; - firewall.enable = false; - }; - - systemd.network.networks."01-eth1" = { - name = "eth1"; - networkConfig.Address = "10.0.0.1/24"; - }; - - services.frp = { - enable = true; - role = "server"; - settings = { - bindPort = 7000; - vhostHTTPPort = 80; - }; - }; +{ pkgs, lib, ... 
}: +{ + name = "frp"; + meta.maintainers = with lib.maintainers; [ zaldnoay ]; + nodes = { + frps = { + networking = { + useNetworkd = true; + useDHCP = false; + firewall.enable = false; }; - frpc = { - networking = { - useNetworkd = true; - useDHCP = false; - }; + systemd.network.networks."01-eth1" = { + name = "eth1"; + networkConfig.Address = "10.0.0.1/24"; + }; - systemd.network.networks."01-eth1" = { - name = "eth1"; - networkConfig.Address = "10.0.0.2/24"; - }; - - services.httpd = { - enable = true; - adminAddr = "admin@example.com"; - virtualHosts."test-appication" = - let - testdir = pkgs.writeTextDir "web/index.php" "&2") + router2 = + { ... }: + { + virtualisation.vlans = [ + 3 + 2 + ]; + boot.kernel.sysctl."net.ipv4.ip_forward" = "1"; + networking.firewall.extraCommands = "iptables -A nixos-fw -i eth2 -p ospfigp -j ACCEPT"; + services.frr = { + ospfd.enable = true; + config = ospfConf2; + }; + }; - with subtest("Wait for OSPF to form adjacencies"): - for gw in router1, router2: - gw.wait_until_succeeds("vtysh -c 'show ip ospf neighbor' | grep Full") - gw.wait_until_succeeds("vtysh -c 'show ip route' | grep '^O>'") + server = + { nodes, ... }: + { + virtualisation.vlans = [ 3 ]; + services.frr = { + config = '' + ip route 192.168.0.0/16 ${ifAddr nodes.router2 "eth1"} + ''; + }; + }; + }; - with subtest("Test ICMP"): - client.wait_until_succeeds("ping -4 -c 3 server >&2") - ''; - } -) + testScript = + { nodes, ... }: + '' + start_all() + + # Wait for the networking to start on all machines + for machine in client, router1, router2, server: + machine.wait_for_unit("network.target") + + with subtest("Wait for FRR"): + for gw in client, router1, router2, server: + gw.wait_for_unit("frr") + + router1.succeed("${nodes.router1.system.build.toplevel}/specialisation/ospf/bin/switch-to-configuration test >&2") + + with subtest("Wait for OSPF to form adjacencies"): + for gw in router1, router2: + gw.wait_until_succeeds("vtysh -c 'show ip ospf neighbor' | grep Full") + gw.wait_until_succeeds("vtysh -c 'show ip route' | grep '^O>'") + + with subtest("Test ICMP"): + client.wait_until_succeeds("ping -4 -c 3 server >&2") + ''; +} diff --git a/nixos/tests/fscrypt.nix b/nixos/tests/fscrypt.nix index 5bc611f123c4..4dc839dac447 100644 --- a/nixos/tests/fscrypt.nix +++ b/nixos/tests/fscrypt.nix @@ -1,54 +1,52 @@ -import ./make-test-python.nix ( - { ... }: - { - name = "fscrypt"; +{ ... }: +{ + name = "fscrypt"; - nodes.machine = - { pkgs, ... }: - { - imports = [ ./common/user-account.nix ]; - security.pam.enableFscrypt = true; - }; + nodes.machine = + { pkgs, ... 
}: + { + imports = [ ./common/user-account.nix ]; + security.pam.enableFscrypt = true; + }; - testScript = '' - def login_as_alice(): - machine.wait_until_tty_matches("1", "login: ") - machine.send_chars("alice\n") - machine.wait_until_tty_matches("1", "Password: ") - machine.send_chars("foobar\n") - machine.wait_until_tty_matches("1", "alice\@machine") + testScript = '' + def login_as_alice(): + machine.wait_until_tty_matches("1", "login: ") + machine.send_chars("alice\n") + machine.wait_until_tty_matches("1", "Password: ") + machine.send_chars("foobar\n") + machine.wait_until_tty_matches("1", "alice\@machine") - def logout(): - machine.send_chars("logout\n") - machine.wait_until_tty_matches("1", "login: ") + def logout(): + machine.send_chars("logout\n") + machine.wait_until_tty_matches("1", "login: ") - machine.wait_for_unit("default.target") + machine.wait_for_unit("default.target") - with subtest("Enable fscrypt on filesystem"): - machine.succeed("tune2fs -O encrypt /dev/vda") - machine.succeed("fscrypt setup --quiet --force --time=1ms") + with subtest("Enable fscrypt on filesystem"): + machine.succeed("tune2fs -O encrypt /dev/vda") + machine.succeed("fscrypt setup --quiet --force --time=1ms") - with subtest("Set up alice with an fscrypt-enabled home directory"): - machine.succeed("(echo foobar; echo foobar) | passwd alice") - machine.succeed("chown -R alice.users ~alice") - machine.succeed("echo foobar | fscrypt encrypt --skip-unlock --source=pam_passphrase --user=alice /home/alice") + with subtest("Set up alice with an fscrypt-enabled home directory"): + machine.succeed("(echo foobar; echo foobar) | passwd alice") + machine.succeed("chown -R alice.users ~alice") + machine.succeed("echo foobar | fscrypt encrypt --skip-unlock --source=pam_passphrase --user=alice /home/alice") - with subtest("Create file as alice"): - login_as_alice() - machine.succeed("echo hello > /home/alice/world") - logout() - # Wait for logout to be processed - machine.sleep(1) + with subtest("Create file as alice"): + login_as_alice() + machine.succeed("echo hello > /home/alice/world") + logout() + # Wait for logout to be processed + machine.sleep(1) - with subtest("File should not be readable without being logged in as alice"): - machine.fail("cat /home/alice/world") + with subtest("File should not be readable without being logged in as alice"): + machine.fail("cat /home/alice/world") - with subtest("File should be readable again as alice"): - login_as_alice() - machine.succeed("cat /home/alice/world") - logout() - ''; - } -) + with subtest("File should be readable again as alice"): + login_as_alice() + machine.succeed("cat /home/alice/world") + logout() + ''; +} diff --git a/nixos/tests/ft2-clone.nix b/nixos/tests/ft2-clone.nix index 012432bb3f84..c4f1475ad710 100644 --- a/nixos/tests/ft2-clone.nix +++ b/nixos/tests/ft2-clone.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "ft2-clone"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "ft2-clone"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + nodes.machine = + { pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + environment.systemPackages = [ pkgs.ft2-clone ]; }; - nodes.machine = - { pkgs, ... 
}: - { - imports = [ - ./common/x11.nix - ]; - environment.systemPackages = [ pkgs.ft2-clone ]; - }; + enableOCR = true; - enableOCR = true; + testScript = '' + machine.wait_for_x() + # Add a dummy sound card, or the program won't start + machine.execute("modprobe snd-dummy") - testScript = '' - machine.wait_for_x() - # Add a dummy sound card, or the program won't start - machine.execute("modprobe snd-dummy") + machine.execute("ft2-clone >&2 &") - machine.execute("ft2-clone >&2 &") - - machine.wait_for_window(r"Fasttracker") - machine.sleep(5) - machine.wait_for_text(r"(Songlen|Repstart|Time|About|Nibbles|Help)") - machine.screenshot("screen") - ''; - } -) + machine.wait_for_window(r"Fasttracker") + machine.sleep(5) + machine.wait_for_text(r"(Songlen|Repstart|Time|About|Nibbles|Help)") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/gancio.nix b/nixos/tests/gancio.nix index 8f4696d6f6cc..6cd487008670 100644 --- a/nixos/tests/gancio.nix +++ b/nixos/tests/gancio.nix @@ -1,87 +1,85 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - extraHosts = '' - 192.168.13.12 agenda.example.com - ''; - in - { - name = "gancio"; - meta.maintainers = with pkgs.lib.maintainers; [ jbgi ]; +{ pkgs, ... }: +let + extraHosts = '' + 192.168.13.12 agenda.example.com + ''; +in +{ + name = "gancio"; + meta.maintainers = with pkgs.lib.maintainers; [ jbgi ]; - nodes = { - server = - { pkgs, ... }: - { - networking = { - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "192.168.13.12"; - prefixLength = 24; - } - ]; - }; - inherit extraHosts; - firewall.allowedTCPPorts = [ 80 ]; + nodes = { + server = + { pkgs, ... }: + { + networking = { + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "192.168.13.12"; + prefixLength = 24; + } + ]; }; - environment.systemPackages = [ pkgs.gancio ]; - services.gancio = { - enable = true; - settings = { - hostname = "agenda.example.com"; - db.dialect = "postgres"; - }; - plugins = [ pkgs.gancioPlugins.telegram-bridge ]; - userLocale = { - en = { - register = { - description = "My new registration page description"; - }; + inherit extraHosts; + firewall.allowedTCPPorts = [ 80 ]; + }; + environment.systemPackages = [ pkgs.gancio ]; + services.gancio = { + enable = true; + settings = { + hostname = "agenda.example.com"; + db.dialect = "postgres"; + }; + plugins = [ pkgs.gancioPlugins.telegram-bridge ]; + userLocale = { + en = { + register = { + description = "My new registration page description"; }; }; - nginx = { - enableACME = false; - forceSSL = false; - }; + }; + nginx = { + enableACME = false; + forceSSL = false; }; }; + }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.jq ]; - networking = { - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "192.168.13.1"; - prefixLength = 24; - } - ]; - }; - inherit extraHosts; + client = + { pkgs, ... 
}: + { + environment.systemPackages = [ pkgs.jq ]; + networking = { + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "192.168.13.1"; + prefixLength = 24; + } + ]; }; + inherit extraHosts; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - server.wait_for_unit("postgresql") - server.wait_for_unit("gancio") - server.wait_for_unit("nginx") - server.wait_for_file("/run/gancio/socket") - server.wait_for_open_port(80) + server.wait_for_unit("postgresql") + server.wait_for_unit("gancio") + server.wait_for_unit("nginx") + server.wait_for_file("/run/gancio/socket") + server.wait_for_open_port(80) - # Check can create user via cli - server.succeed("cd /var/lib/gancio && sudo -u gancio gancio users create admin dummy admin") + # Check can create user via cli + server.succeed("cd /var/lib/gancio && sudo -u gancio gancio users create admin dummy admin") - # Check event list is returned - client.wait_until_succeeds("curl --verbose --fail-with-body http://agenda.example.com/api/events", timeout=30) + # Check event list is returned + client.wait_until_succeeds("curl --verbose --fail-with-body http://agenda.example.com/api/events", timeout=30) - server.shutdown() - client.shutdown() - ''; - } -) + server.shutdown() + client.shutdown() + ''; +} diff --git a/nixos/tests/geth.nix b/nixos/tests/geth.nix index 7fa7b97ed3bb..49a2c4f11701 100644 --- a/nixos/tests/geth.nix +++ b/nixos/tests/geth.nix @@ -1,71 +1,69 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "geth"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ bachp ]; - }; +{ pkgs, ... }: +{ + name = "geth"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ bachp ]; + }; - nodes.machine = - { ... }: - { - services.geth."mainnet" = { + nodes.machine = + { ... 
}: + { + services.geth."mainnet" = { + enable = true; + http = { enable = true; - http = { - enable = true; - }; - }; - - services.geth."holesky" = { - enable = true; - port = 30304; - network = "holesky"; - http = { - enable = true; - port = 18545; - }; - authrpc = { - enable = true; - port = 18551; - }; - }; - - services.geth."sepolia" = { - enable = true; - port = 30305; - network = "sepolia"; - http = { - enable = true; - port = 28545; - }; - authrpc = { - enable = true; - port = 28551; - }; }; }; - testScript = '' - start_all() + services.geth."holesky" = { + enable = true; + port = 30304; + network = "holesky"; + http = { + enable = true; + port = 18545; + }; + authrpc = { + enable = true; + port = 18551; + }; + }; - machine.wait_for_unit("geth-mainnet.service") - machine.wait_for_unit("geth-holesky.service") - machine.wait_for_unit("geth-sepolia.service") - machine.wait_for_open_port(8545) - machine.wait_for_open_port(18545) - machine.wait_for_open_port(28545) + services.geth."sepolia" = { + enable = true; + port = 30305; + network = "sepolia"; + http = { + enable = true; + port = 28545; + }; + authrpc = { + enable = true; + port = 28551; + }; + }; + }; - machine.succeed( - 'geth attach --exec "eth.blockNumber" http://localhost:8545 | grep \'^0$\' ' - ) + testScript = '' + start_all() - machine.succeed( - 'geth attach --exec "eth.blockNumber" http://localhost:18545 | grep \'^0$\' ' - ) + machine.wait_for_unit("geth-mainnet.service") + machine.wait_for_unit("geth-holesky.service") + machine.wait_for_unit("geth-sepolia.service") + machine.wait_for_open_port(8545) + machine.wait_for_open_port(18545) + machine.wait_for_open_port(28545) - machine.succeed( - 'geth attach --exec "eth.blockNumber" http://localhost:28545 | grep \'^0$\' ' - ) - ''; - } -) + machine.succeed( + 'geth attach --exec "eth.blockNumber" http://localhost:8545 | grep \'^0$\' ' + ) + + machine.succeed( + 'geth attach --exec "eth.blockNumber" http://localhost:18545 | grep \'^0$\' ' + ) + + machine.succeed( + 'geth attach --exec "eth.blockNumber" http://localhost:28545 | grep \'^0$\' ' + ) + ''; +} diff --git a/nixos/tests/ghostunnel.nix b/nixos/tests/ghostunnel.nix index bd16a802e60d..417f1a64765b 100644 --- a/nixos/tests/ghostunnel.nix +++ b/nixos/tests/ghostunnel.nix @@ -1,116 +1,114 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "ghostunnel"; - nodes = { - backend = - { pkgs, ... }: - { - services.nginx.enable = true; - services.nginx.virtualHosts."backend".root = pkgs.runCommand "webroot" { } '' - mkdir $out - echo hi >$out/hi.txt - ''; - networking.firewall.allowedTCPPorts = [ 80 ]; +{ pkgs, ... }: +{ + name = "ghostunnel"; + nodes = { + backend = + { pkgs, ... }: + { + services.nginx.enable = true; + services.nginx.virtualHosts."backend".root = pkgs.runCommand "webroot" { } '' + mkdir $out + echo hi >$out/hi.txt + ''; + networking.firewall.allowedTCPPorts = [ 80 ]; + }; + service = + { ... }: + { + services.ghostunnel.enable = true; + services.ghostunnel.servers."plain-old" = { + listen = "0.0.0.0:443"; + cert = "/root/service-cert.pem"; + key = "/root/service-key.pem"; + disableAuthentication = true; + target = "backend:80"; + unsafeTarget = true; }; - service = - { ... 
}: - { - services.ghostunnel.enable = true; - services.ghostunnel.servers."plain-old" = { - listen = "0.0.0.0:443"; - cert = "/root/service-cert.pem"; - key = "/root/service-key.pem"; - disableAuthentication = true; - target = "backend:80"; - unsafeTarget = true; - }; - services.ghostunnel.servers."client-cert" = { - listen = "0.0.0.0:1443"; - cert = "/root/service-cert.pem"; - key = "/root/service-key.pem"; - cacert = "/root/ca.pem"; - target = "backend:80"; - allowCN = [ "client" ]; - unsafeTarget = true; - }; - networking.firewall.allowedTCPPorts = [ - 443 - 1443 - ]; + services.ghostunnel.servers."client-cert" = { + listen = "0.0.0.0:1443"; + cert = "/root/service-cert.pem"; + key = "/root/service-key.pem"; + cacert = "/root/ca.pem"; + target = "backend:80"; + allowCN = [ "client" ]; + unsafeTarget = true; }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ - pkgs.curl - ]; - }; - }; + networking.firewall.allowedTCPPorts = [ + 443 + 1443 + ]; + }; + client = + { pkgs, ... }: + { + environment.systemPackages = [ + pkgs.curl + ]; + }; + }; - testScript = '' + testScript = '' - # prepare certificates + # prepare certificates - def cmd(command): - print(f"+{command}") - r = os.system(command) - if r != 0: - raise Exception(f"Command {command} failed with exit code {r}") + def cmd(command): + print(f"+{command}") + r = os.system(command) + if r != 0: + raise Exception(f"Command {command} failed with exit code {r}") - # Create CA - cmd("${pkgs.openssl}/bin/openssl genrsa -out ca-key.pem 4096") - cmd("${pkgs.openssl}/bin/openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -subj '/C=NL/ST=Zuid-Holland/L=The Hague/O=Stevige Balken en Planken B.V./OU=OpSec/CN=Certificate Authority' -out ca.pem") + # Create CA + cmd("${pkgs.openssl}/bin/openssl genrsa -out ca-key.pem 4096") + cmd("${pkgs.openssl}/bin/openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -subj '/C=NL/ST=Zuid-Holland/L=The Hague/O=Stevige Balken en Planken B.V./OU=OpSec/CN=Certificate Authority' -out ca.pem") - # Create service - cmd("${pkgs.openssl}/bin/openssl genrsa -out service-key.pem 4096") - cmd("${pkgs.openssl}/bin/openssl req -subj '/CN=service' -sha256 -new -key service-key.pem -out service.csr") - cmd("echo subjectAltName = DNS:service,IP:127.0.0.1 >> extfile.cnf") - cmd("echo extendedKeyUsage = serverAuth >> extfile.cnf") - cmd("${pkgs.openssl}/bin/openssl x509 -req -days 365 -sha256 -in service.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out service-cert.pem -extfile extfile.cnf") + # Create service + cmd("${pkgs.openssl}/bin/openssl genrsa -out service-key.pem 4096") + cmd("${pkgs.openssl}/bin/openssl req -subj '/CN=service' -sha256 -new -key service-key.pem -out service.csr") + cmd("echo subjectAltName = DNS:service,IP:127.0.0.1 >> extfile.cnf") + cmd("echo extendedKeyUsage = serverAuth >> extfile.cnf") + cmd("${pkgs.openssl}/bin/openssl x509 -req -days 365 -sha256 -in service.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out service-cert.pem -extfile extfile.cnf") - # Create client - cmd("${pkgs.openssl}/bin/openssl genrsa -out client-key.pem 4096") - cmd("${pkgs.openssl}/bin/openssl req -subj '/CN=client' -new -key client-key.pem -out client.csr") - cmd("echo extendedKeyUsage = clientAuth > extfile-client.cnf") - cmd("${pkgs.openssl}/bin/openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out client-cert.pem -extfile extfile-client.cnf") + # Create client + cmd("${pkgs.openssl}/bin/openssl genrsa -out client-key.pem 4096") + 
cmd("${pkgs.openssl}/bin/openssl req -subj '/CN=client' -new -key client-key.pem -out client.csr") + cmd("echo extendedKeyUsage = clientAuth > extfile-client.cnf") + cmd("${pkgs.openssl}/bin/openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out client-cert.pem -extfile extfile-client.cnf") - cmd("ls -al") + cmd("ls -al") - start_all() + start_all() - # Configuration - service.copy_from_host("ca.pem", "/root/ca.pem") - service.copy_from_host("service-cert.pem", "/root/service-cert.pem") - service.copy_from_host("service-key.pem", "/root/service-key.pem") - client.copy_from_host("ca.pem", "/root/ca.pem") - client.copy_from_host("service-cert.pem", "/root/service-cert.pem") - client.copy_from_host("client-cert.pem", "/root/client-cert.pem") - client.copy_from_host("client-key.pem", "/root/client-key.pem") + # Configuration + service.copy_from_host("ca.pem", "/root/ca.pem") + service.copy_from_host("service-cert.pem", "/root/service-cert.pem") + service.copy_from_host("service-key.pem", "/root/service-key.pem") + client.copy_from_host("ca.pem", "/root/ca.pem") + client.copy_from_host("service-cert.pem", "/root/service-cert.pem") + client.copy_from_host("client-cert.pem", "/root/client-cert.pem") + client.copy_from_host("client-key.pem", "/root/client-key.pem") - backend.wait_for_unit("nginx.service") - service.wait_for_unit("multi-user.target") - service.wait_for_unit("multi-user.target") - client.wait_for_unit("multi-user.target") + backend.wait_for_unit("nginx.service") + service.wait_for_unit("multi-user.target") + service.wait_for_unit("multi-user.target") + client.wait_for_unit("multi-user.target") - # Check assumptions before the real test - client.succeed("bash -c 'diff <(curl -v --no-progress-meter http://backend/hi.txt) <(echo hi)'") + # Check assumptions before the real test + client.succeed("bash -c 'diff <(curl -v --no-progress-meter http://backend/hi.txt) <(echo hi)'") - # Plain old simple TLS can connect, ignoring cert - client.succeed("bash -c 'diff <(curl -v --no-progress-meter --insecure https://service/hi.txt) <(echo hi)'") + # Plain old simple TLS can connect, ignoring cert + client.succeed("bash -c 'diff <(curl -v --no-progress-meter --insecure https://service/hi.txt) <(echo hi)'") - # Plain old simple TLS provides correct signature with its cert - client.succeed("bash -c 'diff <(curl -v --no-progress-meter --cacert /root/ca.pem https://service/hi.txt) <(echo hi)'") + # Plain old simple TLS provides correct signature with its cert + client.succeed("bash -c 'diff <(curl -v --no-progress-meter --cacert /root/ca.pem https://service/hi.txt) <(echo hi)'") - # Client can authenticate with certificate - client.succeed("bash -c 'diff <(curl -v --no-progress-meter --cert /root/client-cert.pem --key /root/client-key.pem --cacert /root/ca.pem https://service:1443/hi.txt) <(echo hi)'") + # Client can authenticate with certificate + client.succeed("bash -c 'diff <(curl -v --no-progress-meter --cert /root/client-cert.pem --key /root/client-key.pem --cacert /root/ca.pem https://service:1443/hi.txt) <(echo hi)'") - # Client must authenticate with certificate - client.fail("bash -c 'diff <(curl -v --no-progress-meter --cacert /root/ca.pem https://service:1443/hi.txt) <(echo hi)'") - ''; + # Client must authenticate with certificate + client.fail("bash -c 'diff <(curl -v --no-progress-meter --cacert /root/ca.pem https://service:1443/hi.txt) <(echo hi)'") + ''; - meta.maintainers = with pkgs.lib.maintainers; [ - roberth - ]; - } -) + 
meta.maintainers = with pkgs.lib.maintainers; [ + roberth + ]; +} diff --git a/nixos/tests/gitdaemon.nix b/nixos/tests/gitdaemon.nix index 41a29168bda4..35278a319827 100644 --- a/nixos/tests/gitdaemon.nix +++ b/nixos/tests/gitdaemon.nix @@ -1,83 +1,81 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - hashes = pkgs.writeText "hashes" '' - b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c /project/bar - ''; - in - { - name = "gitdaemon"; +let + hashes = pkgs.writeText "hashes" '' + b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c /project/bar + ''; +in +{ + name = "gitdaemon"; - meta = with pkgs.lib.maintainers; { - maintainers = [ tilpner ]; - }; + meta = with pkgs.lib.maintainers; { + maintainers = [ tilpner ]; + }; - nodes = { - server = - { config, ... }: - { - networking.firewall.allowedTCPPorts = [ config.services.gitDaemon.port ]; + nodes = { + server = + { config, ... }: + { + networking.firewall.allowedTCPPorts = [ config.services.gitDaemon.port ]; - environment.systemPackages = [ pkgs.git ]; + environment.systemPackages = [ pkgs.git ]; - systemd.tmpfiles.rules = [ - # type path mode user group age arg - " d /git 0755 git git - -" - ]; + systemd.tmpfiles.rules = [ + # type path mode user group age arg + " d /git 0755 git git - -" + ]; - services.gitDaemon = { - enable = true; - basePath = "/git"; - }; + services.gitDaemon = { + enable = true; + basePath = "/git"; }; + }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.git ]; - }; - }; + client = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.git ]; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - with subtest("create project.git"): - server.succeed( - "git init --bare /git/project.git", - "touch /git/project.git/git-daemon-export-ok", - ) + with subtest("create project.git"): + server.succeed( + "git init --bare /git/project.git", + "touch /git/project.git/git-daemon-export-ok", + ) - with subtest("add file to project.git"): - server.succeed( - "git clone /git/project.git /project", - "echo foo > /project/bar", - "git config --global user.email 'you@example.com'", - "git config --global user.name 'Your Name'", - "git -C /project add bar", - "git -C /project commit -m 'quux'", - "git -C /project push", - "rm -r /project", - ) + with subtest("add file to project.git"): + server.succeed( + "git clone /git/project.git /project", + "echo foo > /project/bar", + "git config --global user.email 'you@example.com'", + "git config --global user.name 'Your Name'", + "git -C /project add bar", + "git -C /project commit -m 'quux'", + "git -C /project push", + "rm -r /project", + ) - # Change user/group to default daemon user/group from module - # to avoid "fatal: detected dubious ownership in repository at '/git/project.git'" - server.succeed("chown git:git -R /git/project.git") + # Change user/group to default daemon user/group from module + # to avoid "fatal: detected dubious ownership in repository at '/git/project.git'" + server.succeed("chown git:git -R /git/project.git") - with subtest("git daemon starts"): - server.wait_for_unit("git-daemon.service") + with subtest("git daemon starts"): + server.wait_for_unit("git-daemon.service") - server.systemctl("start network-online.target") - client.systemctl("start network-online.target") - server.wait_for_unit("network-online.target") - client.wait_for_unit("network-online.target") + server.systemctl("start network-online.target") + client.systemctl("start network-online.target") + 
server.wait_for_unit("network-online.target") + client.wait_for_unit("network-online.target") - with subtest("client can clone project.git"): - client.succeed( - "git clone git://server/project.git /project", - "sha256sum -c ${hashes}", - ) - ''; - } -) + with subtest("client can clone project.git"): + client.succeed( + "git clone git://server/project.git /project", + "sha256sum -c ${hashes}", + ) + ''; +} diff --git a/nixos/tests/gitolite-fcgiwrap.nix b/nixos/tests/gitolite-fcgiwrap.nix index 9560c5c34913..fff676ee1f59 100644 --- a/nixos/tests/gitolite-fcgiwrap.nix +++ b/nixos/tests/gitolite-fcgiwrap.nix @@ -1,98 +1,96 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - user = "gitolite-admin"; - password = "some_password"; +let + user = "gitolite-admin"; + password = "some_password"; - # not used but needed to setup gitolite - adminPublicKey = '' - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7urFhAA90BTpGuEHeWWTY3W/g9PBxXNxfWhfbrm4Le root@client - ''; - in - { - name = "gitolite-fcgiwrap"; + # not used but needed to setup gitolite + adminPublicKey = '' + ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7urFhAA90BTpGuEHeWWTY3W/g9PBxXNxfWhfbrm4Le root@client + ''; +in +{ + name = "gitolite-fcgiwrap"; - meta = with pkgs.lib.maintainers; { - maintainers = [ bbigras ]; - }; + meta = with pkgs.lib.maintainers; { + maintainers = [ bbigras ]; + }; - nodes = { + nodes = { - server = - { config, ... }: - { - networking.firewall.allowedTCPPorts = [ 80 ]; + server = + { config, ... }: + { + networking.firewall.allowedTCPPorts = [ 80 ]; - services.fcgiwrap.instances.gitolite = { - process.user = "gitolite"; - process.group = "gitolite"; - socket = { inherit (config.services.nginx) user group; }; - }; + services.fcgiwrap.instances.gitolite = { + process.user = "gitolite"; + process.group = "gitolite"; + socket = { inherit (config.services.nginx) user group; }; + }; - services.gitolite = { - enable = true; - adminPubkey = adminPublicKey; - }; + services.gitolite = { + enable = true; + adminPubkey = adminPublicKey; + }; - services.nginx = { - enable = true; - recommendedProxySettings = true; - virtualHosts."server".locations."/git".extraConfig = '' - # turn off gzip as git objects are already well compressed - gzip off; + services.nginx = { + enable = true; + recommendedProxySettings = true; + virtualHosts."server".locations."/git".extraConfig = '' + # turn off gzip as git objects are already well compressed + gzip off; - # use file based basic authentication - auth_basic "Git Repository Authentication"; - auth_basic_user_file /etc/gitolite/htpasswd; + # use file based basic authentication + auth_basic "Git Repository Authentication"; + auth_basic_user_file /etc/gitolite/htpasswd; - # common FastCGI parameters are required - include ${config.services.nginx.package}/conf/fastcgi_params; + # common FastCGI parameters are required + include ${config.services.nginx.package}/conf/fastcgi_params; - # strip the CGI program prefix - fastcgi_split_path_info ^(/git)(.*)$; - fastcgi_param PATH_INFO $fastcgi_path_info; + # strip the CGI program prefix + fastcgi_split_path_info ^(/git)(.*)$; + fastcgi_param PATH_INFO $fastcgi_path_info; - # pass authenticated user login(mandatory) to Gitolite - fastcgi_param REMOTE_USER $remote_user; + # pass authenticated user login(mandatory) to Gitolite + fastcgi_param REMOTE_USER $remote_user; - # pass git repository root directory and hosting user directory - # these env variables can be set in a wrapper script - fastcgi_param GIT_HTTP_EXPORT_ALL ""; - fastcgi_param 
GIT_PROJECT_ROOT /var/lib/gitolite/repositories; - fastcgi_param GITOLITE_HTTP_HOME /var/lib/gitolite; - fastcgi_param SCRIPT_FILENAME ${pkgs.gitolite}/bin/gitolite-shell; + # pass git repository root directory and hosting user directory + # these env variables can be set in a wrapper script + fastcgi_param GIT_HTTP_EXPORT_ALL ""; + fastcgi_param GIT_PROJECT_ROOT /var/lib/gitolite/repositories; + fastcgi_param GITOLITE_HTTP_HOME /var/lib/gitolite; + fastcgi_param SCRIPT_FILENAME ${pkgs.gitolite}/bin/gitolite-shell; - # use Unix domain socket or inet socket - fastcgi_pass unix:${config.services.fcgiwrap.instances.gitolite.socket.address}; - ''; - }; - - # WARNING: DON'T DO THIS IN PRODUCTION! - # This puts unhashed secrets directly into the Nix store for ease of testing. - environment.etc."gitolite/htpasswd".source = pkgs.runCommand "htpasswd" { } '' - ${pkgs.apacheHttpd}/bin/htpasswd -bc "$out" ${user} ${password} + # use Unix domain socket or inet socket + fastcgi_pass unix:${config.services.fcgiwrap.instances.gitolite.socket.address}; ''; }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.git ]; - }; - }; + # WARNING: DON'T DO THIS IN PRODUCTION! + # This puts unhashed secrets directly into the Nix store for ease of testing. + environment.etc."gitolite/htpasswd".source = pkgs.runCommand "htpasswd" { } '' + ${pkgs.apacheHttpd}/bin/htpasswd -bc "$out" ${user} ${password} + ''; + }; - testScript = '' - start_all() + client = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.git ]; + }; + }; - server.wait_for_unit("gitolite-init.service") - server.wait_for_unit("nginx.service") - server.wait_for_file("/run/fcgiwrap-gitolite.sock") + testScript = '' + start_all() - client.wait_for_unit("multi-user.target") - client.succeed( - "git clone http://${user}:${password}@server/git/gitolite-admin.git" - ) - ''; - } -) + server.wait_for_unit("gitolite-init.service") + server.wait_for_unit("nginx.service") + server.wait_for_file("/run/fcgiwrap-gitolite.sock") + + client.wait_for_unit("multi-user.target") + client.succeed( + "git clone http://${user}:${password}@server/git/gitolite-admin.git" + ) + ''; +} diff --git a/nixos/tests/gitolite.nix b/nixos/tests/gitolite.nix index af2aa327c016..3b4c4634e270 100644 --- a/nixos/tests/gitolite.nix +++ b/nixos/tests/gitolite.nix @@ -1,144 +1,142 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - let - adminPrivateKey = pkgs.writeText "id_ed25519" '' - -----BEGIN OPENSSH PRIVATE KEY----- - b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW - QyNTUxOQAAACDu7qxYQAPdAU6RrhB3llk2N1v4PTwcVzcX1oX265uC3gAAAJBJiYxDSYmM - QwAAAAtzc2gtZWQyNTUxOQAAACDu7qxYQAPdAU6RrhB3llk2N1v4PTwcVzcX1oX265uC3g - AAAEDE1W6vMwSEUcF1r7Hyypm/+sCOoDmKZgPxi3WOa1mD2u7urFhAA90BTpGuEHeWWTY3 - W/g9PBxXNxfWhfbrm4LeAAAACGJmb0BtaW5pAQIDBAU= - -----END OPENSSH PRIVATE KEY----- - ''; +let + adminPrivateKey = pkgs.writeText "id_ed25519" '' + -----BEGIN OPENSSH PRIVATE KEY----- + b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW + QyNTUxOQAAACDu7qxYQAPdAU6RrhB3llk2N1v4PTwcVzcX1oX265uC3gAAAJBJiYxDSYmM + QwAAAAtzc2gtZWQyNTUxOQAAACDu7qxYQAPdAU6RrhB3llk2N1v4PTwcVzcX1oX265uC3g + AAAEDE1W6vMwSEUcF1r7Hyypm/+sCOoDmKZgPxi3WOa1mD2u7urFhAA90BTpGuEHeWWTY3 + W/g9PBxXNxfWhfbrm4LeAAAACGJmb0BtaW5pAQIDBAU= + -----END OPENSSH PRIVATE KEY----- + ''; - adminPublicKey = '' - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7urFhAA90BTpGuEHeWWTY3W/g9PBxXNxfWhfbrm4Le root@client - ''; + adminPublicKey = '' + ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7urFhAA90BTpGuEHeWWTY3W/g9PBxXNxfWhfbrm4Le root@client + ''; - alicePrivateKey = pkgs.writeText "id_ed25519" '' - -----BEGIN OPENSSH PRIVATE KEY----- - b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW - QyNTUxOQAAACBbeWvHh/AWGWI6EIc1xlSihyXtacNQ9KeztlW/VUy8wQAAAJAwVQ5VMFUO - VQAAAAtzc2gtZWQyNTUxOQAAACBbeWvHh/AWGWI6EIc1xlSihyXtacNQ9KeztlW/VUy8wQ - AAAEB7lbfkkdkJoE+4TKHPdPQWBKLSx+J54Eg8DaTr+3KoSlt5a8eH8BYZYjoQhzXGVKKH - Je1pw1D0p7O2Vb9VTLzBAAAACGJmb0BtaW5pAQIDBAU= - -----END OPENSSH PRIVATE KEY----- - ''; + alicePrivateKey = pkgs.writeText "id_ed25519" '' + -----BEGIN OPENSSH PRIVATE KEY----- + b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW + QyNTUxOQAAACBbeWvHh/AWGWI6EIc1xlSihyXtacNQ9KeztlW/VUy8wQAAAJAwVQ5VMFUO + VQAAAAtzc2gtZWQyNTUxOQAAACBbeWvHh/AWGWI6EIc1xlSihyXtacNQ9KeztlW/VUy8wQ + AAAEB7lbfkkdkJoE+4TKHPdPQWBKLSx+J54Eg8DaTr+3KoSlt5a8eH8BYZYjoQhzXGVKKH + Je1pw1D0p7O2Vb9VTLzBAAAACGJmb0BtaW5pAQIDBAU= + -----END OPENSSH PRIVATE KEY----- + ''; - alicePublicKey = pkgs.writeText "id_ed25519.pub" '' - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFt5a8eH8BYZYjoQhzXGVKKHJe1pw1D0p7O2Vb9VTLzB alice@client - ''; + alicePublicKey = pkgs.writeText "id_ed25519.pub" '' + ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFt5a8eH8BYZYjoQhzXGVKKHJe1pw1D0p7O2Vb9VTLzB alice@client + ''; - bobPrivateKey = pkgs.writeText "id_ed25519" '' - -----BEGIN OPENSSH PRIVATE KEY----- - b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW - QyNTUxOQAAACCWTaJ1D9Xjxy6759FvQ9oXTes1lmWBciXPkEeqTikBMAAAAJDQBmNV0AZj - VQAAAAtzc2gtZWQyNTUxOQAAACCWTaJ1D9Xjxy6759FvQ9oXTes1lmWBciXPkEeqTikBMA - AAAEDM1IYYFUwk/IVxauha9kuR6bbRtT3gZ6ZA0GLb9txb/pZNonUP1ePHLrvn0W9D2hdN - 6zWWZYFyJc+QR6pOKQEwAAAACGJmb0BtaW5pAQIDBAU= - -----END OPENSSH PRIVATE KEY----- - ''; + bobPrivateKey = pkgs.writeText "id_ed25519" '' + -----BEGIN OPENSSH PRIVATE KEY----- + b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW + QyNTUxOQAAACCWTaJ1D9Xjxy6759FvQ9oXTes1lmWBciXPkEeqTikBMAAAAJDQBmNV0AZj + VQAAAAtzc2gtZWQyNTUxOQAAACCWTaJ1D9Xjxy6759FvQ9oXTes1lmWBciXPkEeqTikBMA + AAAEDM1IYYFUwk/IVxauha9kuR6bbRtT3gZ6ZA0GLb9txb/pZNonUP1ePHLrvn0W9D2hdN + 6zWWZYFyJc+QR6pOKQEwAAAACGJmb0BtaW5pAQIDBAU= + -----END OPENSSH PRIVATE KEY----- + ''; - bobPublicKey = pkgs.writeText "id_ed25519.pub" '' - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJZNonUP1ePHLrvn0W9D2hdN6zWWZYFyJc+QR6pOKQEw bob@client - ''; + bobPublicKey 
= pkgs.writeText "id_ed25519.pub" '' + ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJZNonUP1ePHLrvn0W9D2hdN6zWWZYFyJc+QR6pOKQEw bob@client + ''; - gitoliteAdminConfSnippet = pkgs.writeText "gitolite-admin-conf-snippet" '' - repo alice-project - RW+ = alice - ''; - in - { - name = "gitolite"; + gitoliteAdminConfSnippet = pkgs.writeText "gitolite-admin-conf-snippet" '' + repo alice-project + RW+ = alice + ''; +in +{ + name = "gitolite"; - meta = with pkgs.lib.maintainers; { - maintainers = [ bjornfor ]; - }; + meta = with pkgs.lib.maintainers; { + maintainers = [ bjornfor ]; + }; - nodes = { + nodes = { - server = - { ... }: - { - services.gitolite = { - enable = true; - adminPubkey = adminPublicKey; - }; - services.openssh.enable = true; + server = + { ... }: + { + services.gitolite = { + enable = true; + adminPubkey = adminPublicKey; }; + services.openssh.enable = true; + }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.git ]; - programs.ssh.extraConfig = '' - Host * - UserKnownHostsFile /dev/null - StrictHostKeyChecking no - # there's nobody around that can input password - PreferredAuthentications publickey - ''; - users.users.alice = { - isNormalUser = true; - }; - users.users.bob = { - isNormalUser = true; - }; + client = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.git ]; + programs.ssh.extraConfig = '' + Host * + UserKnownHostsFile /dev/null + StrictHostKeyChecking no + # there's nobody around that can input password + PreferredAuthentications publickey + ''; + users.users.alice = { + isNormalUser = true; }; + users.users.bob = { + isNormalUser = true; + }; + }; - }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - with subtest("can setup ssh keys on system"): - client.succeed( - "mkdir -p ~root/.ssh", - "cp ${adminPrivateKey} ~root/.ssh/id_ed25519", - "chmod 600 ~root/.ssh/id_ed25519", - ) - client.succeed( - "sudo -u alice mkdir -p ~alice/.ssh", - "sudo -u alice cp ${alicePrivateKey} ~alice/.ssh/id_ed25519", - "sudo -u alice chmod 600 ~alice/.ssh/id_ed25519", - ) - client.succeed( - "sudo -u bob mkdir -p ~bob/.ssh", - "sudo -u bob cp ${bobPrivateKey} ~bob/.ssh/id_ed25519", - "sudo -u bob chmod 600 ~bob/.ssh/id_ed25519", - ) + with subtest("can setup ssh keys on system"): + client.succeed( + "mkdir -p ~root/.ssh", + "cp ${adminPrivateKey} ~root/.ssh/id_ed25519", + "chmod 600 ~root/.ssh/id_ed25519", + ) + client.succeed( + "sudo -u alice mkdir -p ~alice/.ssh", + "sudo -u alice cp ${alicePrivateKey} ~alice/.ssh/id_ed25519", + "sudo -u alice chmod 600 ~alice/.ssh/id_ed25519", + ) + client.succeed( + "sudo -u bob mkdir -p ~bob/.ssh", + "sudo -u bob cp ${bobPrivateKey} ~bob/.ssh/id_ed25519", + "sudo -u bob chmod 600 ~bob/.ssh/id_ed25519", + ) - with subtest("gitolite server starts"): - server.wait_for_unit("gitolite-init.service") - server.wait_for_unit("sshd.service") - client.succeed("ssh -n gitolite@server info") + with subtest("gitolite server starts"): + server.wait_for_unit("gitolite-init.service") + server.wait_for_unit("sshd.service") + client.succeed("ssh -n gitolite@server info") - with subtest("admin can clone and configure gitolite-admin.git"): - client.succeed( - "git clone gitolite@server:gitolite-admin.git", - "git config --global user.name 'System Administrator'", - "git config --global user.email root\@domain.example", - "cp ${alicePublicKey} gitolite-admin/keydir/alice.pub", - "cp ${bobPublicKey} gitolite-admin/keydir/bob.pub", - "(cd gitolite-admin && git add . 
&& git commit -m 'Add keys for alice, bob' && git push)", - "cat ${gitoliteAdminConfSnippet} >> gitolite-admin/conf/gitolite.conf", - "(cd gitolite-admin && git add . && git commit -m 'Add repo for alice' && git push)", - ) + with subtest("admin can clone and configure gitolite-admin.git"): + client.succeed( + "git clone gitolite@server:gitolite-admin.git", + "git config --global user.name 'System Administrator'", + "git config --global user.email root\@domain.example", + "cp ${alicePublicKey} gitolite-admin/keydir/alice.pub", + "cp ${bobPublicKey} gitolite-admin/keydir/bob.pub", + "(cd gitolite-admin && git add . && git commit -m 'Add keys for alice, bob' && git push)", + "cat ${gitoliteAdminConfSnippet} >> gitolite-admin/conf/gitolite.conf", + "(cd gitolite-admin && git add . && git commit -m 'Add repo for alice' && git push)", + ) - with subtest("non-admins cannot clone gitolite-admin.git"): - client.fail("sudo -i -u alice git clone gitolite@server:gitolite-admin.git") - client.fail("sudo -i -u bob git clone gitolite@server:gitolite-admin.git") + with subtest("non-admins cannot clone gitolite-admin.git"): + client.fail("sudo -i -u alice git clone gitolite@server:gitolite-admin.git") + client.fail("sudo -i -u bob git clone gitolite@server:gitolite-admin.git") - with subtest("non-admins can clone testing.git"): - client.succeed("sudo -i -u alice git clone gitolite@server:testing.git") - client.succeed("sudo -i -u bob git clone gitolite@server:testing.git") + with subtest("non-admins can clone testing.git"): + client.succeed("sudo -i -u alice git clone gitolite@server:testing.git") + client.succeed("sudo -i -u bob git clone gitolite@server:testing.git") - with subtest("alice can clone alice-project.git"): - client.succeed("sudo -i -u alice git clone gitolite@server:alice-project.git") + with subtest("alice can clone alice-project.git"): + client.succeed("sudo -i -u alice git clone gitolite@server:alice-project.git") - with subtest("bob cannot clone alice-project.git"): - client.fail("sudo -i -u bob git clone gitolite@server:alice-project.git") - ''; - } -) + with subtest("bob cannot clone alice-project.git"): + client.fail("sudo -i -u bob git clone gitolite@server:alice-project.git") + ''; +} diff --git a/nixos/tests/glusterfs.nix b/nixos/tests/glusterfs.nix index 8beed8e4a91f..e8b0a442aedc 100644 --- a/nixos/tests/glusterfs.nix +++ b/nixos/tests/glusterfs.nix @@ -1,75 +1,73 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - client = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.glusterfs ]; - virtualisation.fileSystems = { - "/gluster" = { - device = "server1:/gv0"; - fsType = "glusterfs"; - }; +let + client = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.glusterfs ]; + virtualisation.fileSystems = { + "/gluster" = { + device = "server1:/gv0"; + fsType = "glusterfs"; }; }; - - server = - { pkgs, ... 
}: - { - networking.firewall.enable = false; - services.glusterfs.enable = true; - - # create a mount point for the volume - boot.initrd.postDeviceCommands = '' - ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb - ''; - - virtualisation.emptyDiskImages = [ 1024 ]; - - virtualisation.fileSystems = { - "/data" = { - device = "/dev/disk/by-label/data"; - fsType = "ext4"; - }; - }; - }; - in - { - name = "glusterfs"; - - nodes = { - server1 = server; - server2 = server; - client1 = client; - client2 = client; }; - testScript = '' - server1.wait_for_unit("glusterd.service") - server2.wait_for_unit("glusterd.service") + server = + { pkgs, ... }: + { + networking.firewall.enable = false; + services.glusterfs.enable = true; - server1.wait_until_succeeds("gluster peer status") - server2.wait_until_succeeds("gluster peer status") + # create a mount point for the volume + boot.initrd.postDeviceCommands = '' + ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb + ''; - # establish initial contact - server1.succeed("gluster peer probe server2") - server1.succeed("gluster peer probe server1") + virtualisation.emptyDiskImages = [ 1024 ]; - server1.succeed("gluster peer status | grep Connected") + virtualisation.fileSystems = { + "/data" = { + device = "/dev/disk/by-label/data"; + fsType = "ext4"; + }; + }; + }; +in +{ + name = "glusterfs"; - # create volumes - server1.succeed("mkdir -p /data/vg0") - server2.succeed("mkdir -p /data/vg0") - server1.succeed("gluster volume create gv0 server1:/data/vg0 server2:/data/vg0") - server1.succeed("gluster volume start gv0") + nodes = { + server1 = server; + server2 = server; + client1 = client; + client2 = client; + }; - # test clients - client1.wait_for_unit("gluster.mount") - client2.wait_for_unit("gluster.mount") + testScript = '' + server1.wait_for_unit("glusterd.service") + server2.wait_for_unit("glusterd.service") - client1.succeed("echo test > /gluster/file1") - client2.succeed("grep test /gluster/file1") - ''; - } -) + server1.wait_until_succeeds("gluster peer status") + server2.wait_until_succeeds("gluster peer status") + + # establish initial contact + server1.succeed("gluster peer probe server2") + server1.succeed("gluster peer probe server1") + + server1.succeed("gluster peer status | grep Connected") + + # create volumes + server1.succeed("mkdir -p /data/vg0") + server2.succeed("mkdir -p /data/vg0") + server1.succeed("gluster volume create gv0 server1:/data/vg0 server2:/data/vg0") + server1.succeed("gluster volume start gv0") + + # test clients + client1.wait_for_unit("gluster.mount") + client2.wait_for_unit("gluster.mount") + + client1.succeed("echo test > /gluster/file1") + client2.succeed("grep test /gluster/file1") + ''; +} diff --git a/nixos/tests/gnome-extensions.nix b/nixos/tests/gnome-extensions.nix index d6381a899f3b..fa20d5e7e840 100644 --- a/nixos/tests/gnome-extensions.nix +++ b/nixos/tests/gnome-extensions.nix @@ -1,162 +1,160 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "gnome-extensions"; - meta.maintainers = [ ]; +{ pkgs, lib, ... }: +{ + name = "gnome-extensions"; + meta.maintainers = [ ]; - nodes.machine = - { pkgs, ... }: - { - imports = [ ./common/user-account.nix ]; + nodes.machine = + { pkgs, ... }: + { + imports = [ ./common/user-account.nix ]; - # Install all extensions - environment.systemPackages = lib.filter (e: e ? extensionUuid) ( - lib.attrValues pkgs.gnomeExtensions - ); + # Install all extensions + environment.systemPackages = lib.filter (e: e ? 
extensionUuid) ( + lib.attrValues pkgs.gnomeExtensions + ); - # Some extensions are broken, but that's kind of the point of a testing VM - nixpkgs.config.allowBroken = true; - # There are some aliases which throw exceptions; ignore them. - # Also prevent duplicate extensions under different names. - nixpkgs.config.allowAliases = false; + # Some extensions are broken, but that's kind of the point of a testing VM + nixpkgs.config.allowBroken = true; + # There are some aliases which throw exceptions; ignore them. + # Also prevent duplicate extensions under different names. + nixpkgs.config.allowAliases = false; - # Configure GDM - services.xserver.enable = true; - services.xserver.displayManager = { - gdm = { - enable = true; - debug = true; - wayland = true; - }; - autoLogin = { - enable = true; - user = "alice"; - }; + # Configure GDM + services.xserver.enable = true; + services.xserver.displayManager = { + gdm = { + enable = true; + debug = true; + wayland = true; }; - - # Configure Gnome - services.xserver.desktopManager.gnome.enable = true; - services.xserver.desktopManager.gnome.debug = true; - - systemd.user.services = { - "org.gnome.Shell@wayland" = { - serviceConfig = { - ExecStart = [ - # Clear the list before overriding it. - "" - # Eval API is now internal so Shell needs to run in unsafe mode. - # TODO: improve test driver so that it supports openqa-like manipulation - # that would allow us to drop this mess. - "${pkgs.gnome-shell}/bin/gnome-shell --unsafe-mode" - ]; - }; - }; + autoLogin = { + enable = true; + user = "alice"; }; - }; - testScript = - { nodes, ... }: - let - # Keep line widths somewhat manageable - user = nodes.machine.users.users.alice; - uid = toString user.uid; - bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus"; - # Run a command in the appropriate user environment - run = command: "su - ${user.name} -c '${bus} ${command}'"; + # Configure Gnome + services.xserver.desktopManager.gnome.enable = true; + services.xserver.desktopManager.gnome.debug = true; - # Call javascript in gnome shell, returns a tuple (success, output), where - # `success` is true if the dbus call was successful and output is what the - # javascript evaluates to. - eval = - command: - run "gdbus call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval ${command}"; + systemd.user.services = { + "org.gnome.Shell@wayland" = { + serviceConfig = { + ExecStart = [ + # Clear the list before overriding it. + "" + # Eval API is now internal so Shell needs to run in unsafe mode. + # TODO: improve test driver so that it supports openqa-like manipulation + # that would allow us to drop this mess. + "${pkgs.gnome-shell}/bin/gnome-shell --unsafe-mode" + ]; + }; + }; + }; - # False when startup is done - startingUp = eval "Main.layoutManager._startingUp"; + }; - # Extensions to keep always enabled together - # Those are extensions that are usually always on for many users, and that we expect to work - # well together with most others without conflicts - alwaysOnExtensions = map (name: pkgs.gnomeExtensions.${name}.extensionUuid) [ - "applications-menu" - "user-themes" - ]; + testScript = + { nodes, ... 
}: + let + # Keep line widths somewhat manageable + user = nodes.machine.users.users.alice; + uid = toString user.uid; + bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus"; + # Run a command in the appropriate user environment + run = command: "su - ${user.name} -c '${bus} ${command}'"; - # Extensions to enable and disable individually - # Extensions like dash-to-dock and dash-to-panel cannot be enabled at the same time. - testExtensions = map (name: pkgs.gnomeExtensions.${name}.extensionUuid) [ - "appindicator" - "dash-to-dock" - "dash-to-panel" - "ddterm" - "gsconnect" - "system-monitor-next" - "desktop-icons-ng-ding" - "workspace-indicator" - "vitals" - ]; - in - '' - with subtest("Login to GNOME with GDM"): - # wait for gdm to start - machine.wait_for_unit("display-manager.service") - # wait for the wayland server - machine.wait_for_file("/run/user/${uid}/wayland-0") - # wait for alice to be logged in - machine.wait_for_unit("default.target", "${user.name}") - # check that logging in has given the user ownership of devices - assert "alice" in machine.succeed("getfacl -p /dev/snd/timer") + # Call javascript in gnome shell, returns a tuple (success, output), where + # `success` is true if the dbus call was successful and output is what the + # javascript evaluates to. + eval = + command: + run "gdbus call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval ${command}"; - with subtest("Wait for GNOME Shell"): - # correct output should be (true, 'false') - machine.wait_until_succeeds( - "${startingUp} | grep -q 'true,..false'" - ) + # False when startup is done + startingUp = eval "Main.layoutManager._startingUp"; - # Close the Activities view so that Shell can correctly track the focused window. - machine.send_key("esc") - # # Disable extension version validation (only use for manual testing) - # machine.succeed( - # "${run "gsettings set org.gnome.shell disable-extension-version-validation true"}" - # ) + # Extensions to keep always enabled together + # Those are extensions that are usually always on for many users, and that we expect to work + # well together with most others without conflicts + alwaysOnExtensions = map (name: pkgs.gnomeExtensions.${name}.extensionUuid) [ + "applications-menu" + "user-themes" + ]; - def getState(extension): - return machine.succeed( - f"${run "gnome-extensions info {extension}"} | grep '^ State: .*$'" - ) + # Extensions to enable and disable individually + # Extensions like dash-to-dock and dash-to-panel cannot be enabled at the same time. 
+ testExtensions = map (name: pkgs.gnomeExtensions.${name}.extensionUuid) [ + "appindicator" + "dash-to-dock" + "dash-to-panel" + "ddterm" + "gsconnect" + "system-monitor-next" + "desktop-icons-ng-ding" + "workspace-indicator" + "vitals" + ]; + in + '' + with subtest("Login to GNOME with GDM"): + # wait for gdm to start + machine.wait_for_unit("display-manager.service") + # wait for the wayland server + machine.wait_for_file("/run/user/${uid}/wayland-0") + # wait for alice to be logged in + machine.wait_for_unit("default.target", "${user.name}") + # check that logging in has given the user ownership of devices + assert "alice" in machine.succeed("getfacl -p /dev/snd/timer") - # Assert that some extension is in a specific state - def checkState(target, extension): - state = getState(extension) - assert target in state, f"{state} instead of {target}" + with subtest("Wait for GNOME Shell"): + # correct output should be (true, 'false') + machine.wait_until_succeeds( + "${startingUp} | grep -q 'true,..false'" + ) - def checkExtension(extension, disable): - with subtest(f"Enable extension '{extension}'"): - # Check that the extension is properly initialized; skip out of date ones - state = machine.succeed( - f"${run "gnome-extensions info {extension}"} | grep '^ State: .*$'" - ) - if "OUT OF DATE" in state: - machine.log(f"Extension {extension} will be skipped because out of date") - return + # Close the Activities view so that Shell can correctly track the focused window. + machine.send_key("esc") + # # Disable extension version validation (only use for manual testing) + # machine.succeed( + # "${run "gsettings set org.gnome.shell disable-extension-version-validation true"}" + # ) - assert "INITIALIZED" in state, f"{state} instead of INITIALIZED" + def getState(extension): + return machine.succeed( + f"${run "gnome-extensions info {extension}"} | grep '^ State: .*$'" + ) - # Enable and optionally disable + # Assert that some extension is in a specific state + def checkState(target, extension): + state = getState(extension) + assert target in state, f"{state} instead of {target}" - machine.succeed(f"${run "gnome-extensions enable {extension}"}") - wait_time = 5 - while getState(extension) == "ACTIVATING" and (wait_time := wait_time - 1) > 0: - machine.log(f"Extension {extension} is still activating, waiting {wait_time} more seconds") - machine.sleep(1) - checkState("ACTIVE", extension) + def checkExtension(extension, disable): + with subtest(f"Enable extension '{extension}'"): + # Check that the extension is properly initialized; skip out of date ones + state = machine.succeed( + f"${run "gnome-extensions info {extension}"} | grep '^ State: .*$'" + ) + if "OUT OF DATE" in state: + machine.log(f"Extension {extension} will be skipped because out of date") + return - if disable: - machine.succeed(f"${run "gnome-extensions disable {extension}"}") - checkState("INACTIVE", extension) - '' - + lib.concatLines (map (e: ''checkExtension("${e}", False)'') alwaysOnExtensions) - + lib.concatLines (map (e: ''checkExtension("${e}", True)'') testExtensions); - } -) + assert "INITIALIZED" in state, f"{state} instead of INITIALIZED" + + # Enable and optionally disable + + machine.succeed(f"${run "gnome-extensions enable {extension}"}") + wait_time = 5 + while getState(extension) == "ACTIVATING" and (wait_time := wait_time - 1) > 0: + machine.log(f"Extension {extension} is still activating, waiting {wait_time} more seconds") + machine.sleep(1) + checkState("ACTIVE", extension) + + if disable: + 
machine.succeed(f"${run "gnome-extensions disable {extension}"}") + checkState("INACTIVE", extension) + '' + + lib.concatLines (map (e: ''checkExtension("${e}", False)'') alwaysOnExtensions) + + lib.concatLines (map (e: ''checkExtension("${e}", True)'') testExtensions); +} diff --git a/nixos/tests/gnome-flashback.nix b/nixos/tests/gnome-flashback.nix index b96245b4461e..ceb420adaa54 100644 --- a/nixos/tests/gnome-flashback.nix +++ b/nixos/tests/gnome-flashback.nix @@ -1,61 +1,59 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "gnome-flashback"; - meta.maintainers = lib.teams.gnome.members ++ [ lib.maintainers.chpatrick ]; +{ pkgs, lib, ... }: +{ + name = "gnome-flashback"; + meta.maintainers = lib.teams.gnome.members ++ [ lib.maintainers.chpatrick ]; - nodes.machine = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - in + nodes.machine = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + in - { - imports = [ ./common/user-account.nix ]; + { + imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; + services.xserver.enable = true; - services.xserver.displayManager = { - gdm.enable = true; - gdm.debug = true; - }; - - services.displayManager.autoLogin = { - enable = true; - user = user.name; - }; - - services.xserver.desktopManager.gnome.enable = true; - services.xserver.desktopManager.gnome.debug = true; - services.xserver.desktopManager.gnome.flashback.enableMetacity = true; - services.displayManager.defaultSession = "gnome-flashback-metacity"; + services.xserver.displayManager = { + gdm.enable = true; + gdm.debug = true; }; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - uid = toString user.uid; - xauthority = "/run/user/${uid}/gdm/Xauthority"; - in - '' - with subtest("Login to GNOME Flashback with GDM"): - machine.wait_for_x() - machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"') - # Wait for alice to be logged in" - machine.wait_for_unit("default.target", "${user.name}") - machine.wait_for_file("${xauthority}") - machine.succeed("xauth merge ${xauthority}") - # Check that logging in has given the user ownership of devices - assert "alice" in machine.succeed("getfacl -p /dev/snd/timer") + services.displayManager.autoLogin = { + enable = true; + user = user.name; + }; - with subtest("Wait for Metacity"): - machine.wait_until_succeeds("pgrep metacity") + services.xserver.desktopManager.gnome.enable = true; + services.xserver.desktopManager.gnome.debug = true; + services.xserver.desktopManager.gnome.flashback.enableMetacity = true; + services.displayManager.defaultSession = "gnome-flashback-metacity"; + }; - with subtest("Regression test for #233920"): - machine.wait_until_succeeds("pgrep -fa gnome-flashback-media-keys") - machine.sleep(20) - machine.screenshot("screen") - ''; - } -) + testScript = + { nodes, ... 
}: + let + user = nodes.machine.users.users.alice; + uid = toString user.uid; + xauthority = "/run/user/${uid}/gdm/Xauthority"; + in + '' + with subtest("Login to GNOME Flashback with GDM"): + machine.wait_for_x() + machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"') + # Wait for alice to be logged in" + machine.wait_for_unit("default.target", "${user.name}") + machine.wait_for_file("${xauthority}") + machine.succeed("xauth merge ${xauthority}") + # Check that logging in has given the user ownership of devices + assert "alice" in machine.succeed("getfacl -p /dev/snd/timer") + + with subtest("Wait for Metacity"): + machine.wait_until_succeeds("pgrep metacity") + + with subtest("Regression test for #233920"): + machine.wait_until_succeeds("pgrep -fa gnome-flashback-media-keys") + machine.sleep(20) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/gnome-xorg.nix b/nixos/tests/gnome-xorg.nix index b605781ce40f..318d206d07ac 100644 --- a/nixos/tests/gnome-xorg.nix +++ b/nixos/tests/gnome-xorg.nix @@ -1,111 +1,109 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "gnome-xorg"; - meta = { - maintainers = lib.teams.gnome.members; - }; +{ pkgs, lib, ... }: +{ + name = "gnome-xorg"; + meta = { + maintainers = lib.teams.gnome.members; + }; - nodes.machine = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - in + nodes.machine = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + in - { - imports = [ ./common/user-account.nix ]; + { + imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - - services.xserver.displayManager = { - gdm.enable = true; - gdm.debug = true; - }; - - services.displayManager.autoLogin = { - enable = true; - user = user.name; - }; - - services.xserver.desktopManager.gnome.enable = true; - services.xserver.desktopManager.gnome.debug = true; - services.displayManager.defaultSession = "gnome-xorg"; - - systemd.user.services = { - "org.gnome.Shell@x11" = { - serviceConfig = { - ExecStart = [ - # Clear the list before overriding it. - "" - # Eval API is now internal so Shell needs to run in unsafe mode. - # TODO: improve test driver so that it supports openqa-like manipulation - # that would allow us to drop this mess. - "${pkgs.gnome-shell}/bin/gnome-shell --unsafe-mode" - ]; - }; - }; - }; + services.xserver.enable = true; + services.xserver.displayManager = { + gdm.enable = true; + gdm.debug = true; }; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - uid = toString user.uid; - bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus"; - xauthority = "/run/user/${uid}/gdm/Xauthority"; - display = "DISPLAY=:0.0"; - env = "${bus} XAUTHORITY=${xauthority} ${display}"; - # Run a command in the appropriate user environment - run = command: "su - ${user.name} -c '${bus} ${command}'"; + services.displayManager.autoLogin = { + enable = true; + user = user.name; + }; - # Call javascript in gnome shell, returns a tuple (success, output), where - # `success` is true if the dbus call was successful and output is what the - # javascript evaluates to. 
- eval = - command: - run "gdbus call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval ${command}"; + services.xserver.desktopManager.gnome.enable = true; + services.xserver.desktopManager.gnome.debug = true; + services.displayManager.defaultSession = "gnome-xorg"; - # False when startup is done - startingUp = eval "Main.layoutManager._startingUp"; + systemd.user.services = { + "org.gnome.Shell@x11" = { + serviceConfig = { + ExecStart = [ + # Clear the list before overriding it. + "" + # Eval API is now internal so Shell needs to run in unsafe mode. + # TODO: improve test driver so that it supports openqa-like manipulation + # that would allow us to drop this mess. + "${pkgs.gnome-shell}/bin/gnome-shell --unsafe-mode" + ]; + }; + }; + }; - # Start Console - launchConsole = run "gapplication launch org.gnome.Console"; + }; - # Hopefully Console's wm class - wmClass = eval "global.display.focus_window.wm_class"; - in - '' - with subtest("Login to GNOME Xorg with GDM"): - machine.wait_for_x() - # Wait for alice to be logged in" - machine.wait_for_unit("default.target", "${user.name}") - machine.wait_for_file("${xauthority}") - machine.succeed("xauth merge ${xauthority}") - # Check that logging in has given the user ownership of devices - assert "alice" in machine.succeed("getfacl -p /dev/snd/timer") + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + uid = toString user.uid; + bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus"; + xauthority = "/run/user/${uid}/gdm/Xauthority"; + display = "DISPLAY=:0.0"; + env = "${bus} XAUTHORITY=${xauthority} ${display}"; + # Run a command in the appropriate user environment + run = command: "su - ${user.name} -c '${bus} ${command}'"; - with subtest("Wait for GNOME Shell"): - # correct output should be (true, 'false') - machine.wait_until_succeeds( - "${startingUp} | grep -q 'true,..false'" - ) + # Call javascript in gnome shell, returns a tuple (success, output), where + # `success` is true if the dbus call was successful and output is what the + # javascript evaluates to. + eval = + command: + run "gdbus call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval ${command}"; - with subtest("Open Console"): - # Close the Activities view so that Shell can correctly track the focused window. - machine.send_key("esc") + # False when startup is done + startingUp = eval "Main.layoutManager._startingUp"; - machine.succeed( - "${launchConsole}" - ) - # correct output should be (true, '"kgx"') - # For some reason, this deviates from Wayland. 
- machine.wait_until_succeeds( - "${wmClass} | grep -q 'true,...kgx'" - ) - machine.sleep(20) - machine.screenshot("screen") - ''; - } -) + # Start Console + launchConsole = run "gapplication launch org.gnome.Console"; + + # Hopefully Console's wm class + wmClass = eval "global.display.focus_window.wm_class"; + in + '' + with subtest("Login to GNOME Xorg with GDM"): + machine.wait_for_x() + # Wait for alice to be logged in" + machine.wait_for_unit("default.target", "${user.name}") + machine.wait_for_file("${xauthority}") + machine.succeed("xauth merge ${xauthority}") + # Check that logging in has given the user ownership of devices + assert "alice" in machine.succeed("getfacl -p /dev/snd/timer") + + with subtest("Wait for GNOME Shell"): + # correct output should be (true, 'false') + machine.wait_until_succeeds( + "${startingUp} | grep -q 'true,..false'" + ) + + with subtest("Open Console"): + # Close the Activities view so that Shell can correctly track the focused window. + machine.send_key("esc") + + machine.succeed( + "${launchConsole}" + ) + # correct output should be (true, '"kgx"') + # For some reason, this deviates from Wayland. + machine.wait_until_succeeds( + "${wmClass} | grep -q 'true,...kgx'" + ) + machine.sleep(20) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/gns3-server.nix b/nixos/tests/gns3-server.nix index 309b83053b8d..19aa1e984ed7 100644 --- a/nixos/tests/gns3-server.nix +++ b/nixos/tests/gns3-server.nix @@ -1,62 +1,60 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "gns3-server"; - meta.maintainers = [ lib.maintainers.anthonyroussel ]; +{ pkgs, lib, ... }: +{ + name = "gns3-server"; + meta.maintainers = [ lib.maintainers.anthonyroussel ]; - nodes.machine = - { ... }: - let - tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } '' - openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -days 365 \ - -subj '/CN=localhost' - install -D -t $out key.pem cert.pem - ''; - in - { - services.gns3-server = { + nodes.machine = + { ... 
}: + let + tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } '' + openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -days 365 \ + -subj '/CN=localhost' + install -D -t $out key.pem cert.pem + ''; + in + { + services.gns3-server = { + enable = true; + auth = { enable = true; - auth = { - enable = true; - user = "user"; - passwordFile = pkgs.writeText "gns3-auth-password-file" "password"; - }; - ssl = { - enable = true; - certFile = "${tls-cert}/cert.pem"; - keyFile = "${tls-cert}/key.pem"; - }; - dynamips.enable = true; - ubridge.enable = true; - vpcs.enable = true; + user = "user"; + passwordFile = pkgs.writeText "gns3-auth-password-file" "password"; }; - - security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ]; + ssl = { + enable = true; + certFile = "${tls-cert}/cert.pem"; + keyFile = "${tls-cert}/key.pem"; + }; + dynamips.enable = true; + ubridge.enable = true; + vpcs.enable = true; }; - testScript = - let - createProject = pkgs.writeText "createProject.json" ( - builtins.toJSON { - name = "test_project"; - } - ); - in - '' - start_all() + security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ]; + }; - machine.wait_for_unit("gns3-server.service") - machine.wait_for_open_port(3080) + testScript = + let + createProject = pkgs.writeText "createProject.json" ( + builtins.toJSON { + name = "test_project"; + } + ); + in + '' + start_all() - with subtest("server is listening"): - machine.succeed("curl -sSfL -u user:password https://localhost:3080/v2/version") + machine.wait_for_unit("gns3-server.service") + machine.wait_for_open_port(3080) - with subtest("create dummy project"): - machine.succeed("curl -sSfL -u user:password https://localhost:3080/v2/projects -d @${createProject}") + with subtest("server is listening"): + machine.succeed("curl -sSfL -u user:password https://localhost:3080/v2/version") - with subtest("logging works"): - log_path = "/var/log/gns3/server.log" - machine.wait_for_file(log_path) - ''; - } -) + with subtest("create dummy project"): + machine.succeed("curl -sSfL -u user:password https://localhost:3080/v2/projects -d @${createProject}") + + with subtest("logging works"): + log_path = "/var/log/gns3/server.log" + machine.wait_for_file(log_path) + ''; +} diff --git a/nixos/tests/gnupg.nix b/nixos/tests/gnupg.nix index 897c94aba507..1c720b2d6b84 100644 --- a/nixos/tests/gnupg.nix +++ b/nixos/tests/gnupg.nix @@ -1,127 +1,125 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - { - name = "gnupg"; - meta = with lib.maintainers; { - maintainers = [ rnhmjoj ]; +{ + name = "gnupg"; + meta = with lib.maintainers; { + maintainers = [ rnhmjoj ]; + }; + + # server for testing SSH + nodes.server = + { ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; + + users.users.alice.isNormalUser = true; + services.openssh.enable = true; }; - # server for testing SSH - nodes.server = - { ... }: - { - imports = [ ../modules/profiles/minimal.nix ]; + # machine for testing GnuPG + nodes.machine = + { pkgs, ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; - users.users.alice.isNormalUser = true; - services.openssh.enable = true; - }; + users.users.alice.isNormalUser = true; + services.getty.autologinUser = "alice"; - # machine for testing GnuPG - nodes.machine = - { pkgs, ... 
}: - { - imports = [ ../modules/profiles/minimal.nix ]; + environment.shellInit = '' + # preset a key passphrase in gpg-agent + preset_key() { + # find all keys + case "$1" in + ssh) grips=$(awk '/^[0-9A-F]/{print $1}' "''${GNUPGHOME:-$HOME/.gnupg}/sshcontrol") ;; + pgp) grips=$(gpg --with-keygrip --list-secret-keys | awk '/Keygrip/{print $3}') ;; + esac - users.users.alice.isNormalUser = true; - services.getty.autologinUser = "alice"; + # try to preset the passphrase for each key found + for grip in $grips; do + "$(gpgconf --list-dirs libexecdir)/gpg-preset-passphrase" -c -P "$2" "$grip" + done + } + ''; - environment.shellInit = '' - # preset a key passphrase in gpg-agent - preset_key() { - # find all keys - case "$1" in - ssh) grips=$(awk '/^[0-9A-F]/{print $1}' "''${GNUPGHOME:-$HOME/.gnupg}/sshcontrol") ;; - pgp) grips=$(gpg --with-keygrip --list-secret-keys | awk '/Keygrip/{print $3}') ;; - esac + programs.gnupg.agent.enable = true; + programs.gnupg.agent.enableSSHSupport = true; + }; - # try to preset the passphrase for each key found - for grip in $grips; do - "$(gpgconf --list-dirs libexecdir)/gpg-preset-passphrase" -c -P "$2" "$grip" - done - } - ''; - - programs.gnupg.agent.enable = true; - programs.gnupg.agent.enableSSHSupport = true; - }; - - testScript = '' - import shlex + testScript = '' + import shlex - def as_alice(command: str) -> str: - """ - Wraps a command to run it as Alice in a login shell - """ - quoted = shlex.quote(command) - return "su --login alice --command " + quoted + def as_alice(command: str) -> str: + """ + Wraps a command to run it as Alice in a login shell + """ + quoted = shlex.quote(command) + return "su --login alice --command " + quoted - start_all() + start_all() - with subtest("Wait for the autologin"): - machine.wait_until_tty_matches("1", "alice@machine") + with subtest("Wait for the autologin"): + machine.wait_until_tty_matches("1", "alice@machine") - with subtest("Can generate a PGP key"): - # Note: this needs a tty because of pinentry - machine.send_chars("gpg --gen-key\n") - machine.wait_until_tty_matches("1", "Real name:") - machine.send_chars("Alice\n") - machine.wait_until_tty_matches("1", "Email address:") - machine.send_chars("alice@machine\n") - machine.wait_until_tty_matches("1", "Change") - machine.send_chars("O\n") - machine.wait_until_tty_matches("1", "Please enter") - machine.send_chars("pgp_p4ssphrase") - machine.send_key("tab") - machine.send_chars("pgp_p4ssphrase") - machine.wait_until_tty_matches("1", "Passphrases match") - machine.send_chars("\n") - machine.wait_until_tty_matches("1", "public and secret key created") + with subtest("Can generate a PGP key"): + # Note: this needs a tty because of pinentry + machine.send_chars("gpg --gen-key\n") + machine.wait_until_tty_matches("1", "Real name:") + machine.send_chars("Alice\n") + machine.wait_until_tty_matches("1", "Email address:") + machine.send_chars("alice@machine\n") + machine.wait_until_tty_matches("1", "Change") + machine.send_chars("O\n") + machine.wait_until_tty_matches("1", "Please enter") + machine.send_chars("pgp_p4ssphrase") + machine.send_key("tab") + machine.send_chars("pgp_p4ssphrase") + machine.wait_until_tty_matches("1", "Passphrases match") + machine.send_chars("\n") + machine.wait_until_tty_matches("1", "public and secret key created") - with subtest("Confirm the key is in the keyring"): - machine.wait_until_succeeds(as_alice("gpg --list-secret-keys | grep -q alice@machine")) + with subtest("Confirm the key is in the keyring"): + 
machine.wait_until_succeeds(as_alice("gpg --list-secret-keys | grep -q alice@machine")) - with subtest("Can generate and add an SSH key"): - machine.succeed(as_alice("ssh-keygen -t ed25519 -f alice -N ssh_p4ssphrase")) + with subtest("Can generate and add an SSH key"): + machine.succeed(as_alice("ssh-keygen -t ed25519 -f alice -N ssh_p4ssphrase")) - # Note: apparently this must be run before using the OpenSSH agent - # socket for the first time in a tty. It's not needed for `ssh` - # because there's a hook that calls it automatically (only in NixOS). - machine.send_chars("gpg-connect-agent updatestartuptty /bye\n") + # Note: apparently this must be run before using the OpenSSH agent + # socket for the first time in a tty. It's not needed for `ssh` + # because there's a hook that calls it automatically (only in NixOS). + machine.send_chars("gpg-connect-agent updatestartuptty /bye\n") - # Note: again, this needs a tty because of pinentry - machine.send_chars("ssh-add alice\n") - machine.wait_until_tty_matches("1", "Enter passphrase") - machine.send_chars("ssh_p4ssphrase\n") - machine.wait_until_tty_matches("1", "Please enter") - machine.send_chars("ssh_agent_p4ssphrase") - machine.send_key("tab") - machine.send_chars("ssh_agent_p4ssphrase") - machine.wait_until_tty_matches("1", "Passphrases match") - machine.send_chars("\n") + # Note: again, this needs a tty because of pinentry + machine.send_chars("ssh-add alice\n") + machine.wait_until_tty_matches("1", "Enter passphrase") + machine.send_chars("ssh_p4ssphrase\n") + machine.wait_until_tty_matches("1", "Please enter") + machine.send_chars("ssh_agent_p4ssphrase") + machine.send_key("tab") + machine.send_chars("ssh_agent_p4ssphrase") + machine.wait_until_tty_matches("1", "Passphrases match") + machine.send_chars("\n") - with subtest("Confirm the SSH key has been registered"): - machine.wait_until_succeeds(as_alice("ssh-add -l | grep -q alice@machine")) + with subtest("Confirm the SSH key has been registered"): + machine.wait_until_succeeds(as_alice("ssh-add -l | grep -q alice@machine")) - with subtest("Can preset the key passphrases in the agent"): - machine.succeed(as_alice("echo allow-preset-passphrase > .gnupg/gpg-agent.conf")) - machine.succeed(as_alice("pkill gpg-agent")) - machine.succeed(as_alice("preset_key pgp pgp_p4ssphrase")) - machine.succeed(as_alice("preset_key ssh ssh_agent_p4ssphrase")) + with subtest("Can preset the key passphrases in the agent"): + machine.succeed(as_alice("echo allow-preset-passphrase > .gnupg/gpg-agent.conf")) + machine.succeed(as_alice("pkill gpg-agent")) + machine.succeed(as_alice("preset_key pgp pgp_p4ssphrase")) + machine.succeed(as_alice("preset_key ssh ssh_agent_p4ssphrase")) - with subtest("Can encrypt and decrypt a message"): - machine.succeed(as_alice("echo Hello | gpg -e -r alice | gpg -d | grep -q Hello")) + with subtest("Can encrypt and decrypt a message"): + machine.succeed(as_alice("echo Hello | gpg -e -r alice | gpg -d | grep -q Hello")) - with subtest("Can log into the server"): - # Install Alice's public key - public_key = machine.succeed(as_alice("cat alice.pub")) - server.succeed("mkdir /etc/ssh/authorized_keys.d") - server.succeed(f"printf '{public_key}' > /etc/ssh/authorized_keys.d/alice") + with subtest("Can log into the server"): + # Install Alice's public key + public_key = machine.succeed(as_alice("cat alice.pub")) + server.succeed("mkdir /etc/ssh/authorized_keys.d") + server.succeed(f"printf '{public_key}' > /etc/ssh/authorized_keys.d/alice") - server.wait_for_open_port(22) - 
machine.succeed(as_alice("ssh -i alice -o StrictHostKeyChecking=no server exit")) - ''; - } -) + server.wait_for_open_port(22) + machine.succeed(as_alice("ssh -i alice -o StrictHostKeyChecking=no server exit")) + ''; +} diff --git a/nixos/tests/goatcounter.nix b/nixos/tests/goatcounter.nix index ee3b373383e2..b3dfe6f267f7 100644 --- a/nixos/tests/goatcounter.nix +++ b/nixos/tests/goatcounter.nix @@ -1,32 +1,30 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - { - name = "goatcounter"; +{ + name = "goatcounter"; - meta.maintainers = with lib.maintainers; [ bhankas ]; + meta.maintainers = with lib.maintainers; [ bhankas ]; - nodes.machine = - { config, ... }: - { - virtualisation.memorySize = 2048; + nodes.machine = + { config, ... }: + { + virtualisation.memorySize = 2048; - services.goatcounter = { - enable = true; - proxy = true; - }; + services.goatcounter = { + enable = true; + proxy = true; }; + }; - testScript = '' - start_all() - machine.wait_for_unit("goatcounter.service") - # wait for goatcounter to fully come up + testScript = '' + start_all() + machine.wait_for_unit("goatcounter.service") + # wait for goatcounter to fully come up - with subtest("goatcounter service starts"): - machine.wait_until_succeeds( - "curl -sSfL http://localhost:8081/ > /dev/null", - timeout=30 - ) - ''; - } -) + with subtest("goatcounter service starts"): + machine.wait_until_succeeds( + "curl -sSfL http://localhost:8081/ > /dev/null", + timeout=30 + ) + ''; +} diff --git a/nixos/tests/gobgpd.nix b/nixos/tests/gobgpd.nix index 029b486b7cc6..8491e1893403 100644 --- a/nixos/tests/gobgpd.nix +++ b/nixos/tests/gobgpd.nix @@ -1,87 +1,85 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - ifAddr = - node: iface: (pkgs.lib.head node.config.networking.interfaces.${iface}.ipv4.addresses).address; - in - { - name = "gobgpd"; +{ pkgs, ... }: +let + ifAddr = + node: iface: (pkgs.lib.head node.config.networking.interfaces.${iface}.ipv4.addresses).address; +in +{ + name = "gobgpd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ higebu ]; - }; + meta = with pkgs.lib.maintainers; { + maintainers = [ higebu ]; + }; - nodes = { - node1 = - { nodes, ... }: - { - environment.systemPackages = [ pkgs.gobgp ]; - networking.firewall.allowedTCPPorts = [ 179 ]; - services.gobgpd = { - enable = true; - settings = { - global = { - config = { - as = 64512; - router-id = "192.168.255.1"; - }; - }; - neighbors = [ - { - config = { - neighbor-address = ifAddr nodes.node2 "eth1"; - peer-as = 64513; - }; - } - ]; - }; - }; - }; - node2 = - { nodes, ... }: - { - environment.systemPackages = [ pkgs.gobgp ]; - networking.firewall.allowedTCPPorts = [ 179 ]; - services.gobgpd = { - enable = true; - settings = { - global = { - config = { - as = 64513; - router-id = "192.168.255.2"; - }; - }; - neighbors = [ - { - config = { - neighbor-address = ifAddr nodes.node1 "eth1"; - peer-as = 64512; - }; - } - ]; - }; - }; - }; - }; - - testScript = + nodes = { + node1 = { nodes, ... }: - let - addr1 = ifAddr nodes.node1 "eth1"; - addr2 = ifAddr nodes.node2 "eth1"; - in - '' - start_all() + { + environment.systemPackages = [ pkgs.gobgp ]; + networking.firewall.allowedTCPPorts = [ 179 ]; + services.gobgpd = { + enable = true; + settings = { + global = { + config = { + as = 64512; + router-id = "192.168.255.1"; + }; + }; + neighbors = [ + { + config = { + neighbor-address = ifAddr nodes.node2 "eth1"; + peer-as = 64513; + }; + } + ]; + }; + }; + }; + node2 = + { nodes, ... 
}: + { + environment.systemPackages = [ pkgs.gobgp ]; + networking.firewall.allowedTCPPorts = [ 179 ]; + services.gobgpd = { + enable = true; + settings = { + global = { + config = { + as = 64513; + router-id = "192.168.255.2"; + }; + }; + neighbors = [ + { + config = { + neighbor-address = ifAddr nodes.node1 "eth1"; + peer-as = 64512; + }; + } + ]; + }; + }; + }; + }; - for node in node1, node2: - with subtest("should start gobgpd node"): - node.wait_for_unit("gobgpd.service") - with subtest("should open port 179"): - node.wait_for_open_port(179) + testScript = + { nodes, ... }: + let + addr1 = ifAddr nodes.node1 "eth1"; + addr2 = ifAddr nodes.node2 "eth1"; + in + '' + start_all() - with subtest("should show neighbors by gobgp cli and BGP state should be ESTABLISHED"): - node1.wait_until_succeeds("gobgp neighbor ${addr2} | grep -q ESTABLISHED") - node2.wait_until_succeeds("gobgp neighbor ${addr1} | grep -q ESTABLISHED") - ''; - } -) + for node in node1, node2: + with subtest("should start gobgpd node"): + node.wait_for_unit("gobgpd.service") + with subtest("should open port 179"): + node.wait_for_open_port(179) + + with subtest("should show neighbors by gobgp cli and BGP state should be ESTABLISHED"): + node1.wait_until_succeeds("gobgp neighbor ${addr2} | grep -q ESTABLISHED") + node2.wait_until_succeeds("gobgp neighbor ${addr1} | grep -q ESTABLISHED") + ''; +} diff --git a/nixos/tests/gocd-agent.nix b/nixos/tests/gocd-agent.nix index d717299d55c7..674bc2374fd1 100644 --- a/nixos/tests/gocd-agent.nix +++ b/nixos/tests/gocd-agent.nix @@ -9,46 +9,44 @@ let header = "Accept: application/vnd.go.cd.v2+json"; in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "gocd-agent"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - grahamc - swarren83 - ]; +{ pkgs, ... }: +{ + name = "gocd-agent"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + grahamc + swarren83 + ]; - # gocd agent needs to register with the autoregister key created on first server startup, - # but NixOS module doesn't seem to allow to pass during runtime currently - broken = true; - }; + # gocd agent needs to register with the autoregister key created on first server startup, + # but NixOS module doesn't seem to allow to pass during runtime currently + broken = true; + }; - nodes = { - agent = - { ... }: - { - virtualisation.memorySize = 2046; - services.gocd-agent = { - enable = true; - }; - services.gocd-server = { - enable = true; - }; + nodes = { + agent = + { ... 
}: + { + virtualisation.memorySize = 2046; + services.gocd-agent = { + enable = true; }; - }; + services.gocd-server = { + enable = true; + }; + }; + }; - testScript = '' - start_all() - agent.wait_for_unit("gocd-server") - agent.wait_for_open_port(8153) - agent.wait_for_unit("gocd-agent") - agent.wait_until_succeeds( - "curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].uuid" - ) - agent.succeed( - "curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].agent_state | grep Idle" - ) - ''; - } -) + testScript = '' + start_all() + agent.wait_for_unit("gocd-server") + agent.wait_for_open_port(8153) + agent.wait_for_unit("gocd-agent") + agent.wait_until_succeeds( + "curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].uuid" + ) + agent.succeed( + "curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].agent_state | grep Idle" + ) + ''; +} diff --git a/nixos/tests/gocd-server.nix b/nixos/tests/gocd-server.nix index 95b126c55a52..cffd9f50aedd 100644 --- a/nixos/tests/gocd-server.nix +++ b/nixos/tests/gocd-server.nix @@ -2,29 +2,27 @@ # 1. GoCD server starts # 2. GoCD server responds -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "gocd-server"; - meta = with pkgs.lib.maintainers; { - maintainers = [ swarren83 ]; - }; +{ + name = "gocd-server"; + meta = with pkgs.lib.maintainers; { + maintainers = [ swarren83 ]; + }; - nodes = { - server = - { ... }: - { - virtualisation.memorySize = 2046; - services.gocd-server.enable = true; - }; - }; + nodes = { + server = + { ... }: + { + virtualisation.memorySize = 2046; + services.gocd-server.enable = true; + }; + }; - testScript = '' - server.start() - server.wait_for_unit("gocd-server") - server.wait_for_open_port(8153) - server.wait_until_succeeds("curl -s -f localhost:8153/go") - ''; - } -) + testScript = '' + server.start() + server.wait_for_unit("gocd-server") + server.wait_for_open_port(8153) + server.wait_until_succeeds("curl -s -f localhost:8153/go") + ''; +} diff --git a/nixos/tests/gollum.nix b/nixos/tests/gollum.nix index ab937bc990bb..bf42ccc35e30 100644 --- a/nixos/tests/gollum.nix +++ b/nixos/tests/gollum.nix @@ -1,21 +1,19 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "gollum"; +{ pkgs, ... }: +{ + name = "gollum"; - nodes = { - webserver = - { pkgs, lib, ... }: - { - services.gollum.enable = true; - }; - }; + nodes = { + webserver = + { pkgs, lib, ... }: + { + services.gollum.enable = true; + }; + }; - testScript = - { nodes, ... }: - '' - webserver.wait_for_unit("gollum") - webserver.wait_for_open_port(${toString nodes.webserver.services.gollum.port}) - ''; - } -) + testScript = + { nodes, ... }: + '' + webserver.wait_for_unit("gollum") + webserver.wait_for_open_port(${toString nodes.webserver.services.gollum.port}) + ''; +} diff --git a/nixos/tests/gonic.nix b/nixos/tests/gonic.nix index 47d432d6354a..836e7167035f 100644 --- a/nixos/tests/gonic.nix +++ b/nixos/tests/gonic.nix @@ -1,31 +1,29 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "gonic"; +{ pkgs, ... }: +{ + name = "gonic"; - nodes.machine = - { ... }: - { - systemd.tmpfiles.settings = { - "10-gonic" = { - "/tmp/music"."d" = { }; - "/tmp/podcast"."d" = { }; - "/tmp/playlists"."d" = { }; - }; - }; - services.gonic = { - enable = true; - settings = { - music-path = [ "/tmp/music" ]; - podcast-path = "/tmp/podcast"; - playlists-path = "/tmp/playlists"; - }; + nodes.machine = + { ... 
}: + { + systemd.tmpfiles.settings = { + "10-gonic" = { + "/tmp/music"."d" = { }; + "/tmp/podcast"."d" = { }; + "/tmp/playlists"."d" = { }; }; }; + services.gonic = { + enable = true; + settings = { + music-path = [ "/tmp/music" ]; + podcast-path = "/tmp/podcast"; + playlists-path = "/tmp/playlists"; + }; + }; + }; - testScript = '' - machine.wait_for_unit("gonic") - machine.wait_for_open_port(4747) - ''; - } -) + testScript = '' + machine.wait_for_unit("gonic") + machine.wait_for_open_port(4747) + ''; +} diff --git a/nixos/tests/gopro-tool.nix b/nixos/tests/gopro-tool.nix index 1f183e4c30d5..e52a6106ee41 100644 --- a/nixos/tests/gopro-tool.nix +++ b/nixos/tests/gopro-tool.nix @@ -1,41 +1,39 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - let - testScript = '' - start_all() +let + testScript = '' + start_all() - machine.wait_for_unit("multi-user.target") + machine.wait_for_unit("multi-user.target") - # Check that gopro-tool is installed - machine.succeed("which gopro-tool") + # Check that gopro-tool is installed + machine.succeed("which gopro-tool") - # Check that the v4l2loopback module is available - machine.succeed("lsmod | grep v4l2loopback || echo 'Module not found'") + # Check that the v4l2loopback module is available + machine.succeed("lsmod | grep v4l2loopback || echo 'Module not found'") - # Check that VLC is installed - machine.succeed("which vlc") - ''; - in - { - name = "gopro-tool"; - meta.maintainers = with lib.maintainers; [ ZMon3y ]; - nodes.machine = - { config, pkgs, ... }: - { - # Ensure dependencies are installed - environment.systemPackages = with pkgs; [ - gopro-tool - vlc - ]; + # Check that VLC is installed + machine.succeed("which vlc") + ''; +in +{ + name = "gopro-tool"; + meta.maintainers = with lib.maintainers; [ ZMon3y ]; + nodes.machine = + { config, pkgs, ... }: + { + # Ensure dependencies are installed + environment.systemPackages = with pkgs; [ + gopro-tool + vlc + ]; - # Load kernel module for testing - boot.extraModulePackages = with config.boot.kernelPackages; [ v4l2loopback ]; + # Load kernel module for testing + boot.extraModulePackages = with config.boot.kernelPackages; [ v4l2loopback ]; - # Enable module loading - boot.kernelModules = [ "v4l2loopback" ]; - }; + # Enable module loading + boot.kernelModules = [ "v4l2loopback" ]; + }; - testScript = testScript; - } -) + testScript = testScript; +} diff --git a/nixos/tests/goss.nix b/nixos/tests/goss.nix index 1bf9feda8ba3..a9d49b4ff53c 100644 --- a/nixos/tests/goss.nix +++ b/nixos/tests/goss.nix @@ -1,52 +1,50 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "goss"; - meta.maintainers = [ lib.maintainers.anthonyroussel ]; +{ pkgs, lib, ... 
}: +{ + name = "goss"; + meta.maintainers = [ lib.maintainers.anthonyroussel ]; - nodes.machine = { - environment.systemPackages = [ pkgs.jq ]; + nodes.machine = { + environment.systemPackages = [ pkgs.jq ]; - services.goss = { - enable = true; + services.goss = { + enable = true; - environment = { - GOSS_FMT = "json"; + environment = { + GOSS_FMT = "json"; + }; + + settings = { + addr."tcp://localhost:8080" = { + reachable = true; + local-address = "127.0.0.1"; }; - - settings = { - addr."tcp://localhost:8080" = { - reachable = true; - local-address = "127.0.0.1"; - }; - command."check-goss-version" = { - exec = "${lib.getExe pkgs.goss} --version"; - exit-status = 0; - }; - dns.localhost.resolvable = true; - file."/nix" = { - filetype = "directory"; - exists = true; - }; - group.root.exists = true; - kernel-param."kernel.ostype".value = "Linux"; - user.root.exists = true; + command."check-goss-version" = { + exec = "${lib.getExe pkgs.goss} --version"; + exit-status = 0; }; + dns.localhost.resolvable = true; + file."/nix" = { + filetype = "directory"; + exists = true; + }; + group.root.exists = true; + kernel-param."kernel.ostype".value = "Linux"; + user.root.exists = true; }; }; + }; - testScript = '' - import json + testScript = '' + import json - machine.wait_for_unit("goss.service") - machine.wait_for_open_port(8080) + machine.wait_for_unit("goss.service") + machine.wait_for_open_port(8080) - with subtest("returns health status"): - result = json.loads(machine.succeed("curl -sS http://localhost:8080/healthz")) + with subtest("returns health status"): + result = json.loads(machine.succeed("curl -sS http://localhost:8080/healthz")) - assert len(result["results"]) == 8, f".results should be an array of 10 items, was {result['results']!r}" - assert result["summary"]["failed-count"] == 0, f".summary.failed-count should be zero, was {result['summary']['failed-count']}" - assert result["summary"]["test-count"] == 8, f".summary.test-count should be 10, was {result['summary']['test-count']}" - ''; - } -) + assert len(result["results"]) == 8, f".results should be an array of 10 items, was {result['results']!r}" + assert result["summary"]["failed-count"] == 0, f".summary.failed-count should be zero, was {result['summary']['failed-count']}" + assert result["summary"]["test-count"] == 8, f".summary.test-count should be 10, was {result['summary']['test-count']}" + ''; +} diff --git a/nixos/tests/gotenberg.nix b/nixos/tests/gotenberg.nix index aa39b2d349d7..c640657ea872 100644 --- a/nixos/tests/gotenberg.nix +++ b/nixos/tests/gotenberg.nix @@ -1,26 +1,24 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... 
}: - { - name = "gotenberg"; - meta.maintainers = with lib.maintainers; [ pyrox0 ]; +{ + name = "gotenberg"; + meta.maintainers = with lib.maintainers; [ pyrox0 ]; - nodes.machine = { - services.gotenberg = { - enable = true; - }; + nodes.machine = { + services.gotenberg = { + enable = true; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("gotenberg.service") + machine.wait_for_unit("gotenberg.service") - # Gotenberg startup - machine.wait_for_open_port(3000) + # Gotenberg startup + machine.wait_for_open_port(3000) - # Ensure healthcheck endpoint succeeds - machine.succeed("curl http://localhost:3000/health") - ''; - } -) + # Ensure healthcheck endpoint succeeds + machine.succeed("curl http://localhost:3000/health") + ''; +} diff --git a/nixos/tests/gotify-server.nix b/nixos/tests/gotify-server.nix index 55f5fe6468f5..560022be89e8 100644 --- a/nixos/tests/gotify-server.nix +++ b/nixos/tests/gotify-server.nix @@ -1,57 +1,55 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "gotify-server"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; +{ pkgs, lib, ... }: +{ + name = "gotify-server"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; - nodes.machine = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.jq ]; + nodes.machine = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.jq ]; - services.gotify = { - enable = true; - environment = { - GOTIFY_SERVER_PORT = 3000; - }; + services.gotify = { + enable = true; + environment = { + GOTIFY_SERVER_PORT = 3000; }; }; + }; - testScript = '' - machine.start() + testScript = '' + machine.start() - machine.wait_for_unit("gotify-server.service") - machine.wait_for_open_port(3000) + machine.wait_for_unit("gotify-server.service") + machine.wait_for_open_port(3000) - token = machine.succeed( - "curl --fail -sS -X POST localhost:3000/application -F name=nixos " - + '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" ' - + "| jq .token | xargs echo -n" - ) + token = machine.succeed( + "curl --fail -sS -X POST localhost:3000/application -F name=nixos " + + '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" ' + + "| jq .token | xargs echo -n" + ) - usertoken = machine.succeed( - "curl --fail -sS -X POST localhost:3000/client -F name=nixos " - + '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" ' - + "| jq .token | xargs echo -n" - ) + usertoken = machine.succeed( + "curl --fail -sS -X POST localhost:3000/client -F name=nixos " + + '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" ' + + "| jq .token | xargs echo -n" + ) - machine.succeed( - f"curl --fail -sS -X POST 'localhost:3000/message?token={token}' -H 'Accept: application/json' " - + "-F title=Gotify -F message=Works" - ) + machine.succeed( + f"curl --fail -sS -X POST 'localhost:3000/message?token={token}' -H 'Accept: application/json' " + + "-F title=Gotify -F message=Works" + ) - title = machine.succeed( - f"curl --fail -sS 'localhost:3000/message?since=0&token={usertoken}' | jq '.messages|.[0]|.title' | xargs echo -n" - ) + title = machine.succeed( + f"curl --fail -sS 'localhost:3000/message?since=0&token={usertoken}' | jq '.messages|.[0]|.title' | xargs echo -n" + ) - assert title == "Gotify" + assert title == "Gotify" - # Ensure that the UI responds with a successful code and that the - # response is not empty - result = machine.succeed("curl -fsS localhost:3000") - assert result, "HTTP response from 
localhost:3000 must not be empty!" - ''; - } -) + # Ensure that the UI responds with a successful code and that the + # response is not empty + result = machine.succeed("curl -fsS localhost:3000") + assert result, "HTTP response from localhost:3000 must not be empty!" + ''; +} diff --git a/nixos/tests/graphite.nix b/nixos/tests/graphite.nix index 1791cb38fc62..fd5888f0c973 100644 --- a/nixos/tests/graphite.nix +++ b/nixos/tests/graphite.nix @@ -1,39 +1,37 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "graphite"; - nodes = { - one = - { ... }: - { - time.timeZone = "UTC"; - services.graphite = { - web = { - enable = true; - extraConfig = '' - SECRET_KEY = "abcd"; - ''; - }; - carbon.enableCache = true; - seyren.enable = false; # Implicitly requires openssl-1.0.2u which is marked insecure +{ pkgs, ... }: +{ + name = "graphite"; + nodes = { + one = + { ... }: + { + time.timeZone = "UTC"; + services.graphite = { + web = { + enable = true; + extraConfig = '' + SECRET_KEY = "abcd"; + ''; }; + carbon.enableCache = true; + seyren.enable = false; # Implicitly requires openssl-1.0.2u which is marked insecure }; - }; + }; + }; - testScript = '' - start_all() - one.wait_for_unit("default.target") - one.wait_for_unit("graphiteWeb.service") - one.wait_for_unit("carbonCache.service") - # The services above are of type "simple". systemd considers them active immediately - # even if they're still in preStart (which takes quite long for graphiteWeb). - # Wait for ports to open so we're sure the services are up and listening. - one.wait_for_open_port(8080) - one.wait_for_open_port(2003) - one.succeed('echo "foo 1 `date +%s`" | nc -N localhost 2003') - one.wait_until_succeeds( - "curl 'http://localhost:8080/metrics/find/?query=foo&format=treejson' --silent | grep foo >&2" - ) - ''; - } -) + testScript = '' + start_all() + one.wait_for_unit("default.target") + one.wait_for_unit("graphiteWeb.service") + one.wait_for_unit("carbonCache.service") + # The services above are of type "simple". systemd considers them active immediately + # even if they're still in preStart (which takes quite long for graphiteWeb). + # Wait for ports to open so we're sure the services are up and listening. + one.wait_for_open_port(8080) + one.wait_for_open_port(2003) + one.succeed('echo "foo 1 `date +%s`" | nc -N localhost 2003') + one.wait_until_succeeds( + "curl 'http://localhost:8080/metrics/find/?query=foo&format=treejson' --silent | grep foo >&2" + ) + ''; +} diff --git a/nixos/tests/graylog.nix b/nixos/tests/graylog.nix index 7766ad9a6a05..32c419378713 100644 --- a/nixos/tests/graylog.nix +++ b/nixos/tests/graylog.nix @@ -1,140 +1,138 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "graylog"; - meta.maintainers = [ ]; +{ pkgs, lib, ... }: +{ + name = "graylog"; + meta.maintainers = [ ]; - nodes.machine = - { pkgs, ... }: - { - virtualisation.memorySize = 4096; - virtualisation.diskSize = 1024 * 6; + nodes.machine = + { pkgs, ... 
}: + { + virtualisation.memorySize = 4096; + virtualisation.diskSize = 1024 * 6; - services.mongodb.enable = true; - services.elasticsearch.enable = true; - services.elasticsearch.extraConf = '' - network.publish_host: 127.0.0.1 - network.bind_host: 127.0.0.1 - ''; + services.mongodb.enable = true; + services.elasticsearch.enable = true; + services.elasticsearch.extraConf = '' + network.publish_host: 127.0.0.1 + network.bind_host: 127.0.0.1 + ''; - services.graylog = { - enable = true; - passwordSecret = "YGhZ59wXMrYOojx5xdgEpBpDw2N6FbhM4lTtaJ1KPxxmKrUvSlDbtWArwAWMQ5LKx1ojHEVrQrBMVRdXbRyZLqffoUzHfssc"; - elasticsearchHosts = [ "http://localhost:9200" ]; + services.graylog = { + enable = true; + passwordSecret = "YGhZ59wXMrYOojx5xdgEpBpDw2N6FbhM4lTtaJ1KPxxmKrUvSlDbtWArwAWMQ5LKx1ojHEVrQrBMVRdXbRyZLqffoUzHfssc"; + elasticsearchHosts = [ "http://localhost:9200" ]; - # `echo -n "nixos" | shasum -a 256` - rootPasswordSha2 = "6ed332bcfa615381511d4d5ba44a293bb476f368f7e9e304f0dff50230d1a85b"; - }; - - environment.systemPackages = [ pkgs.jq ]; - - systemd.services.graylog.path = [ pkgs.netcat ]; - systemd.services.graylog.preStart = '' - until nc -z localhost 9200; do - sleep 2 - done - ''; + # `echo -n "nixos" | shasum -a 256` + rootPasswordSha2 = "6ed332bcfa615381511d4d5ba44a293bb476f368f7e9e304f0dff50230d1a85b"; }; - testScript = - let - payloads.login = pkgs.writeText "login.json" ( - builtins.toJSON { - host = "127.0.0.1:9000"; - username = "admin"; - password = "nixos"; - } - ); + environment.systemPackages = [ pkgs.jq ]; - payloads.input = pkgs.writeText "input.json" ( - builtins.toJSON { - title = "Demo"; - global = false; - type = "org.graylog2.inputs.gelf.udp.GELFUDPInput"; - node = "@node@"; - configuration = { - bind_address = "0.0.0.0"; - decompress_size_limit = 8388608; - number_worker_threads = 1; - override_source = null; - port = 12201; - recv_buffer_size = 262144; - }; - } - ); - - payloads.gelf_message = pkgs.writeText "gelf.json" ( - builtins.toJSON { - host = "example.org"; - short_message = "A short message"; - full_message = "A long message"; - version = "1.1"; - level = 5; - facility = "Test"; - } - ); - in - '' - machine.start() - machine.wait_for_unit("graylog.service") - - machine.wait_until_succeeds( - "journalctl -o cat -u graylog.service | grep 'Started REST API at <127.0.0.1:9000>'" - ) - - machine.wait_for_open_port(9000) - machine.succeed("curl -sSfL http://127.0.0.1:9000/") - - machine.wait_until_succeeds( - "journalctl -o cat -u graylog.service | grep 'Graylog server up and running'" - ) - - session = machine.succeed( - "curl -X POST " - + "-sSfL http://127.0.0.1:9000/api/system/sessions " - + "-d $(cat ${payloads.login}) " - + "-H 'Content-Type: application/json' " - + "-H 'Accept: application/json' " - + "-H 'x-requested-by: cli' " - + "| jq .session_id | xargs echo" - ).rstrip() - - machine.succeed( - "curl -X POST " - + f"-sSfL http://127.0.0.1:9000/api/system/inputs -u {session}:session " - + '-d $(cat ${payloads.input} | sed -e "s,@node@,$(cat /var/lib/graylog/server/node-id),") ' - + "-H 'Accept: application/json' " - + "-H 'Content-Type: application/json' " - + "-H 'x-requested-by: cli' " - ) - - machine.wait_until_succeeds( - "journalctl -o cat -u graylog.service | grep -E 'Input \[GELF UDP/Demo/[[:alnum:]]{24}\] is now RUNNING'" - ) - - machine.wait_until_succeeds( - "test \"$(curl -sSfL 'http://127.0.0.1:9000/api/cluster/inputstates' " - + f"-u {session}:session " - + "-H 'Accept: application/json' " - + "-H 'Content-Type: application/json' " - + "-H 
'x-requested-by: cli'" - + "| jq 'to_entries[]|.value|.[0]|.state' | xargs echo" - + ')" = "RUNNING"' - ) - - machine.succeed( - "echo -n $(cat ${payloads.gelf_message}) | nc -w10 -u 127.0.0.1 12201" - ) - - machine.succeed( - 'test "$(curl -X GET ' - + "-sSfL 'http://127.0.0.1:9000/api/search/universal/relative?query=*' " - + f"-u {session}:session " - + "-H 'Accept: application/json' " - + "-H 'Content-Type: application/json' " - + "-H 'x-requested-by: cli'" - + ' | jq \'.total_results\' | xargs echo)" = "1"' - ) + systemd.services.graylog.path = [ pkgs.netcat ]; + systemd.services.graylog.preStart = '' + until nc -z localhost 9200; do + sleep 2 + done ''; - } -) + }; + + testScript = + let + payloads.login = pkgs.writeText "login.json" ( + builtins.toJSON { + host = "127.0.0.1:9000"; + username = "admin"; + password = "nixos"; + } + ); + + payloads.input = pkgs.writeText "input.json" ( + builtins.toJSON { + title = "Demo"; + global = false; + type = "org.graylog2.inputs.gelf.udp.GELFUDPInput"; + node = "@node@"; + configuration = { + bind_address = "0.0.0.0"; + decompress_size_limit = 8388608; + number_worker_threads = 1; + override_source = null; + port = 12201; + recv_buffer_size = 262144; + }; + } + ); + + payloads.gelf_message = pkgs.writeText "gelf.json" ( + builtins.toJSON { + host = "example.org"; + short_message = "A short message"; + full_message = "A long message"; + version = "1.1"; + level = 5; + facility = "Test"; + } + ); + in + '' + machine.start() + machine.wait_for_unit("graylog.service") + + machine.wait_until_succeeds( + "journalctl -o cat -u graylog.service | grep 'Started REST API at <127.0.0.1:9000>'" + ) + + machine.wait_for_open_port(9000) + machine.succeed("curl -sSfL http://127.0.0.1:9000/") + + machine.wait_until_succeeds( + "journalctl -o cat -u graylog.service | grep 'Graylog server up and running'" + ) + + session = machine.succeed( + "curl -X POST " + + "-sSfL http://127.0.0.1:9000/api/system/sessions " + + "-d $(cat ${payloads.login}) " + + "-H 'Content-Type: application/json' " + + "-H 'Accept: application/json' " + + "-H 'x-requested-by: cli' " + + "| jq .session_id | xargs echo" + ).rstrip() + + machine.succeed( + "curl -X POST " + + f"-sSfL http://127.0.0.1:9000/api/system/inputs -u {session}:session " + + '-d $(cat ${payloads.input} | sed -e "s,@node@,$(cat /var/lib/graylog/server/node-id),") ' + + "-H 'Accept: application/json' " + + "-H 'Content-Type: application/json' " + + "-H 'x-requested-by: cli' " + ) + + machine.wait_until_succeeds( + "journalctl -o cat -u graylog.service | grep -E 'Input \[GELF UDP/Demo/[[:alnum:]]{24}\] is now RUNNING'" + ) + + machine.wait_until_succeeds( + "test \"$(curl -sSfL 'http://127.0.0.1:9000/api/cluster/inputstates' " + + f"-u {session}:session " + + "-H 'Accept: application/json' " + + "-H 'Content-Type: application/json' " + + "-H 'x-requested-by: cli'" + + "| jq 'to_entries[]|.value|.[0]|.state' | xargs echo" + + ')" = "RUNNING"' + ) + + machine.succeed( + "echo -n $(cat ${payloads.gelf_message}) | nc -w10 -u 127.0.0.1 12201" + ) + + machine.succeed( + 'test "$(curl -X GET ' + + "-sSfL 'http://127.0.0.1:9000/api/search/universal/relative?query=*' " + + f"-u {session}:session " + + "-H 'Accept: application/json' " + + "-H 'Content-Type: application/json' " + + "-H 'x-requested-by: cli'" + + ' | jq \'.total_results\' | xargs echo)" = "1"' + ) + ''; +} diff --git a/nixos/tests/greetd-no-shadow.nix b/nixos/tests/greetd-no-shadow.nix index 0bbf4faa3d9c..c4cb028ff6eb 100644 --- a/nixos/tests/greetd-no-shadow.nix +++ 
b/nixos/tests/greetd-no-shadow.nix @@ -1,56 +1,54 @@ -import ./make-test-python.nix ( - { - pkgs, - latestKernel ? false, - ... - }: - { - name = "greetd-no-shadow"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; +{ + pkgs, + latestKernel ? false, + ... +}: +{ + name = "greetd-no-shadow"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; - nodes.machine = - { pkgs, lib, ... }: - { + nodes.machine = + { pkgs, lib, ... }: + { - users.users.alice = { - isNormalUser = true; - group = "alice"; - password = "foobar"; - }; - users.groups.alice = { }; + users.users.alice = { + isNormalUser = true; + group = "alice"; + password = "foobar"; + }; + users.groups.alice = { }; - # This means login(1) breaks, so we must use greetd/agreety instead. - security.shadow.enable = false; + # This means login(1) breaks, so we must use greetd/agreety instead. + security.shadow.enable = false; - services.greetd = { - enable = true; - settings = { - default_session = { - command = "${pkgs.greetd.greetd}/bin/agreety --cmd bash"; - }; + services.greetd = { + enable = true; + settings = { + default_session = { + command = "${pkgs.greetd.greetd}/bin/agreety --cmd bash"; }; }; }; + }; - testScript = '' - machine.start() + testScript = '' + machine.start() - machine.wait_for_unit("multi-user.target") - machine.wait_until_succeeds("pgrep -f 'agretty.*tty1'") - machine.screenshot("postboot") + machine.wait_for_unit("multi-user.target") + machine.wait_until_succeeds("pgrep -f 'agretty.*tty1'") + machine.screenshot("postboot") - with subtest("Log in as alice on a virtual console"): - machine.wait_until_tty_matches("1", "login: ") - machine.send_chars("alice\n") - machine.wait_until_tty_matches("1", "login: alice") - machine.wait_until_succeeds("pgrep login") - machine.wait_until_tty_matches("1", "Password: ") - machine.send_chars("foobar\n") - machine.wait_until_succeeds("pgrep -u alice bash") - machine.send_chars("touch done\n") - machine.wait_for_file("/home/alice/done") - ''; - } -) + with subtest("Log in as alice on a virtual console"): + machine.wait_until_tty_matches("1", "login: ") + machine.send_chars("alice\n") + machine.wait_until_tty_matches("1", "login: alice") + machine.wait_until_succeeds("pgrep login") + machine.wait_until_tty_matches("1", "Password: ") + machine.send_chars("foobar\n") + machine.wait_until_succeeds("pgrep -u alice bash") + machine.send_chars("touch done\n") + machine.wait_for_file("/home/alice/done") + ''; +} diff --git a/nixos/tests/grub.nix b/nixos/tests/grub.nix index c7232a9e6940..4221eec73049 100644 --- a/nixos/tests/grub.nix +++ b/nixos/tests/grub.nix @@ -1,65 +1,63 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "grub"; +{ lib, ... }: +{ + name = "grub"; - meta = with lib.maintainers; { - maintainers = [ rnhmjoj ]; + meta = with lib.maintainers; { + maintainers = [ rnhmjoj ]; + }; + + nodes.machine = + { ... }: + { + virtualisation.useBootLoader = true; + + boot.loader.timeout = null; + boot.loader.grub = { + enable = true; + users.alice.password = "supersecret"; + + # OCR is not accurate enough + extraConfig = "serial; terminal_output serial"; + }; }; - nodes.machine = - { ... 
}: - { - virtualisation.useBootLoader = true; - - boot.loader.timeout = null; - boot.loader.grub = { - enable = true; - users.alice.password = "supersecret"; - - # OCR is not accurate enough - extraConfig = "serial; terminal_output serial"; - }; - }; - - testScript = '' - def grub_login_as(user, password): - """ - Enters user and password to log into GRUB - """ - machine.wait_for_console_text("Enter username:") - machine.send_chars(user + "\n") - machine.wait_for_console_text("Enter password:") - machine.send_chars(password + "\n") + testScript = '' + def grub_login_as(user, password): + """ + Enters user and password to log into GRUB + """ + machine.wait_for_console_text("Enter username:") + machine.send_chars(user + "\n") + machine.wait_for_console_text("Enter password:") + machine.send_chars(password + "\n") - def grub_select_all_configurations(): - """ - Selects "All configurations" from the GRUB menu - to trigger a login request. - """ - machine.send_monitor_command("sendkey down") - machine.send_monitor_command("sendkey ret") + def grub_select_all_configurations(): + """ + Selects "All configurations" from the GRUB menu + to trigger a login request. + """ + machine.send_monitor_command("sendkey down") + machine.send_monitor_command("sendkey ret") - machine.start() + machine.start() - # wait for grub screen - machine.wait_for_console_text("GNU GRUB") + # wait for grub screen + machine.wait_for_console_text("GNU GRUB") - grub_select_all_configurations() - with subtest("Invalid credentials are rejected"): - grub_login_as("wronguser", "wrongsecret") - machine.wait_for_console_text("error: access denied.") + grub_select_all_configurations() + with subtest("Invalid credentials are rejected"): + grub_login_as("wronguser", "wrongsecret") + machine.wait_for_console_text("error: access denied.") - grub_select_all_configurations() - with subtest("Valid credentials are accepted"): - grub_login_as("alice", "supersecret") - machine.send_chars("\n") # press enter to boot - machine.wait_for_console_text("Linux version") + grub_select_all_configurations() + with subtest("Valid credentials are accepted"): + grub_login_as("alice", "supersecret") + machine.send_chars("\n") # press enter to boot + machine.wait_for_console_text("Linux version") - with subtest("Machine boots correctly"): - machine.wait_for_unit("multi-user.target") - ''; - } -) + with subtest("Machine boots correctly"): + machine.wait_for_unit("multi-user.target") + ''; +} diff --git a/nixos/tests/guacamole-server.nix b/nixos/tests/guacamole-server.nix index 41830778d836..280742f5ad0b 100644 --- a/nixos/tests/guacamole-server.nix +++ b/nixos/tests/guacamole-server.nix @@ -1,25 +1,23 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "guacamole-server"; +{ pkgs, lib, ... }: +{ + name = "guacamole-server"; - nodes = { - machine = - { pkgs, ... }: - { - services.guacamole-server = { - enable = true; - host = "0.0.0.0"; - }; + nodes = { + machine = + { pkgs, ... 
}: + { + services.guacamole-server = { + enable = true; + host = "0.0.0.0"; }; - }; + }; + }; - testScript = '' - start_all() - machine.wait_for_unit("guacamole-server.service") - machine.wait_for_open_port(4822) - ''; + testScript = '' + start_all() + machine.wait_for_unit("guacamole-server.service") + machine.wait_for_open_port(4822) + ''; - meta.maintainers = [ lib.maintainers.drupol ]; - } -) + meta.maintainers = [ lib.maintainers.drupol ]; +} diff --git a/nixos/tests/gvisor.nix b/nixos/tests/gvisor.nix index 905abaeedc62..73b013754e81 100644 --- a/nixos/tests/gvisor.nix +++ b/nixos/tests/gvisor.nix @@ -1,48 +1,46 @@ # This test runs a container through gvisor and checks if simple container starts -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "gvisor"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; +{ pkgs, ... }: +{ + name = "gvisor"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; - nodes = { - gvisor = - { pkgs, ... }: - { - virtualisation.docker = { - enable = true; - extraOptions = "--add-runtime runsc=${pkgs.gvisor}/bin/runsc"; - }; - - networking = { - dhcpcd.enable = false; - defaultGateway = "192.168.1.1"; - interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [ - { - address = "192.168.1.2"; - prefixLength = 24; - } - ]; - }; + nodes = { + gvisor = + { pkgs, ... }: + { + virtualisation.docker = { + enable = true; + extraOptions = "--add-runtime runsc=${pkgs.gvisor}/bin/runsc"; }; - }; - testScript = '' - start_all() + networking = { + dhcpcd.enable = false; + defaultGateway = "192.168.1.1"; + interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [ + { + address = "192.168.1.2"; + prefixLength = 24; + } + ]; + }; + }; + }; - gvisor.wait_for_unit("network.target") - gvisor.wait_for_unit("sockets.target") + testScript = '' + start_all() - # Test the Docker runtime - gvisor.succeed("tar cv --files-from /dev/null | docker import - scratchimg") - gvisor.succeed( - "docker run -d --name=sleeping --runtime=runsc -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10" - ) - gvisor.succeed("docker ps | grep sleeping") - gvisor.succeed("docker stop sleeping") - ''; - } -) + gvisor.wait_for_unit("network.target") + gvisor.wait_for_unit("sockets.target") + + # Test the Docker runtime + gvisor.succeed("tar cv --files-from /dev/null | docker import - scratchimg") + gvisor.succeed( + "docker run -d --name=sleeping --runtime=runsc -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10" + ) + gvisor.succeed("docker ps | grep sleeping") + gvisor.succeed("docker stop sleeping") + ''; +} diff --git a/nixos/tests/hardened.nix b/nixos/tests/hardened.nix index 7e1301b8099a..5a11b0a90567 100644 --- a/nixos/tests/hardened.nix +++ b/nixos/tests/hardened.nix @@ -1,113 +1,111 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "hardened"; - meta = with pkgs.lib.maintainers; { - maintainers = [ joachifm ]; +{ pkgs, ... }: +{ + name = "hardened"; + meta = with pkgs.lib.maintainers; { + maintainers = [ joachifm ]; + }; + + nodes.machine = + { + lib, + pkgs, + config, + ... 
+ }: + { + users.users.alice = { + isNormalUser = true; + extraGroups = [ "proc" ]; + }; + users.users.sybil = { + isNormalUser = true; + group = "wheel"; + }; + imports = [ ../modules/profiles/hardened.nix ]; + environment.memoryAllocator.provider = "graphene-hardened"; + nix.settings.sandbox = false; + virtualisation.emptyDiskImages = [ 4096 ]; + boot.initrd.postDeviceCommands = '' + ${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb + ''; + virtualisation.fileSystems = { + "/efi" = { + device = "/dev/disk/by-label/EFISYS"; + fsType = "vfat"; + options = [ "noauto" ]; + }; + }; + boot.extraModulePackages = pkgs.lib.optional (pkgs.lib.versionOlder config.boot.kernelPackages.kernel.version "5.6") config.boot.kernelPackages.wireguard; + boot.kernelModules = [ "wireguard" ]; }; - nodes.machine = - { - lib, - pkgs, - config, - ... - }: - { - users.users.alice = { - isNormalUser = true; - extraGroups = [ "proc" ]; - }; - users.users.sybil = { - isNormalUser = true; - group = "wheel"; - }; - imports = [ ../modules/profiles/hardened.nix ]; - environment.memoryAllocator.provider = "graphene-hardened"; - nix.settings.sandbox = false; - virtualisation.emptyDiskImages = [ 4096 ]; - boot.initrd.postDeviceCommands = '' - ${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb - ''; - virtualisation.fileSystems = { - "/efi" = { - device = "/dev/disk/by-label/EFISYS"; - fsType = "vfat"; - options = [ "noauto" ]; - }; - }; - boot.extraModulePackages = pkgs.lib.optional (pkgs.lib.versionOlder config.boot.kernelPackages.kernel.version "5.6") config.boot.kernelPackages.wireguard; - boot.kernelModules = [ "wireguard" ]; - }; - - testScript = - let - hardened-malloc-tests = pkgs.graphene-hardened-malloc.ld-preload-tests; - in - '' - machine.wait_for_unit("multi-user.target") + testScript = + let + hardened-malloc-tests = pkgs.graphene-hardened-malloc.ld-preload-tests; + in + '' + machine.wait_for_unit("multi-user.target") - with subtest("AppArmor profiles are loaded"): - machine.succeed("systemctl status apparmor.service") + with subtest("AppArmor profiles are loaded"): + machine.succeed("systemctl status apparmor.service") - # AppArmor securityfs - with subtest("AppArmor securityfs is mounted"): - machine.succeed("mountpoint -q /sys/kernel/security") - machine.succeed("cat /sys/kernel/security/apparmor/profiles") + # AppArmor securityfs + with subtest("AppArmor securityfs is mounted"): + machine.succeed("mountpoint -q /sys/kernel/security") + machine.succeed("cat /sys/kernel/security/apparmor/profiles") - # Test loading out-of-tree modules - with subtest("Out-of-tree modules can be loaded"): - machine.succeed("grep -Fq wireguard /proc/modules") + # Test loading out-of-tree modules + with subtest("Out-of-tree modules can be loaded"): + machine.succeed("grep -Fq wireguard /proc/modules") - # Test kernel module hardening - with subtest("No more kernel modules can be loaded"): - # note: this better a be module we normally wouldn't load ... - machine.wait_for_unit("disable-kernel-module-loading.service") - machine.fail("modprobe dccp") + # Test kernel module hardening + with subtest("No more kernel modules can be loaded"): + # note: this better a be module we normally wouldn't load ... 
+ machine.wait_for_unit("disable-kernel-module-loading.service") + machine.fail("modprobe dccp") - # Test userns - with subtest("User namespaces are restricted"): - machine.succeed("unshare --user true") - machine.fail("su -l alice -c 'unshare --user true'") + # Test userns + with subtest("User namespaces are restricted"): + machine.succeed("unshare --user true") + machine.fail("su -l alice -c 'unshare --user true'") - # Test dmesg restriction - with subtest("Regular users cannot access dmesg"): - machine.fail("su -l alice -c dmesg") + # Test dmesg restriction + with subtest("Regular users cannot access dmesg"): + machine.fail("su -l alice -c dmesg") - # Test access to kcore - with subtest("Kcore is inaccessible as root"): - machine.fail("cat /proc/kcore") + # Test access to kcore + with subtest("Kcore is inaccessible as root"): + machine.fail("cat /proc/kcore") - # Test deferred mount - with subtest("Deferred mounts work"): - machine.fail("mountpoint -q /efi") # was deferred - machine.execute("mkdir -p /efi") - machine.succeed("mount /dev/disk/by-label/EFISYS /efi") - machine.succeed("mountpoint -q /efi") # now mounted + # Test deferred mount + with subtest("Deferred mounts work"): + machine.fail("mountpoint -q /efi") # was deferred + machine.execute("mkdir -p /efi") + machine.succeed("mount /dev/disk/by-label/EFISYS /efi") + machine.succeed("mountpoint -q /efi") # now mounted - # Test Nix dæmon usage - with subtest("nix-daemon cannot be used by all users"): - machine.fail("su -l nobody -s /bin/sh -c 'nix --extra-experimental-features nix-command ping-store'") - machine.succeed("su -l alice -c 'nix --extra-experimental-features nix-command ping-store'") + # Test Nix dæmon usage + with subtest("nix-daemon cannot be used by all users"): + machine.fail("su -l nobody -s /bin/sh -c 'nix --extra-experimental-features nix-command ping-store'") + machine.succeed("su -l alice -c 'nix --extra-experimental-features nix-command ping-store'") - # Test kernel image protection - with subtest("The kernel image is protected"): - machine.fail("systemctl hibernate") - machine.fail("systemctl kexec") + # Test kernel image protection + with subtest("The kernel image is protected"): + machine.fail("systemctl hibernate") + machine.fail("systemctl kexec") - with subtest("The hardened memory allocator works"): - machine.succeed("${hardened-malloc-tests}/bin/run-tests") - ''; - } -) + with subtest("The hardened memory allocator works"): + machine.succeed("${hardened-malloc-tests}/bin/run-tests") + ''; +} diff --git a/nixos/tests/haste-server.nix b/nixos/tests/haste-server.nix index 9c076155f2fc..758e5f9dcae8 100644 --- a/nixos/tests/haste-server.nix +++ b/nixos/tests/haste-server.nix @@ -1,27 +1,25 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "haste-server"; - meta.maintainers = with lib.maintainers; [ mkg20001 ]; +{ pkgs, lib, ... }: +{ + name = "haste-server"; + meta.maintainers = with lib.maintainers; [ mkg20001 ]; - nodes.machine = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ - curl - jq - ]; + nodes.machine = + { pkgs, ... }: + { + environment.systemPackages = with pkgs; [ + curl + jq + ]; - services.haste-server = { - enable = true; - }; + services.haste-server = { + enable = true; }; + }; - testScript = '' - machine.wait_for_unit("haste-server") - machine.wait_until_succeeds("curl -s localhost:7777") - machine.succeed('curl -s -X POST http://localhost:7777/documents -d "Hello World!" 
> bla') - machine.succeed('curl http://localhost:7777/raw/$(cat bla | jq -r .key) | grep "Hello World"') - ''; - } -) + testScript = '' + machine.wait_for_unit("haste-server") + machine.wait_until_succeeds("curl -s localhost:7777") + machine.succeed('curl -s -X POST http://localhost:7777/documents -d "Hello World!" > bla') + machine.succeed('curl http://localhost:7777/raw/$(cat bla | jq -r .key) | grep "Hello World"') + ''; +} diff --git a/nixos/tests/headscale.nix b/nixos/tests/headscale.nix index 3ba331b7c3d4..c4e69cd1ffd5 100644 --- a/nixos/tests/headscale.nix +++ b/nixos/tests/headscale.nix @@ -1,93 +1,91 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } '' - openssl req \ - -x509 -newkey rsa:4096 -sha256 -days 365 \ - -nodes -out cert.pem -keyout key.pem \ - -subj '/CN=headscale' -addext "subjectAltName=DNS:headscale" +{ pkgs, lib, ... }: +let + tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } '' + openssl req \ + -x509 -newkey rsa:4096 -sha256 -days 365 \ + -nodes -out cert.pem -keyout key.pem \ + -subj '/CN=headscale' -addext "subjectAltName=DNS:headscale" - mkdir -p $out - cp key.pem cert.pem $out - ''; - in - { - name = "headscale"; - meta.maintainers = with lib.maintainers; [ - kradalby - misterio77 - ]; + mkdir -p $out + cp key.pem cert.pem $out + ''; +in +{ + name = "headscale"; + meta.maintainers = with lib.maintainers; [ + kradalby + misterio77 + ]; - nodes = - let - headscalePort = 8080; - stunPort = 3478; - peer = { - services.tailscale.enable = true; - security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ]; - }; - in - { - peer1 = peer; - peer2 = peer; - - headscale = { - services = { - headscale = { - enable = true; - port = headscalePort; - settings = { - server_url = "https://headscale"; - ip_prefixes = [ "100.64.0.0/10" ]; - derp.server = { - enabled = true; - region_id = 999; - stun_listen_addr = "0.0.0.0:${toString stunPort}"; - }; - dns.base_domain = "tailnet"; - }; - }; - nginx = { - enable = true; - virtualHosts.headscale = { - addSSL = true; - sslCertificate = "${tls-cert}/cert.pem"; - sslCertificateKey = "${tls-cert}/key.pem"; - locations."/" = { - proxyPass = "http://127.0.0.1:${toString headscalePort}"; - proxyWebsockets = true; - }; - }; - }; - }; - networking.firewall = { - allowedTCPPorts = [ - 80 - 443 - ]; - allowedUDPPorts = [ stunPort ]; - }; - environment.systemPackages = [ pkgs.headscale ]; - }; + nodes = + let + headscalePort = 8080; + stunPort = 3478; + peer = { + services.tailscale.enable = true; + security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ]; }; + in + { + peer1 = peer; + peer2 = peer; - testScript = '' - start_all() - headscale.wait_for_unit("headscale") - headscale.wait_for_open_port(443) + headscale = { + services = { + headscale = { + enable = true; + port = headscalePort; + settings = { + server_url = "https://headscale"; + ip_prefixes = [ "100.64.0.0/10" ]; + derp.server = { + enabled = true; + region_id = 999; + stun_listen_addr = "0.0.0.0:${toString stunPort}"; + }; + dns.base_domain = "tailnet"; + }; + }; + nginx = { + enable = true; + virtualHosts.headscale = { + addSSL = true; + sslCertificate = "${tls-cert}/cert.pem"; + sslCertificateKey = "${tls-cert}/key.pem"; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString headscalePort}"; + proxyWebsockets = true; + }; + }; + }; + }; + networking.firewall = { + allowedTCPPorts = [ + 80 + 443 + ]; + allowedUDPPorts = [ stunPort ]; + }; + 
environment.systemPackages = [ pkgs.headscale ]; + }; + }; - # Create headscale user and preauth-key - headscale.succeed("headscale users create test") - authkey = headscale.succeed("headscale preauthkeys -u test create --reusable") + testScript = '' + start_all() + headscale.wait_for_unit("headscale") + headscale.wait_for_open_port(443) - # Connect peers - up_cmd = f"tailscale up --login-server 'https://headscale' --auth-key {authkey}" - peer1.execute(up_cmd) - peer2.execute(up_cmd) + # Create headscale user and preauth-key + headscale.succeed("headscale users create test") + authkey = headscale.succeed("headscale preauthkeys -u test create --reusable") - # Check that they are reachable from the tailnet - peer1.wait_until_succeeds("tailscale ping peer2") - peer2.wait_until_succeeds("tailscale ping peer1.tailnet") - ''; - } -) + # Connect peers + up_cmd = f"tailscale up --login-server 'https://headscale' --auth-key {authkey}" + peer1.execute(up_cmd) + peer2.execute(up_cmd) + + # Check that they are reachable from the tailnet + peer1.wait_until_succeeds("tailscale ping peer2") + peer2.wait_until_succeeds("tailscale ping peer1.tailnet") + ''; +} diff --git a/nixos/tests/hedgedoc.nix b/nixos/tests/hedgedoc.nix index aa1be7421d5e..aaf83a30bfc5 100644 --- a/nixos/tests/hedgedoc.nix +++ b/nixos/tests/hedgedoc.nix @@ -1,104 +1,102 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "hedgedoc"; +{ pkgs, lib, ... }: +{ + name = "hedgedoc"; - meta = with lib.maintainers; { - maintainers = [ willibutz ]; - }; + meta = with lib.maintainers; { + maintainers = [ willibutz ]; + }; - nodes = { - hedgedocSqlite = - { ... }: - { - services.hedgedoc.enable = true; - }; + nodes = { + hedgedocSqlite = + { ... }: + { + services.hedgedoc.enable = true; + }; - hedgedocPostgresWithTCPSocket = - { ... }: - { - systemd.services.hedgedoc.after = [ "postgresql.service" ]; - services = { - hedgedoc = { - enable = true; - settings.db = { - dialect = "postgres"; - user = "hedgedoc"; - password = "$DB_PASSWORD"; - host = "localhost"; - port = 5432; - database = "hedgedocdb"; - }; - - /* - Do not use pkgs.writeText for secrets as - they will end up in the world-readable Nix store. - */ - environmentFile = pkgs.writeText "hedgedoc-env" '' - DB_PASSWORD=snakeoilpassword - ''; - }; - postgresql = { - enable = true; - initialScript = pkgs.writeText "pg-init-script.sql" '' - CREATE ROLE hedgedoc LOGIN PASSWORD 'snakeoilpassword'; - CREATE DATABASE hedgedocdb OWNER hedgedoc; - ''; + hedgedocPostgresWithTCPSocket = + { ... }: + { + systemd.services.hedgedoc.after = [ "postgresql.service" ]; + services = { + hedgedoc = { + enable = true; + settings.db = { + dialect = "postgres"; + user = "hedgedoc"; + password = "$DB_PASSWORD"; + host = "localhost"; + port = 5432; + database = "hedgedocdb"; }; + + /* + Do not use pkgs.writeText for secrets as + they will end up in the world-readable Nix store. + */ + environmentFile = pkgs.writeText "hedgedoc-env" '' + DB_PASSWORD=snakeoilpassword + ''; + }; + postgresql = { + enable = true; + initialScript = pkgs.writeText "pg-init-script.sql" '' + CREATE ROLE hedgedoc LOGIN PASSWORD 'snakeoilpassword'; + CREATE DATABASE hedgedocdb OWNER hedgedoc; + ''; }; }; + }; - hedgedocPostgresWithUNIXSocket = - { ... 
}: - { - systemd.services.hedgedoc.after = [ "postgresql.service" ]; - services = { - hedgedoc = { - enable = true; - settings.db = { - dialect = "postgres"; - user = "hedgedoc"; - password = "$DB_PASSWORD"; - host = "/run/postgresql"; - database = "hedgedocdb"; - }; + hedgedocPostgresWithUNIXSocket = + { ... }: + { + systemd.services.hedgedoc.after = [ "postgresql.service" ]; + services = { + hedgedoc = { + enable = true; + settings.db = { + dialect = "postgres"; + user = "hedgedoc"; + password = "$DB_PASSWORD"; + host = "/run/postgresql"; + database = "hedgedocdb"; + }; - environmentFile = pkgs.writeText "hedgedoc-env" '' - DB_PASSWORD=snakeoilpassword - ''; - }; - postgresql = { - enable = true; - initialScript = pkgs.writeText "pg-init-script.sql" '' - CREATE ROLE hedgedoc LOGIN PASSWORD 'snakeoilpassword'; - CREATE DATABASE hedgedocdb OWNER hedgedoc; - ''; - }; + environmentFile = pkgs.writeText "hedgedoc-env" '' + DB_PASSWORD=snakeoilpassword + ''; + }; + postgresql = { + enable = true; + initialScript = pkgs.writeText "pg-init-script.sql" '' + CREATE ROLE hedgedoc LOGIN PASSWORD 'snakeoilpassword'; + CREATE DATABASE hedgedocdb OWNER hedgedoc; + ''; }; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - with subtest("HedgeDoc sqlite"): - hedgedocSqlite.wait_for_unit("hedgedoc.service") - hedgedocSqlite.wait_for_open_port(3000) - hedgedocSqlite.wait_until_succeeds("curl -sSf http://localhost:3000/new") + with subtest("HedgeDoc sqlite"): + hedgedocSqlite.wait_for_unit("hedgedoc.service") + hedgedocSqlite.wait_for_open_port(3000) + hedgedocSqlite.wait_until_succeeds("curl -sSf http://localhost:3000/new") - with subtest("HedgeDoc postgres with TCP socket"): - hedgedocPostgresWithTCPSocket.wait_for_unit("postgresql.service") - hedgedocPostgresWithTCPSocket.wait_for_unit("hedgedoc.service") - hedgedocPostgresWithTCPSocket.wait_for_open_port(5432) - hedgedocPostgresWithTCPSocket.wait_for_open_port(3000) - hedgedocPostgresWithTCPSocket.wait_until_succeeds("curl -sSf http://localhost:3000/new") + with subtest("HedgeDoc postgres with TCP socket"): + hedgedocPostgresWithTCPSocket.wait_for_unit("postgresql.service") + hedgedocPostgresWithTCPSocket.wait_for_unit("hedgedoc.service") + hedgedocPostgresWithTCPSocket.wait_for_open_port(5432) + hedgedocPostgresWithTCPSocket.wait_for_open_port(3000) + hedgedocPostgresWithTCPSocket.wait_until_succeeds("curl -sSf http://localhost:3000/new") - with subtest("HedgeDoc postgres with UNIX socket"): - hedgedocPostgresWithUNIXSocket.wait_for_unit("postgresql.service") - hedgedocPostgresWithUNIXSocket.wait_for_unit("hedgedoc.service") - hedgedocPostgresWithUNIXSocket.wait_for_open_port(5432) - hedgedocPostgresWithUNIXSocket.wait_for_open_port(3000) - hedgedocPostgresWithUNIXSocket.wait_until_succeeds("curl -sSf http://localhost:3000/new") - ''; - } -) + with subtest("HedgeDoc postgres with UNIX socket"): + hedgedocPostgresWithUNIXSocket.wait_for_unit("postgresql.service") + hedgedocPostgresWithUNIXSocket.wait_for_unit("hedgedoc.service") + hedgedocPostgresWithUNIXSocket.wait_for_open_port(5432) + hedgedocPostgresWithUNIXSocket.wait_for_open_port(3000) + hedgedocPostgresWithUNIXSocket.wait_until_succeeds("curl -sSf http://localhost:3000/new") + ''; +} diff --git a/nixos/tests/herbstluftwm.nix b/nixos/tests/herbstluftwm.nix index 84e6985803c3..5f6678e41fff 100644 --- a/nixos/tests/herbstluftwm.nix +++ b/nixos/tests/herbstluftwm.nix @@ -1,45 +1,43 @@ -import ./make-test-python.nix ( - { lib, ... 
}: - { - name = "herbstluftwm"; +{ lib, ... }: +{ + name = "herbstluftwm"; - meta = { - maintainers = with lib.maintainers; [ thibautmarty ]; + meta = { + maintainers = with lib.maintainers; [ thibautmarty ]; + }; + + nodes.machine = + { pkgs, lib, ... }: + { + imports = [ + ./common/x11.nix + ./common/user-account.nix + ]; + test-support.displayManager.auto.user = "alice"; + services.displayManager.defaultSession = lib.mkForce "none+herbstluftwm"; + services.xserver.windowManager.herbstluftwm.enable = true; + environment.systemPackages = [ pkgs.dzen2 ]; # needed for upstream provided panel }; - nodes.machine = - { pkgs, lib, ... }: - { - imports = [ - ./common/x11.nix - ./common/user-account.nix - ]; - test-support.displayManager.auto.user = "alice"; - services.displayManager.defaultSession = lib.mkForce "none+herbstluftwm"; - services.xserver.windowManager.herbstluftwm.enable = true; - environment.systemPackages = [ pkgs.dzen2 ]; # needed for upstream provided panel - }; + testScript = '' + with subtest("ensure x starts"): + machine.wait_for_x() + machine.wait_for_file("/home/alice/.Xauthority") + machine.succeed("xauth merge ~alice/.Xauthority") - testScript = '' - with subtest("ensure x starts"): - machine.wait_for_x() - machine.wait_for_file("/home/alice/.Xauthority") - machine.succeed("xauth merge ~alice/.Xauthority") + with subtest("ensure client is available"): + machine.succeed("herbstclient --version") - with subtest("ensure client is available"): - machine.succeed("herbstclient --version") + with subtest("ensure keybindings are set"): + machine.wait_until_succeeds("herbstclient list_keybinds | grep xterm") - with subtest("ensure keybindings are set"): - machine.wait_until_succeeds("herbstclient list_keybinds | grep xterm") + with subtest("ensure panel starts"): + machine.wait_for_window("dzen title") - with subtest("ensure panel starts"): - machine.wait_for_window("dzen title") - - with subtest("ensure we can open a new terminal"): - machine.send_key("alt-ret") - machine.wait_for_window(r"alice.*?machine") - machine.sleep(2) - machine.screenshot("terminal") - ''; - } -) + with subtest("ensure we can open a new terminal"): + machine.send_key("alt-ret") + machine.wait_for_window(r"alice.*?machine") + machine.sleep(2) + machine.screenshot("terminal") + ''; +} diff --git a/nixos/tests/hledger-web.nix b/nixos/tests/hledger-web.nix index fcb3c7f656aa..705288dc4e71 100644 --- a/nixos/tests/hledger-web.nix +++ b/nixos/tests/hledger-web.nix @@ -1,56 +1,54 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - journal = pkgs.writeText "test.journal" '' - 2010/01/10 Loan - assets:cash 500$ - income:loan -500$ - 2010/01/10 NixOS Foundation donation - expenses:donation 250$ - assets:cash -250$ - ''; - in - rec { - name = "hledger-web"; - meta.maintainers = with lib.maintainers; [ marijanp ]; +{ pkgs, lib, ... }: +let + journal = pkgs.writeText "test.journal" '' + 2010/01/10 Loan + assets:cash 500$ + income:loan -500$ + 2010/01/10 NixOS Foundation donation + expenses:donation 250$ + assets:cash -250$ + ''; +in +rec { + name = "hledger-web"; + meta.maintainers = with lib.maintainers; [ marijanp ]; - nodes = rec { - server = - { config, pkgs, ... 
}: - { - services.hledger-web = { - host = "127.0.0.1"; - port = 5000; - enable = true; - allow = "edit"; - }; - networking.firewall.allowedTCPPorts = [ config.services.hledger-web.port ]; - systemd.services.hledger-web.preStart = '' - ln -s ${journal} /var/lib/hledger-web/.hledger.journal - ''; + nodes = rec { + server = + { config, pkgs, ... }: + { + services.hledger-web = { + host = "127.0.0.1"; + port = 5000; + enable = true; + allow = "edit"; }; - apiserver = - { ... }: - { - imports = [ server ]; - services.hledger-web.serveApi = true; - }; - }; + networking.firewall.allowedTCPPorts = [ config.services.hledger-web.port ]; + systemd.services.hledger-web.preStart = '' + ln -s ${journal} /var/lib/hledger-web/.hledger.journal + ''; + }; + apiserver = + { ... }: + { + imports = [ server ]; + services.hledger-web.serveApi = true; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - server.wait_for_unit("hledger-web.service") - server.wait_for_open_port(5000) - with subtest("Check if web UI is accessible"): - page = server.succeed("curl -L http://127.0.0.1:5000") - assert ".hledger.journal" in page + server.wait_for_unit("hledger-web.service") + server.wait_for_open_port(5000) + with subtest("Check if web UI is accessible"): + page = server.succeed("curl -L http://127.0.0.1:5000") + assert ".hledger.journal" in page - apiserver.wait_for_unit("hledger-web.service") - apiserver.wait_for_open_port(5000) - with subtest("Check if the JSON API is served"): - transactions = apiserver.succeed("curl -L http://127.0.0.1:5000/transactions") - assert "NixOS Foundation donation" in transactions - ''; - } -) + apiserver.wait_for_unit("hledger-web.service") + apiserver.wait_for_open_port(5000) + with subtest("Check if the JSON API is served"): + transactions = apiserver.succeed("curl -L http://127.0.0.1:5000/transactions") + assert "NixOS Foundation donation" in transactions + ''; +} diff --git a/nixos/tests/hockeypuck.nix b/nixos/tests/hockeypuck.nix index bc24ceb4b70f..d1b5037fbe26 100644 --- a/nixos/tests/hockeypuck.nix +++ b/nixos/tests/hockeypuck.nix @@ -1,72 +1,70 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... 
}: - let - gpgKeyring = ( - pkgs.runCommand "gpg-keyring" { buildInputs = [ pkgs.gnupg ]; } '' - mkdir -p $out - export GNUPGHOME=$out - cat > foo < foo <OpenPGP Keyserver" in response, "HTML title not found" + assert "OpenPGP Keyserver" in response, "HTML title not found" - # Copy the keyring - machine.succeed("cp -R ${gpgKeyring} /tmp/GNUPGHOME") + # Copy the keyring + machine.succeed("cp -R ${gpgKeyring} /tmp/GNUPGHOME") - # Extract our GPG key id - keyId = machine.succeed("GNUPGHOME=/tmp/GNUPGHOME gpg --list-keys | grep dsa1024 --after-context=1 | grep -v dsa1024").strip() + # Extract our GPG key id + keyId = machine.succeed("GNUPGHOME=/tmp/GNUPGHOME gpg --list-keys | grep dsa1024 --after-context=1 | grep -v dsa1024").strip() - # Send the key to our local keyserver - machine.succeed("GNUPGHOME=/tmp/GNUPGHOME gpg --keyserver hkp://127.0.0.1:11371 --send-keys " + keyId) + # Send the key to our local keyserver + machine.succeed("GNUPGHOME=/tmp/GNUPGHOME gpg --keyserver hkp://127.0.0.1:11371 --send-keys " + keyId) - # Receive the key from our local keyserver to a separate directory - machine.succeed("GNUPGHOME=$(mktemp -d) gpg --keyserver hkp://127.0.0.1:11371 --recv-keys " + keyId) - ''; - } -) + # Receive the key from our local keyserver to a separate directory + machine.succeed("GNUPGHOME=$(mktemp -d) gpg --keyserver hkp://127.0.0.1:11371 --recv-keys " + keyId) + ''; +} diff --git a/nixos/tests/homebox.nix b/nixos/tests/homebox.nix index aadcd4269774..160d09f116bb 100644 --- a/nixos/tests/homebox.nix +++ b/nixos/tests/homebox.nix @@ -1,43 +1,41 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - port = "7745"; - in - { - name = "homebox"; - meta = with pkgs.lib.maintainers; { - maintainers = [ patrickdag ]; - }; - nodes = - let - self = { - simple = { - services.homebox = { - enable = true; - settings.HBOX_WEB_PORT = port; - }; - }; - - postgres = { - imports = [ self.simple ]; - services.homebox.database.createLocally = true; +{ pkgs, ... }: +let + port = "7745"; +in +{ + name = "homebox"; + meta = with pkgs.lib.maintainers; { + maintainers = [ patrickdag ]; + }; + nodes = + let + self = { + simple = { + services.homebox = { + enable = true; + settings.HBOX_WEB_PORT = port; }; }; - in - self; - testScript = '' - def test_homebox(node): - node.wait_for_unit("homebox.service") - node.wait_for_open_port(${port}) - node.succeed("curl --fail -X GET 'http://localhost:${port}/'") - out = node.succeed("curl --fail 'http://localhost:${port}/api/v1/status'") - assert '"health":true' in out + postgres = { + imports = [ self.simple ]; + services.homebox.database.createLocally = true; + }; + }; + in + self; + testScript = '' + def test_homebox(node): + node.wait_for_unit("homebox.service") + node.wait_for_open_port(${port}) - test_homebox(simple) - simple.send_monitor_command("quit") - simple.wait_for_shutdown() - test_homebox(postgres) - ''; - } -) + node.succeed("curl --fail -X GET 'http://localhost:${port}/'") + out = node.succeed("curl --fail 'http://localhost:${port}/api/v1/status'") + assert '"health":true' in out + + test_homebox(simple) + simple.send_monitor_command("quit") + simple.wait_for_shutdown() + test_homebox(postgres) + ''; +} diff --git a/nixos/tests/hound.nix b/nixos/tests/hound.nix index b73ba27bed29..59574b6ce937 100644 --- a/nixos/tests/hound.nix +++ b/nixos/tests/hound.nix @@ -1,62 +1,60 @@ # Test whether `houndd` indexes nixpkgs -import ./make-test-python.nix ( - { pkgs, ... 
}: - { - name = "hound"; - meta = with pkgs.lib.maintainers; { - maintainers = [ grahamc ]; - }; - nodes.machine = - { pkgs, ... }: - { - services.hound = { - enable = true; - settings = { - "max-concurrent-indexers" = 1; - "dbpath" = "/var/lib/hound/data"; - "repos" = { - "nix" = { - "url" = "file:///var/lib/hound/my-git"; - }; +{ pkgs, ... }: +{ + name = "hound"; + meta = with pkgs.lib.maintainers; { + maintainers = [ grahamc ]; + }; + nodes.machine = + { pkgs, ... }: + { + services.hound = { + enable = true; + settings = { + "max-concurrent-indexers" = 1; + "dbpath" = "/var/lib/hound/data"; + "repos" = { + "nix" = { + "url" = "file:///var/lib/hound/my-git"; }; }; }; - - systemd.services.houndseed = { - description = "seed hound with a git repo"; - requiredBy = [ "hound.service" ]; - before = [ "hound.service" ]; - - serviceConfig = { - User = "hound"; - Group = "hound"; - WorkingDirectory = "/var/lib/hound"; - }; - path = [ pkgs.git ]; - script = '' - git config --global user.email "you@example.com" - git config --global user.name "Your Name" - git init my-git --bare - git init my-git-clone - cd my-git-clone - echo 'hi nix!' > hello - git add hello - git commit -m "hello there :)" - git remote add origin /var/lib/hound/my-git - git push origin master - ''; - }; }; - testScript = '' - start_all() + systemd.services.houndseed = { + description = "seed hound with a git repo"; + requiredBy = [ "hound.service" ]; + before = [ "hound.service" ]; - machine.wait_for_unit("network.target") - machine.wait_for_unit("hound.service") - machine.wait_for_open_port(6080) - machine.wait_until_succeeds( - "curl -f http://127.0.0.1:6080/api/v1/search\?stats\=fosho\&repos\=\*\&rng=%3A20\&q\=hi\&files\=\&i=nope | grep 'Filename' | grep 'hello'" - ) - ''; - } -) + serviceConfig = { + User = "hound"; + Group = "hound"; + WorkingDirectory = "/var/lib/hound"; + }; + path = [ pkgs.git ]; + script = '' + git config --global user.email "you@example.com" + git config --global user.name "Your Name" + git init my-git --bare + git init my-git-clone + cd my-git-clone + echo 'hi nix!' > hello + git add hello + git commit -m "hello there :)" + git remote add origin /var/lib/hound/my-git + git push origin master + ''; + }; + }; + + testScript = '' + start_all() + + machine.wait_for_unit("network.target") + machine.wait_for_unit("hound.service") + machine.wait_for_open_port(6080) + machine.wait_until_succeeds( + "curl -f http://127.0.0.1:6080/api/v1/search\?stats\=fosho\&repos\=\*\&rng=%3A20\&q\=hi\&files\=\&i=nope | grep 'Filename' | grep 'hello'" + ) + ''; +} diff --git a/nixos/tests/i3wm.nix b/nixos/tests/i3wm.nix index 9dfde33aff8a..eb897573deb5 100644 --- a/nixos/tests/i3wm.nix +++ b/nixos/tests/i3wm.nix @@ -1,56 +1,54 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "i3wm"; - meta = with pkgs.lib.maintainers; { - maintainers = [ aszlig ]; +{ pkgs, ... }: +{ + name = "i3wm"; + meta = with pkgs.lib.maintainers; { + maintainers = [ aszlig ]; + }; + + nodes.machine = + { lib, ... }: + { + imports = [ + ./common/x11.nix + ./common/user-account.nix + ]; + test-support.displayManager.auto.user = "alice"; + services.displayManager.defaultSession = lib.mkForce "none+i3"; + services.xserver.windowManager.i3.enable = true; }; - nodes.machine = - { lib, ... 
}: - { - imports = [ - ./common/x11.nix - ./common/user-account.nix - ]; - test-support.displayManager.auto.user = "alice"; - services.displayManager.defaultSession = lib.mkForce "none+i3"; - services.xserver.windowManager.i3.enable = true; - }; + testScript = + { ... }: + '' + with subtest("ensure x starts"): + machine.wait_for_x() + machine.wait_for_file("/home/alice/.Xauthority") + machine.succeed("xauth merge ~alice/.Xauthority") - testScript = - { ... }: - '' - with subtest("ensure x starts"): - machine.wait_for_x() - machine.wait_for_file("/home/alice/.Xauthority") - machine.succeed("xauth merge ~alice/.Xauthority") + with subtest("ensure we get first configuration window"): + machine.wait_for_window(r".*?first configuration.*?") + machine.sleep(2) + machine.screenshot("started") - with subtest("ensure we get first configuration window"): - machine.wait_for_window(r".*?first configuration.*?") - machine.sleep(2) - machine.screenshot("started") + with subtest("ensure we generate and save a config"): + # press return to indicate we want to gen a new config + machine.send_key("\n") + machine.sleep(2) + machine.screenshot("preconfig") + # press alt then return to indicate we want to use alt as our Mod key + machine.send_key("alt") + machine.send_key("\n") + machine.sleep(2) + # make sure the config file is created before we continue + machine.wait_for_file("/home/alice/.config/i3/config") + machine.screenshot("postconfig") + machine.sleep(2) - with subtest("ensure we generate and save a config"): - # press return to indicate we want to gen a new config - machine.send_key("\n") - machine.sleep(2) - machine.screenshot("preconfig") - # press alt then return to indicate we want to use alt as our Mod key - machine.send_key("alt") - machine.send_key("\n") - machine.sleep(2) - # make sure the config file is created before we continue - machine.wait_for_file("/home/alice/.config/i3/config") - machine.screenshot("postconfig") - machine.sleep(2) - - with subtest("ensure we can open a new terminal"): - machine.send_key("alt-ret") - machine.sleep(2) - machine.wait_for_window(r"alice.*?machine") - machine.sleep(2) - machine.screenshot("terminal") - ''; - } -) + with subtest("ensure we can open a new terminal"): + machine.send_key("alt-ret") + machine.sleep(2) + machine.wait_for_window(r"alice.*?machine") + machine.sleep(2) + machine.screenshot("terminal") + ''; +} diff --git a/nixos/tests/ifm.nix b/nixos/tests/ifm.nix index a1bc8e23a897..228f0a06c121 100644 --- a/nixos/tests/ifm.nix +++ b/nixos/tests/ifm.nix @@ -1,38 +1,36 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - { - name = "ifm"; - meta = with pkgs.lib.maintainers; { - maintainers = [ litchipi ]; - }; +{ + name = "ifm"; + meta = with pkgs.lib.maintainers; { + maintainers = [ litchipi ]; + }; - nodes = { - server = rec { - services.ifm = { - enable = true; - port = 9001; - dataDir = "/data"; - }; - - system.activationScripts.ifm-setup-dir = '' - mkdir -p ${services.ifm.dataDir} - chmod u+w,g+w,o+w ${services.ifm.dataDir} - ''; + nodes = { + server = rec { + services.ifm = { + enable = true; + port = 9001; + dataDir = "/data"; }; + + system.activationScripts.ifm-setup-dir = '' + mkdir -p ${services.ifm.dataDir} + chmod u+w,g+w,o+w ${services.ifm.dataDir} + ''; }; + }; - testScript = '' - start_all() - server.wait_for_unit("ifm.service") - server.wait_for_open_port(9001) - server.succeed("curl --fail http://localhost:9001") + testScript = '' + start_all() + server.wait_for_unit("ifm.service") + server.wait_for_open_port(9001) + server.succeed("curl --fail http://localhost:9001") - server.succeed("echo \"testfile\" > testfile && shasum testfile >> checksums") - server.succeed("curl --fail http://localhost:9001 -X POST -F \"api=upload\" -F \"dir=\" -F \"file=@testfile\" | grep \"OK\""); - server.succeed("rm testfile") - server.succeed("curl --fail http://localhost:9001 -X POST -F \"api=download\" -F \"filename=testfile\" -F \"dir=\" --output testfile"); - server.succeed("shasum testfile >> checksums && shasum --check checksums") - ''; - } -) + server.succeed("echo \"testfile\" > testfile && shasum testfile >> checksums") + server.succeed("curl --fail http://localhost:9001 -X POST -F \"api=upload\" -F \"dir=\" -F \"file=@testfile\" | grep \"OK\""); + server.succeed("rm testfile") + server.succeed("curl --fail http://localhost:9001 -X POST -F \"api=download\" -F \"filename=testfile\" -F \"dir=\" --output testfile"); + server.succeed("shasum testfile >> checksums && shasum --check checksums") + ''; +} diff --git a/nixos/tests/iftop.nix b/nixos/tests/iftop.nix index 3b4fedbbb940..8afa314293ac 100644 --- a/nixos/tests/iftop.nix +++ b/nixos/tests/iftop.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... 
}: - { - name = "iftop"; - meta.maintainers = with lib.maintainers; [ ma27 ]; +{ + name = "iftop"; + meta.maintainers = with lib.maintainers; [ ma27 ]; - nodes = { - withIftop = { - imports = [ ./common/user-account.nix ]; - programs.iftop.enable = true; - }; - withoutIftop = { - imports = [ ./common/user-account.nix ]; - environment.systemPackages = [ pkgs.iftop ]; - }; + nodes = { + withIftop = { + imports = [ ./common/user-account.nix ]; + programs.iftop.enable = true; }; + withoutIftop = { + imports = [ ./common/user-account.nix ]; + environment.systemPackages = [ pkgs.iftop ]; + }; + }; - testScript = '' - with subtest("machine with iftop enabled"): - withIftop.wait_for_unit("default.target") - # limit to eth1 (eth0 is the test driver's control interface) - # and don't try name lookups - withIftop.succeed("su -l alice -c 'iftop -t -s 1 -n -i eth1'") + testScript = '' + with subtest("machine with iftop enabled"): + withIftop.wait_for_unit("default.target") + # limit to eth1 (eth0 is the test driver's control interface) + # and don't try name lookups + withIftop.succeed("su -l alice -c 'iftop -t -s 1 -n -i eth1'") - with subtest("machine without iftop"): - withoutIftop.wait_for_unit("default.target") - # check that iftop is there but user alice lacks capabilitie - withoutIftop.succeed("iftop -t -s 1 -n -i eth1") - withoutIftop.fail("su -l alice -c 'iftop -t -s 1 -n -i eth1'") - ''; - } -) + with subtest("machine without iftop"): + withoutIftop.wait_for_unit("default.target") + # check that iftop is there but user alice lacks capabilitie + withoutIftop.succeed("iftop -t -s 1 -n -i eth1") + withoutIftop.fail("su -l alice -c 'iftop -t -s 1 -n -i eth1'") + ''; +} diff --git a/nixos/tests/incron.nix b/nixos/tests/incron.nix index adbea25b9bde..dc368eb768af 100644 --- a/nixos/tests/incron.nix +++ b/nixos/tests/incron.nix @@ -1,55 +1,53 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - { - name = "incron"; - meta.maintainers = [ lib.maintainers.aanderse ]; +{ + name = "incron"; + meta.maintainers = [ lib.maintainers.aanderse ]; - nodes.machine = - { ... }: - { - services.incron.enable = true; - services.incron.extraPackages = [ pkgs.coreutils ]; - services.incron.systab = '' - /test IN_CREATE,IN_MODIFY,IN_CLOSE_WRITE,IN_MOVED_FROM,IN_MOVED_TO echo "$@/$# $%" >> /root/incron.log - ''; + nodes.machine = + { ... 
}: + { + services.incron.enable = true; + services.incron.extraPackages = [ pkgs.coreutils ]; + services.incron.systab = '' + /test IN_CREATE,IN_MODIFY,IN_CLOSE_WRITE,IN_MOVED_FROM,IN_MOVED_TO echo "$@/$# $%" >> /root/incron.log + ''; - # ensure the directory to be monitored exists before incron is started - systemd.tmpfiles.settings.incron-test = { - "/test".d = { }; - }; + # ensure the directory to be monitored exists before incron is started + systemd.tmpfiles.settings.incron-test = { + "/test".d = { }; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("multi-user.target") - machine.wait_for_unit("incron.service") + machine.wait_for_unit("multi-user.target") + machine.wait_for_unit("incron.service") - machine.succeed("test -d /test") - # create some activity for incron to monitor - machine.succeed("touch /test/file") - machine.succeed("echo foo >> /test/file") - machine.succeed("mv /test/file /root") - machine.succeed("mv /root/file /test") + machine.succeed("test -d /test") + # create some activity for incron to monitor + machine.succeed("touch /test/file") + machine.succeed("echo foo >> /test/file") + machine.succeed("mv /test/file /root") + machine.succeed("mv /root/file /test") - machine.sleep(1) + machine.sleep(1) - # touch /test/file - machine.succeed("grep '/test/file IN_CREATE' /root/incron.log") + # touch /test/file + machine.succeed("grep '/test/file IN_CREATE' /root/incron.log") - # echo foo >> /test/file - machine.succeed("grep '/test/file IN_MODIFY' /root/incron.log") - machine.succeed("grep '/test/file IN_CLOSE_WRITE' /root/incron.log") + # echo foo >> /test/file + machine.succeed("grep '/test/file IN_MODIFY' /root/incron.log") + machine.succeed("grep '/test/file IN_CLOSE_WRITE' /root/incron.log") - # mv /test/file /root - machine.succeed("grep '/test/file IN_MOVED_FROM' /root/incron.log") + # mv /test/file /root + machine.succeed("grep '/test/file IN_MOVED_FROM' /root/incron.log") - # mv /root/file /test - machine.succeed("grep '/test/file IN_MOVED_TO' /root/incron.log") + # mv /root/file /test + machine.succeed("grep '/test/file IN_MOVED_TO' /root/incron.log") - # ensure something unexpected is not present - machine.fail("grep 'IN_OPEN' /root/incron.log") - ''; - } -) + # ensure something unexpected is not present + machine.fail("grep 'IN_OPEN' /root/incron.log") + ''; +} diff --git a/nixos/tests/influxdb.nix b/nixos/tests/influxdb.nix index 87b1192c3062..f8ec355469f7 100644 --- a/nixos/tests/influxdb.nix +++ b/nixos/tests/influxdb.nix @@ -1,45 +1,43 @@ # This test runs influxdb and checks if influxdb is up and running -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "influxdb"; - meta = with pkgs.lib.maintainers; { - maintainers = [ offline ]; - }; +{ pkgs, ... }: +{ + name = "influxdb"; + meta = with pkgs.lib.maintainers; { + maintainers = [ offline ]; + }; - nodes = { - one = - { ... }: - { - services.influxdb.enable = true; - environment.systemPackages = [ pkgs.httpie ]; - }; - }; + nodes = { + one = + { ... 
}: + { + services.influxdb.enable = true; + environment.systemPackages = [ pkgs.httpie ]; + }; + }; - testScript = '' - import shlex + testScript = '' + import shlex - start_all() + start_all() - one.wait_for_unit("influxdb.service") + one.wait_for_unit("influxdb.service") - # create database - one.succeed( - "curl -XPOST http://localhost:8086/query --data-urlencode 'q=CREATE DATABASE test'" - ) + # create database + one.succeed( + "curl -XPOST http://localhost:8086/query --data-urlencode 'q=CREATE DATABASE test'" + ) - # write some points and run simple query - out = one.succeed( - "curl -XPOST 'http://localhost:8086/write?db=test' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'" - ) + # write some points and run simple query + out = one.succeed( + "curl -XPOST 'http://localhost:8086/write?db=test' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'" + ) - qv = "SELECT value FROM cpu_load_short WHERE region='us-west'" - cmd = f'curl -GET "http://localhost:8086/query?db=test" --data-urlencode {shlex.quote("q="+ qv)}' - out = one.succeed(cmd) + qv = "SELECT value FROM cpu_load_short WHERE region='us-west'" + cmd = f'curl -GET "http://localhost:8086/query?db=test" --data-urlencode {shlex.quote("q="+ qv)}' + out = one.succeed(cmd) - assert "2015-06-11T20:46:02Z" in out - assert "0.64" in out - ''; - } -) + assert "2015-06-11T20:46:02Z" in out + assert "0.64" in out + ''; +} diff --git a/nixos/tests/influxdb2.nix b/nixos/tests/influxdb2.nix index 3646960c1569..f054039fa9a3 100644 --- a/nixos/tests/influxdb2.nix +++ b/nixos/tests/influxdb2.nix @@ -1,241 +1,239 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "influxdb2"; - meta = with pkgs.lib.maintainers; { - maintainers = [ offline ]; - }; +{ pkgs, ... }: +{ + name = "influxdb2"; + meta = with pkgs.lib.maintainers; { + maintainers = [ offline ]; + }; - nodes.machine = - { lib, ... }: - { - environment.systemPackages = [ pkgs.influxdb2-cli ]; - # Make sure that the service is restarted immediately if tokens need to be rewritten - # without relying on any Restart=on-failure behavior - systemd.services.influxdb2.serviceConfig.RestartSec = 6000; - services.influxdb2.enable = true; - services.influxdb2.provision = { - enable = true; - initialSetup = { - organization = "default"; - bucket = "default"; - passwordFile = pkgs.writeText "admin-pw" "ExAmPl3PA55W0rD"; - tokenFile = pkgs.writeText "admin-token" "verysecureadmintoken"; - }; - organizations.someorg = { - buckets.somebucket = { }; - auths.sometoken = { - description = "some auth token"; - readBuckets = [ "somebucket" ]; - writeBuckets = [ "somebucket" ]; - }; - }; - users.someuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga"; + nodes.machine = + { lib, ... }: + { + environment.systemPackages = [ pkgs.influxdb2-cli ]; + # Make sure that the service is restarted immediately if tokens need to be rewritten + # without relying on any Restart=on-failure behavior + systemd.services.influxdb2.serviceConfig.RestartSec = 6000; + services.influxdb2.enable = true; + services.influxdb2.provision = { + enable = true; + initialSetup = { + organization = "default"; + bucket = "default"; + passwordFile = pkgs.writeText "admin-pw" "ExAmPl3PA55W0rD"; + tokenFile = pkgs.writeText "admin-token" "verysecureadmintoken"; }; - - specialisation.withModifications.configuration = - { ... 
}: - { - services.influxdb2.provision = { - organizations.someorg.buckets.somebucket.present = false; - organizations.someorg.auths.sometoken.present = false; - users.someuser.present = false; - - organizations.myorg = { - description = "Myorg description"; - buckets.mybucket = { - description = "Mybucket description"; - }; - auths.mytoken = { - operator = true; - description = "operator token"; - tokenFile = pkgs.writeText "tmp-tok" "someusertoken"; - }; - }; - users.myuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga"; - }; - }; - - specialisation.withParentDelete.configuration = - { ... }: - { - services.influxdb2.provision = { - organizations.someorg.present = false; - # Deleting the parent implies: - #organizations.someorg.buckets.somebucket.present = false; - #organizations.someorg.auths.sometoken.present = false; - }; - }; - - specialisation.withNewTokens.configuration = - { ... }: - { - services.influxdb2.provision = { - organizations.default = { - auths.operator = { - operator = true; - description = "new optoken"; - tokenFile = pkgs.writeText "tmp-tok" "newoptoken"; - }; - auths.allaccess = { - operator = true; - description = "new allaccess"; - tokenFile = pkgs.writeText "tmp-tok" "newallaccess"; - }; - auths.specifics = { - description = "new specifics"; - readPermissions = [ - "users" - "tasks" - ]; - writePermissions = [ "tasks" ]; - tokenFile = pkgs.writeText "tmp-tok" "newspecificstoken"; - }; - }; - }; + organizations.someorg = { + buckets.somebucket = { }; + auths.sometoken = { + description = "some auth token"; + readBuckets = [ "somebucket" ]; + writeBuckets = [ "somebucket" ]; }; + }; + users.someuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga"; }; - testScript = - { nodes, ... }: - let - specialisations = "${nodes.machine.system.build.toplevel}/specialisation"; - tokenArg = "--token verysecureadmintoken"; - in - '' - def assert_contains(haystack, needle): - if needle not in haystack: - print("The haystack that will cause the following exception is:") - print("---") - print(haystack) - print("---") - raise Exception(f"Expected string '{needle}' was not found") + specialisation.withModifications.configuration = + { ... }: + { + services.influxdb2.provision = { + organizations.someorg.buckets.somebucket.present = false; + organizations.someorg.auths.sometoken.present = false; + users.someuser.present = false; - def assert_lacks(haystack, needle): - if needle in haystack: - print("The haystack that will cause the following exception is:") - print("---") - print(haystack, end="") - print("---") - raise Exception(f"Unexpected string '{needle}' was found") + organizations.myorg = { + description = "Myorg description"; + buckets.mybucket = { + description = "Mybucket description"; + }; + auths.mytoken = { + operator = true; + description = "operator token"; + tokenFile = pkgs.writeText "tmp-tok" "someusertoken"; + }; + }; + users.myuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga"; + }; + }; + specialisation.withParentDelete.configuration = + { ... }: + { + services.influxdb2.provision = { + organizations.someorg.present = false; + # Deleting the parent implies: + #organizations.someorg.buckets.somebucket.present = false; + #organizations.someorg.auths.sometoken.present = false; + }; + }; + + specialisation.withNewTokens.configuration = + { ... 
}: + { + services.influxdb2.provision = { + organizations.default = { + auths.operator = { + operator = true; + description = "new optoken"; + tokenFile = pkgs.writeText "tmp-tok" "newoptoken"; + }; + auths.allaccess = { + operator = true; + description = "new allaccess"; + tokenFile = pkgs.writeText "tmp-tok" "newallaccess"; + }; + auths.specifics = { + description = "new specifics"; + readPermissions = [ + "users" + "tasks" + ]; + writePermissions = [ "tasks" ]; + tokenFile = pkgs.writeText "tmp-tok" "newspecificstoken"; + }; + }; + }; + }; + }; + + testScript = + { nodes, ... }: + let + specialisations = "${nodes.machine.system.build.toplevel}/specialisation"; + tokenArg = "--token verysecureadmintoken"; + in + '' + def assert_contains(haystack, needle): + if needle not in haystack: + print("The haystack that will cause the following exception is:") + print("---") + print(haystack) + print("---") + raise Exception(f"Expected string '{needle}' was not found") + + def assert_lacks(haystack, needle): + if needle in haystack: + print("The haystack that will cause the following exception is:") + print("---") + print(haystack, end="") + print("---") + raise Exception(f"Unexpected string '{needle}' was found") + + machine.wait_for_unit("influxdb2.service") + + machine.fail("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:wrongpassword") + machine.succeed("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:ExAmPl3PA55W0rD") + + out = machine.succeed("influx org list ${tokenArg}") + assert_contains(out, "default") + assert_lacks(out, "myorg") + assert_contains(out, "someorg") + + out = machine.succeed("influx bucket list ${tokenArg} --org default") + assert_contains(out, "default") + + machine.fail("influx bucket list ${tokenArg} --org myorg") + + out = machine.succeed("influx bucket list ${tokenArg} --org someorg") + assert_contains(out, "somebucket") + + out = machine.succeed("influx user list ${tokenArg}") + assert_contains(out, "admin") + assert_lacks(out, "myuser") + assert_contains(out, "someuser") + + out = machine.succeed("influx auth list ${tokenArg}") + assert_lacks(out, "operator token") + assert_contains(out, "some auth token") + + with subtest("withModifications"): + machine.succeed('${specialisations}/withModifications/bin/switch-to-configuration test') machine.wait_for_unit("influxdb2.service") - machine.fail("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:wrongpassword") - machine.succeed("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:ExAmPl3PA55W0rD") - out = machine.succeed("influx org list ${tokenArg}") assert_contains(out, "default") - assert_lacks(out, "myorg") + assert_contains(out, "myorg") + assert_contains(out, "someorg") + + out = machine.succeed("influx bucket list ${tokenArg} --org myorg") + assert_contains(out, "mybucket") + + out = machine.succeed("influx bucket list ${tokenArg} --org someorg") + assert_lacks(out, "somebucket") + + out = machine.succeed("influx user list ${tokenArg}") + assert_contains(out, "admin") + assert_contains(out, "myuser") + assert_lacks(out, "someuser") + + out = machine.succeed("influx auth list ${tokenArg}") + assert_contains(out, "operator token") + assert_lacks(out, "some auth token") + + # Make sure the user token is also usable + machine.succeed("influx auth list --token someusertoken") + + with subtest("keepsUnrelated"): + machine.succeed('${nodes.machine.system.build.toplevel}/bin/switch-to-configuration test') + machine.wait_for_unit("influxdb2.service") + 
+ out = machine.succeed("influx org list ${tokenArg}") + assert_contains(out, "default") + assert_contains(out, "myorg") assert_contains(out, "someorg") out = machine.succeed("influx bucket list ${tokenArg} --org default") assert_contains(out, "default") - machine.fail("influx bucket list ${tokenArg} --org myorg") + out = machine.succeed("influx bucket list ${tokenArg} --org myorg") + assert_contains(out, "mybucket") out = machine.succeed("influx bucket list ${tokenArg} --org someorg") assert_contains(out, "somebucket") out = machine.succeed("influx user list ${tokenArg}") assert_contains(out, "admin") - assert_lacks(out, "myuser") + assert_contains(out, "myuser") assert_contains(out, "someuser") out = machine.succeed("influx auth list ${tokenArg}") - assert_lacks(out, "operator token") + assert_contains(out, "operator token") assert_contains(out, "some auth token") - with subtest("withModifications"): - machine.succeed('${specialisations}/withModifications/bin/switch-to-configuration test') - machine.wait_for_unit("influxdb2.service") + with subtest("withParentDelete"): + machine.succeed('${specialisations}/withParentDelete/bin/switch-to-configuration test') + machine.wait_for_unit("influxdb2.service") - out = machine.succeed("influx org list ${tokenArg}") - assert_contains(out, "default") - assert_contains(out, "myorg") - assert_contains(out, "someorg") + out = machine.succeed("influx org list ${tokenArg}") + assert_contains(out, "default") + assert_contains(out, "myorg") + assert_lacks(out, "someorg") - out = machine.succeed("influx bucket list ${tokenArg} --org myorg") - assert_contains(out, "mybucket") + out = machine.succeed("influx bucket list ${tokenArg} --org default") + assert_contains(out, "default") - out = machine.succeed("influx bucket list ${tokenArg} --org someorg") - assert_lacks(out, "somebucket") + out = machine.succeed("influx bucket list ${tokenArg} --org myorg") + assert_contains(out, "mybucket") - out = machine.succeed("influx user list ${tokenArg}") - assert_contains(out, "admin") - assert_contains(out, "myuser") - assert_lacks(out, "someuser") + machine.fail("influx bucket list ${tokenArg} --org someorg") - out = machine.succeed("influx auth list ${tokenArg}") - assert_contains(out, "operator token") - assert_lacks(out, "some auth token") + out = machine.succeed("influx user list ${tokenArg}") + assert_contains(out, "admin") + assert_contains(out, "myuser") + assert_contains(out, "someuser") - # Make sure the user token is also usable - machine.succeed("influx auth list --token someusertoken") + out = machine.succeed("influx auth list ${tokenArg}") + assert_contains(out, "operator token") + assert_lacks(out, "some auth token") - with subtest("keepsUnrelated"): - machine.succeed('${nodes.machine.system.build.toplevel}/bin/switch-to-configuration test') - machine.wait_for_unit("influxdb2.service") + with subtest("withNewTokens"): + machine.succeed('${specialisations}/withNewTokens/bin/switch-to-configuration test') + machine.wait_for_unit("influxdb2.service") - out = machine.succeed("influx org list ${tokenArg}") - assert_contains(out, "default") - assert_contains(out, "myorg") - assert_contains(out, "someorg") - - out = machine.succeed("influx bucket list ${tokenArg} --org default") - assert_contains(out, "default") - - out = machine.succeed("influx bucket list ${tokenArg} --org myorg") - assert_contains(out, "mybucket") - - out = machine.succeed("influx bucket list ${tokenArg} --org someorg") - assert_contains(out, "somebucket") - - out = machine.succeed("influx 
user list ${tokenArg}") - assert_contains(out, "admin") - assert_contains(out, "myuser") - assert_contains(out, "someuser") - - out = machine.succeed("influx auth list ${tokenArg}") - assert_contains(out, "operator token") - assert_contains(out, "some auth token") - - with subtest("withParentDelete"): - machine.succeed('${specialisations}/withParentDelete/bin/switch-to-configuration test') - machine.wait_for_unit("influxdb2.service") - - out = machine.succeed("influx org list ${tokenArg}") - assert_contains(out, "default") - assert_contains(out, "myorg") - assert_lacks(out, "someorg") - - out = machine.succeed("influx bucket list ${tokenArg} --org default") - assert_contains(out, "default") - - out = machine.succeed("influx bucket list ${tokenArg} --org myorg") - assert_contains(out, "mybucket") - - machine.fail("influx bucket list ${tokenArg} --org someorg") - - out = machine.succeed("influx user list ${tokenArg}") - assert_contains(out, "admin") - assert_contains(out, "myuser") - assert_contains(out, "someuser") - - out = machine.succeed("influx auth list ${tokenArg}") - assert_contains(out, "operator token") - assert_lacks(out, "some auth token") - - with subtest("withNewTokens"): - machine.succeed('${specialisations}/withNewTokens/bin/switch-to-configuration test') - machine.wait_for_unit("influxdb2.service") - - out = machine.succeed("influx auth list ${tokenArg}") - assert_contains(out, "operator token") - assert_contains(out, "some auth token") - assert_contains(out, "new optoken") - assert_contains(out, "new allaccess") - assert_contains(out, "new specifics") - ''; - } -) + out = machine.succeed("influx auth list ${tokenArg}") + assert_contains(out, "operator token") + assert_contains(out, "some auth token") + assert_contains(out, "new optoken") + assert_contains(out, "new allaccess") + assert_contains(out, "new specifics") + ''; +} diff --git a/nixos/tests/initrd-luks-empty-passphrase.nix b/nixos/tests/initrd-luks-empty-passphrase.nix index 2b981e009f79..e9f4a1776d52 100644 --- a/nixos/tests/initrd-luks-empty-passphrase.nix +++ b/nixos/tests/initrd-luks-empty-passphrase.nix @@ -4,110 +4,108 @@ pkgs ? import ../.. { inherit system config; }, systemdStage1 ? false, }: -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let +{ lib, pkgs, ... }: +let - keyfile = pkgs.writeText "luks-keyfile" '' - MIGHAoGBAJ4rGTSo/ldyjQypd0kuS7k2OSsmQYzMH6TNj3nQ/vIUjDn7fqa3slt2 - gV6EK3TmTbGc4tzC1v4SWx2m+2Bjdtn4Fs4wiBwn1lbRdC6i5ZYCqasTWIntWn+6 - FllUkMD5oqjOR/YcboxG8Z3B5sJuvTP9llsF+gnuveWih9dpbBr7AgEC - ''; + keyfile = pkgs.writeText "luks-keyfile" '' + MIGHAoGBAJ4rGTSo/ldyjQypd0kuS7k2OSsmQYzMH6TNj3nQ/vIUjDn7fqa3slt2 + gV6EK3TmTbGc4tzC1v4SWx2m+2Bjdtn4Fs4wiBwn1lbRdC6i5ZYCqasTWIntWn+6 + FllUkMD5oqjOR/YcboxG8Z3B5sJuvTP9llsF+gnuveWih9dpbBr7AgEC + ''; - in - { - name = "initrd-luks-empty-passphrase"; +in +{ + name = "initrd-luks-empty-passphrase"; - nodes.machine = - { pkgs, ... }: - { - imports = lib.optionals (!systemdStage1) [ ./common/auto-format-root-device.nix ]; + nodes.machine = + { pkgs, ... }: + { + imports = lib.optionals (!systemdStage1) [ ./common/auto-format-root-device.nix ]; - virtualisation = { - emptyDiskImages = [ 512 ]; - useBootLoader = true; - useEFIBoot = true; - # This requires to have access - # to a host Nix store as - # the new root device is /dev/vdb - # an empty 512MiB drive, containing no Nix store. 
- mountHostNixStore = true; - fileSystems."/".autoFormat = lib.mkIf systemdStage1 true; - }; - - boot.loader.systemd-boot.enable = true; - boot.initrd.systemd = lib.mkIf systemdStage1 { - enable = true; - emergencyAccess = true; - }; - environment.systemPackages = with pkgs; [ cryptsetup ]; - - specialisation.boot-luks-wrong-keyfile.configuration = { - boot.initrd.luks.devices = lib.mkVMOverride { - cryptroot = { - device = "/dev/vdb"; - keyFile = "/etc/cryptroot.key"; - tryEmptyPassphrase = true; - fallbackToPassword = !systemdStage1; - }; - }; - virtualisation.rootDevice = "/dev/mapper/cryptroot"; - boot.initrd.secrets."/etc/cryptroot.key" = keyfile; - }; - - specialisation.boot-luks-missing-keyfile.configuration = { - boot.initrd.luks.devices = lib.mkVMOverride { - cryptroot = { - device = "/dev/vdb"; - keyFile = "/etc/cryptroot.key"; - tryEmptyPassphrase = true; - fallbackToPassword = !systemdStage1; - }; - }; - virtualisation.rootDevice = "/dev/mapper/cryptroot"; - }; + virtualisation = { + emptyDiskImages = [ 512 ]; + useBootLoader = true; + useEFIBoot = true; + # This requires to have access + # to a host Nix store as + # the new root device is /dev/vdb + # an empty 512MiB drive, containing no Nix store. + mountHostNixStore = true; + fileSystems."/".autoFormat = lib.mkIf systemdStage1 true; }; - testScript = '' - # Encrypt key with empty key so boot should try keyfile and then fallback to empty passphrase + boot.loader.systemd-boot.enable = true; + boot.initrd.systemd = lib.mkIf systemdStage1 { + enable = true; + emergencyAccess = true; + }; + environment.systemPackages = with pkgs; [ cryptsetup ]; + + specialisation.boot-luks-wrong-keyfile.configuration = { + boot.initrd.luks.devices = lib.mkVMOverride { + cryptroot = { + device = "/dev/vdb"; + keyFile = "/etc/cryptroot.key"; + tryEmptyPassphrase = true; + fallbackToPassword = !systemdStage1; + }; + }; + virtualisation.rootDevice = "/dev/mapper/cryptroot"; + boot.initrd.secrets."/etc/cryptroot.key" = keyfile; + }; + + specialisation.boot-luks-missing-keyfile.configuration = { + boot.initrd.luks.devices = lib.mkVMOverride { + cryptroot = { + device = "/dev/vdb"; + keyFile = "/etc/cryptroot.key"; + tryEmptyPassphrase = true; + fallbackToPassword = !systemdStage1; + }; + }; + virtualisation.rootDevice = "/dev/mapper/cryptroot"; + }; + }; + + testScript = '' + # Encrypt key with empty key so boot should try keyfile and then fallback to empty passphrase - def grub_select_boot_luks_wrong_key_file(): - """ - Selects "boot-luks" from the GRUB menu - to trigger a login request. - """ - machine.send_monitor_command("sendkey down") - machine.send_monitor_command("sendkey down") - machine.send_monitor_command("sendkey ret") + def grub_select_boot_luks_wrong_key_file(): + """ + Selects "boot-luks" from the GRUB menu + to trigger a login request. + """ + machine.send_monitor_command("sendkey down") + machine.send_monitor_command("sendkey down") + machine.send_monitor_command("sendkey ret") - def grub_select_boot_luks_missing_key_file(): - """ - Selects "boot-luks" from the GRUB menu - to trigger a login request. - """ - machine.send_monitor_command("sendkey down") - machine.send_monitor_command("sendkey ret") + def grub_select_boot_luks_missing_key_file(): + """ + Selects "boot-luks" from the GRUB menu + to trigger a login request. 
+ """ + machine.send_monitor_command("sendkey down") + machine.send_monitor_command("sendkey ret") - # Create encrypted volume - machine.wait_for_unit("multi-user.target") - machine.succeed("echo "" | cryptsetup luksFormat /dev/vdb --batch-mode") - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks-wrong-keyfile.conf") - machine.succeed("sync") - machine.crash() + # Create encrypted volume + machine.wait_for_unit("multi-user.target") + machine.succeed("echo "" | cryptsetup luksFormat /dev/vdb --batch-mode") + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks-wrong-keyfile.conf") + machine.succeed("sync") + machine.crash() - # Check if rootfs is on /dev/mapper/cryptroot - machine.wait_for_unit("multi-user.target") - assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") + # Check if rootfs is on /dev/mapper/cryptroot + machine.wait_for_unit("multi-user.target") + assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") - # Choose boot-luks-missing-keyfile specialisation - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks-missing-keyfile.conf") - machine.succeed("sync") - machine.crash() + # Choose boot-luks-missing-keyfile specialisation + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks-missing-keyfile.conf") + machine.succeed("sync") + machine.crash() - # Check if rootfs is on /dev/mapper/cryptroot - machine.wait_for_unit("multi-user.target") - assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") - ''; - } -) + # Check if rootfs is on /dev/mapper/cryptroot + machine.wait_for_unit("multi-user.target") + assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") + ''; +} diff --git a/nixos/tests/initrd-network.nix b/nixos/tests/initrd-network.nix index 7e86f7f87075..b84588f1d250 100644 --- a/nixos/tests/initrd-network.nix +++ b/nixos/tests/initrd-network.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "initrd-network"; +{ pkgs, lib, ... }: +{ + name = "initrd-network"; - meta.maintainers = [ ]; + meta.maintainers = [ ]; - nodes.machine = - { ... }: - { - imports = [ ../modules/profiles/minimal.nix ]; - boot.initrd.network.enable = true; - boot.initrd.network.postCommands = '' - ip addr show - ip route show - ip addr | grep 10.0.2.15 || exit 1 - ping -c1 10.0.2.2 || exit 1 - ''; - # Check if cleanup was done correctly - boot.initrd.postMountCommands = lib.mkAfter '' - ip addr show - ip route show - ip addr | grep 10.0.2.15 && exit 1 - ping -c1 10.0.2.2 && exit 1 - ''; - }; + nodes.machine = + { ... 
}: + { + imports = [ ../modules/profiles/minimal.nix ]; + boot.initrd.network.enable = true; + boot.initrd.network.postCommands = '' + ip addr show + ip route show + ip addr | grep 10.0.2.15 || exit 1 + ping -c1 10.0.2.2 || exit 1 + ''; + # Check if cleanup was done correctly + boot.initrd.postMountCommands = lib.mkAfter '' + ip addr show + ip route show + ip addr | grep 10.0.2.15 && exit 1 + ping -c1 10.0.2.2 && exit 1 + ''; + }; - testScript = '' - start_all() - machine.wait_for_unit("multi-user.target") - machine.succeed("ip addr show >&2") - machine.succeed("ip route show >&2") - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("multi-user.target") + machine.succeed("ip addr show >&2") + machine.succeed("ip route show >&2") + ''; +} diff --git a/nixos/tests/input-remapper.nix b/nixos/tests/input-remapper.nix index 4937274aa692..eb444d8683a6 100644 --- a/nixos/tests/input-remapper.nix +++ b/nixos/tests/input-remapper.nix @@ -1,65 +1,63 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "input-remapper"; - meta = { - maintainers = with pkgs.lib.maintainers; [ LunNova ]; +{ + name = "input-remapper"; + meta = { + maintainers = with pkgs.lib.maintainers; [ LunNova ]; + }; + + nodes.machine = + { config, ... }: + let + user = config.users.users.sybil; + in + { + imports = [ + ./common/user-account.nix + ./common/x11.nix + ]; + + services.xserver.enable = true; + services.input-remapper.enable = true; + users.users.sybil = { + isNormalUser = true; + group = "wheel"; + }; + test-support.displayManager.auto.user = user.name; + # workaround for pkexec not working in the test environment + # Error creating textual authentication agent: + # Error opening current controlling terminal for the process (`/dev/tty'): + # No such device or address + # passwordless pkexec with polkit module also doesn't work + # to allow the program to run, we replace pkexec with sudo + # and turn on passwordless sudo + # this is not correct in general but good enough for this test + security.sudo = { + enable = true; + wheelNeedsPassword = false; + }; + security.wrappers.pkexec = pkgs.lib.mkForce { + setuid = true; + owner = "root"; + group = "root"; + source = "${pkgs.sudo}/bin/sudo"; + }; }; - nodes.machine = - { config, ... }: - let - user = config.users.users.sybil; - in - { - imports = [ - ./common/user-account.nix - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - services.input-remapper.enable = true; - users.users.sybil = { - isNormalUser = true; - group = "wheel"; - }; - test-support.displayManager.auto.user = user.name; - # workaround for pkexec not working in the test environment - # Error creating textual authentication agent: - # Error opening current controlling terminal for the process (`/dev/tty'): - # No such device or address - # passwordless pkexec with polkit module also doesn't work - # to allow the program to run, we replace pkexec with sudo - # and turn on passwordless sudo - # this is not correct in general but good enough for this test - security.sudo = { - enable = true; - wheelNeedsPassword = false; - }; - security.wrappers.pkexec = pkgs.lib.mkForce { - setuid = true; - owner = "root"; - group = "root"; - source = "${pkgs.sudo}/bin/sudo"; - }; - }; + testScript = + { nodes, ... }: + '' + start_all() + machine.wait_for_x() - enableOCR = true; + machine.succeed("systemctl status input-remapper.service") + machine.execute("su - sybil -c input-remapper-gtk >&2 &") - testScript = - { nodes, ... 
}: - '' - start_all() - machine.wait_for_x() - - machine.succeed("systemctl status input-remapper.service") - machine.execute("su - sybil -c input-remapper-gtk >&2 &") - - machine.wait_for_text("Input Remapper") - machine.wait_for_text("Device") - machine.wait_for_text("Presets") - machine.wait_for_text("Editor") - ''; - } -) + machine.wait_for_text("Input Remapper") + machine.wait_for_text("Device") + machine.wait_for_text("Presets") + machine.wait_for_text("Editor") + ''; +} diff --git a/nixos/tests/inspircd.nix b/nixos/tests/inspircd.nix index b3dc09dd7521..b163622dd5b5 100644 --- a/nixos/tests/inspircd.nix +++ b/nixos/tests/inspircd.nix @@ -9,96 +9,94 @@ let iiDir = "/tmp/irc"; in -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "inspircd"; - nodes = - { - "${server}" = { - networking.firewall.allowedTCPPorts = [ ircPort ]; - services.inspircd = { - enable = true; - package = pkgs.inspircdMinimal; - config = '' - - - ''; - }; +{ pkgs, lib, ... }: +{ + name = "inspircd"; + nodes = + { + "${server}" = { + networking.firewall.allowedTCPPorts = [ ircPort ]; + services.inspircd = { + enable = true; + package = pkgs.inspircdMinimal; + config = '' + + + ''; }; - } - // lib.listToAttrs ( - builtins.map ( - client: - lib.nameValuePair client { - imports = [ - ./common/user-account.nix - ]; + }; + } + // lib.listToAttrs ( + builtins.map ( + client: + lib.nameValuePair client { + imports = [ + ./common/user-account.nix + ]; - systemd.services.ii = { - requires = [ "network.target" ]; - wantedBy = [ "default.target" ]; + systemd.services.ii = { + requires = [ "network.target" ]; + wantedBy = [ "default.target" ]; - serviceConfig = { - Type = "simple"; - ExecPreStartPre = "mkdir -p ${iiDir}"; - ExecStart = '' - ${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir} - ''; - User = "alice"; - }; + serviceConfig = { + Type = "simple"; + ExecPreStartPre = "mkdir -p ${iiDir}"; + ExecStart = '' + ${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir} + ''; + User = "alice"; }; - } - ) clients - ); + }; + } + ) clients + ); - testScript = - let - msg = client: "Hello, my name is ${client}"; - clientScript = - client: - [ - '' - ${client}.wait_for_unit("network.target") - ${client}.systemctl("start ii") - ${client}.wait_for_unit("ii") - ${client}.wait_for_file("${iiDir}/${server}/out") - '' - # wait until first PING from server arrives before joining, - # so we don't try it too early - '' - ${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out") - '' - # join ${channel} - '' - ${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in") - ${client}.wait_for_file("${iiDir}/${server}/#${channel}/in") - '' - # send a greeting - '' - ${client}.succeed( - "echo '${msg client}' > ${iiDir}/${server}/#${channel}/in" - ) - '' - # check that all greetings arrived on all clients - ] - ++ builtins.map (other: '' + testScript = + let + msg = client: "Hello, my name is ${client}"; + clientScript = + client: + [ + '' + ${client}.wait_for_unit("network.target") + ${client}.systemctl("start ii") + ${client}.wait_for_unit("ii") + ${client}.wait_for_file("${iiDir}/${server}/out") + '' + # wait until first PING from server arrives before joining, + # so we don't try it too early + '' + ${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out") + '' + # join ${channel} + '' + ${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in") + ${client}.wait_for_file("${iiDir}/${server}/#${channel}/in") + '' + # send a greeting + '' ${client}.succeed( - 
"grep '${msg other}$' ${iiDir}/${server}/#${channel}/out" + "echo '${msg client}' > ${iiDir}/${server}/#${channel}/in" ) - '') clients; + '' + # check that all greetings arrived on all clients + ] + ++ builtins.map (other: '' + ${client}.succeed( + "grep '${msg other}$' ${iiDir}/${server}/#${channel}/out" + ) + '') clients; - # foldl', but requires a non-empty list instead of a start value - reduce = f: list: builtins.foldl' f (builtins.head list) (builtins.tail list); - in - '' - start_all() - ${server}.wait_for_open_port(${toString ircPort}) + # foldl', but requires a non-empty list instead of a start value + reduce = f: list: builtins.foldl' f (builtins.head list) (builtins.tail list); + in + '' + start_all() + ${server}.wait_for_open_port(${toString ircPort}) - # run clientScript for all clients so that every list - # entry is executed by every client before advancing - # to the next one. - '' - + lib.concatStrings (reduce (lib.zipListsWith (cs: c: cs + c)) (builtins.map clientScript clients)); - } -) + # run clientScript for all clients so that every list + # entry is executed by every client before advancing + # to the next one. + '' + + lib.concatStrings (reduce (lib.zipListsWith (cs: c: cs + c)) (builtins.map clientScript clients)); +} diff --git a/nixos/tests/intune.nix b/nixos/tests/intune.nix index eeb55f053f8a..1d36f107f9e6 100644 --- a/nixos/tests/intune.nix +++ b/nixos/tests/intune.nix @@ -1,66 +1,64 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "intune"; - meta = { - maintainers = with pkgs.lib.maintainers; [ rhysmdnz ]; +{ pkgs, ... }: +{ + name = "intune"; + meta = { + maintainers = with pkgs.lib.maintainers; [ rhysmdnz ]; + }; + enableOCR = true; + + nodes.machine = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + in + { + services.intune.enable = true; + services.gnome.gnome-keyring.enable = true; + imports = [ + ./common/user-account.nix + ./common/x11.nix + ]; + test-support.displayManager.auto.user = user.name; + environment = { + variables.DBUS_SESSION_BUS_ADDRESS = "unix:path=/run/user/${builtins.toString user.uid}/bus"; + }; + }; + nodes.pam = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + in + { + services.intune.enable = true; + imports = [ ./common/user-account.nix ]; }; - enableOCR = true; - nodes.machine = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - in - { - services.intune.enable = true; - services.gnome.gnome-keyring.enable = true; - imports = [ - ./common/user-account.nix - ./common/x11.nix - ]; - test-support.displayManager.auto.user = user.name; - environment = { - variables.DBUS_SESSION_BUS_ADDRESS = "unix:path=/run/user/${builtins.toString user.uid}/bus"; - }; - }; - nodes.pam = - { nodes, ... 
}: - let - user = nodes.machine.users.users.alice; - in - { - services.intune.enable = true; - imports = [ ./common/user-account.nix ]; - }; + testScript = '' + start_all() - testScript = '' - start_all() + # Check System Daemons successfully start + machine.succeed("systemctl start microsoft-identity-device-broker.service") + machine.succeed("systemctl start intune-daemon.service") - # Check System Daemons successfully start - machine.succeed("systemctl start microsoft-identity-device-broker.service") - machine.succeed("systemctl start intune-daemon.service") + # Check User Daemons and intune-portal execurtable works + # Going any further than starting it would require internet access and a microsoft account + machine.wait_for_x() + # TODO: This needs an unlocked user keychain before it will work + #machine.succeed("su - alice -c 'systemctl start --user microsoft-identity-broker.service'") + machine.succeed("su - alice -c 'systemctl start --user intune-agent.service'") + machine.succeed("su - alice -c intune-portal >&2 &") + machine.wait_for_text("Intune Agent") - # Check User Daemons and intune-portal execurtable works - # Going any further than starting it would require internet access and a microsoft account - machine.wait_for_x() - # TODO: This needs an unlocked user keychain before it will work - #machine.succeed("su - alice -c 'systemctl start --user microsoft-identity-broker.service'") - machine.succeed("su - alice -c 'systemctl start --user intune-agent.service'") - machine.succeed("su - alice -c intune-portal >&2 &") - machine.wait_for_text("Intune Agent") + # Check logging in creates password file + def login_as_alice(): + pam.wait_until_tty_matches("1", "login: ") + pam.send_chars("alice\n") + pam.wait_until_tty_matches("1", "Password: ") + pam.send_chars("foobar\n") + pam.wait_until_tty_matches("1", "alice\@pam") - # Check logging in creates password file - def login_as_alice(): - pam.wait_until_tty_matches("1", "login: ") - pam.send_chars("alice\n") - pam.wait_until_tty_matches("1", "Password: ") - pam.send_chars("foobar\n") - pam.wait_until_tty_matches("1", "alice\@pam") - - pam.wait_for_unit("multi-user.target") - login_as_alice() - pam.wait_for_file("/run/intune/1000/pwquality") - ''; - } -) + pam.wait_for_unit("multi-user.target") + login_as_alice() + pam.wait_for_file("/run/intune/1000/pwquality") + ''; +} diff --git a/nixos/tests/invidious.nix b/nixos/tests/invidious.nix index d4817cac4ebf..24efe642784a 100644 --- a/nixos/tests/invidious.nix +++ b/nixos/tests/invidious.nix @@ -1,154 +1,152 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "invidious"; +{ pkgs, ... }: +{ + name = "invidious"; - meta = with pkgs.lib.maintainers; { - maintainers = [ sbruder ]; - }; + meta = with pkgs.lib.maintainers; { + maintainers = [ sbruder ]; + }; - nodes = { - postgres-tcp = - { config, pkgs, ... }: - { - services.postgresql = { - enable = true; - initialScript = pkgs.writeText "init-postgres-with-password" '' - CREATE USER invidious WITH PASSWORD 'correct horse battery staple'; - CREATE DATABASE invidious WITH OWNER invidious; - ''; - enableTCPIP = true; - authentication = '' - host invidious invidious samenet scram-sha-256 - ''; - }; - networking.firewall.allowedTCPPorts = [ config.services.postgresql.settings.port ]; + nodes = { + postgres-tcp = + { config, pkgs, ... 
}: + { + services.postgresql = { + enable = true; + initialScript = pkgs.writeText "init-postgres-with-password" '' + CREATE USER invidious WITH PASSWORD 'correct horse battery staple'; + CREATE DATABASE invidious WITH OWNER invidious; + ''; + enableTCPIP = true; + authentication = '' + host invidious invidious samenet scram-sha-256 + ''; + }; + networking.firewall.allowedTCPPorts = [ config.services.postgresql.settings.port ]; + }; + machine = + { lib, pkgs, ... }: + { + services.invidious = { + enable = true; }; - machine = - { lib, pkgs, ... }: - { - services.invidious = { - enable = true; - }; - specialisation = { - nginx.configuration = { - services.invidious = { - nginx.enable = true; - domain = "invidious.example.com"; - }; - services.nginx.virtualHosts."invidious.example.com" = { - forceSSL = false; - enableACME = false; - }; - networking.hosts."127.0.0.1" = [ "invidious.example.com" ]; + specialisation = { + nginx.configuration = { + services.invidious = { + nginx.enable = true; + domain = "invidious.example.com"; }; - nginx-sig-helper.configuration = { - services.invidious = { - nginx.enable = true; - domain = "invidious.example.com"; - sig-helper.enable = true; - settings.log_level = "Trace"; - }; - services.nginx.virtualHosts."invidious.example.com" = { - forceSSL = false; - enableACME = false; - }; - networking.hosts."127.0.0.1" = [ "invidious.example.com" ]; + services.nginx.virtualHosts."invidious.example.com" = { + forceSSL = false; + enableACME = false; }; - nginx-scale.configuration = { - services.invidious = { - nginx.enable = true; - domain = "invidious.example.com"; - serviceScale = 3; - }; - services.nginx.virtualHosts."invidious.example.com" = { - forceSSL = false; - enableACME = false; - }; - networking.hosts."127.0.0.1" = [ "invidious.example.com" ]; + networking.hosts."127.0.0.1" = [ "invidious.example.com" ]; + }; + nginx-sig-helper.configuration = { + services.invidious = { + nginx.enable = true; + domain = "invidious.example.com"; + sig-helper.enable = true; + settings.log_level = "Trace"; }; - nginx-scale-ytproxy.configuration = { - services.invidious = { - nginx.enable = true; - http3-ytproxy.enable = true; - domain = "invidious.example.com"; - serviceScale = 3; - }; - services.nginx.virtualHosts."invidious.example.com" = { - forceSSL = false; - enableACME = false; - }; - networking.hosts."127.0.0.1" = [ "invidious.example.com" ]; + services.nginx.virtualHosts."invidious.example.com" = { + forceSSL = false; + enableACME = false; }; - postgres-tcp.configuration = { - services.invidious = { - database = { - createLocally = false; - host = "postgres-tcp"; - passwordFile = toString (pkgs.writeText "database-password" "correct horse battery staple"); - }; + networking.hosts."127.0.0.1" = [ "invidious.example.com" ]; + }; + nginx-scale.configuration = { + services.invidious = { + nginx.enable = true; + domain = "invidious.example.com"; + serviceScale = 3; + }; + services.nginx.virtualHosts."invidious.example.com" = { + forceSSL = false; + enableACME = false; + }; + networking.hosts."127.0.0.1" = [ "invidious.example.com" ]; + }; + nginx-scale-ytproxy.configuration = { + services.invidious = { + nginx.enable = true; + http3-ytproxy.enable = true; + domain = "invidious.example.com"; + serviceScale = 3; + }; + services.nginx.virtualHosts."invidious.example.com" = { + forceSSL = false; + enableACME = false; + }; + networking.hosts."127.0.0.1" = [ "invidious.example.com" ]; + }; + postgres-tcp.configuration = { + services.invidious = { + database = { + createLocally = 
false; + host = "postgres-tcp"; + passwordFile = toString (pkgs.writeText "database-password" "correct horse battery staple"); }; }; }; }; - }; + }; + }; - testScript = - { nodes, ... }: - '' - def curl_assert_status_code(url, code, form=None): - assert int(machine.succeed(f"curl -s -o /dev/null -w %{{http_code}} {'-F ' + form + ' ' if form else '''}{url}")) == code + testScript = + { nodes, ... }: + '' + def curl_assert_status_code(url, code, form=None): + assert int(machine.succeed(f"curl -s -o /dev/null -w %{{http_code}} {'-F ' + form + ' ' if form else '''}{url}")) == code - def activate_specialisation(name: str): - machine.succeed(f"${nodes.machine.system.build.toplevel}/specialisation/{name}/bin/switch-to-configuration test >&2") + def activate_specialisation(name: str): + machine.succeed(f"${nodes.machine.system.build.toplevel}/specialisation/{name}/bin/switch-to-configuration test >&2") - url = "http://localhost:${toString nodes.machine.services.invidious.port}" - port = ${toString nodes.machine.services.invidious.port} + url = "http://localhost:${toString nodes.machine.services.invidious.port}" + port = ${toString nodes.machine.services.invidious.port} - # start postgres vm now - postgres_tcp.start() + # start postgres vm now + postgres_tcp.start() - machine.wait_for_open_port(port) - curl_assert_status_code(f"{url}/search", 200) + machine.wait_for_open_port(port) + curl_assert_status_code(f"{url}/search", 200) - activate_specialisation("nginx") - machine.wait_for_open_port(80) - curl_assert_status_code("http://invidious.example.com/search", 200) + activate_specialisation("nginx") + machine.wait_for_open_port(80) + curl_assert_status_code("http://invidious.example.com/search", 200) - activate_specialisation("nginx-scale") - machine.wait_for_open_port(80) - # this depends on nginx round-robin behaviour for the upstream servers - curl_assert_status_code("http://invidious.example.com/search", 200) - curl_assert_status_code("http://invidious.example.com/search", 200) - curl_assert_status_code("http://invidious.example.com/search", 200) - machine.succeed("journalctl -eu invidious.service | grep -o '200 GET /search'") - machine.succeed("journalctl -eu invidious-1.service | grep -o '200 GET /search'") - machine.succeed("journalctl -eu invidious-2.service | grep -o '200 GET /search'") + activate_specialisation("nginx-scale") + machine.wait_for_open_port(80) + # this depends on nginx round-robin behaviour for the upstream servers + curl_assert_status_code("http://invidious.example.com/search", 200) + curl_assert_status_code("http://invidious.example.com/search", 200) + curl_assert_status_code("http://invidious.example.com/search", 200) + machine.succeed("journalctl -eu invidious.service | grep -o '200 GET /search'") + machine.succeed("journalctl -eu invidious-1.service | grep -o '200 GET /search'") + machine.succeed("journalctl -eu invidious-2.service | grep -o '200 GET /search'") - activate_specialisation("nginx-scale-ytproxy") - machine.wait_for_unit("http3-ytproxy.service") - machine.wait_for_open_port(80) - machine.wait_until_succeeds("ls /run/http3-ytproxy/socket/http-proxy.sock") - curl_assert_status_code("http://invidious.example.com/search", 200) - # this should error out as no internet connectivity is available in the test - curl_assert_status_code("http://invidious.example.com/vi/dQw4w9WgXcQ/mqdefault.jpg", 502) - machine.succeed("journalctl -eu http3-ytproxy.service | grep -o 'dQw4w9WgXcQ'") + activate_specialisation("nginx-scale-ytproxy") + 
machine.wait_for_unit("http3-ytproxy.service") + machine.wait_for_open_port(80) + machine.wait_until_succeeds("ls /run/http3-ytproxy/socket/http-proxy.sock") + curl_assert_status_code("http://invidious.example.com/search", 200) + # this should error out as no internet connectivity is available in the test + curl_assert_status_code("http://invidious.example.com/vi/dQw4w9WgXcQ/mqdefault.jpg", 502) + machine.succeed("journalctl -eu http3-ytproxy.service | grep -o 'dQw4w9WgXcQ'") - activate_specialisation("nginx-sig-helper") - machine.wait_for_unit("invidious-sig-helper.service") - # we can't really test the sig helper that well without internet connection... - # invidious does connect to the sig helper though and crashes when the sig helper is not available - machine.wait_for_open_port(80) - curl_assert_status_code("http://invidious.example.com/search", 200) - machine.succeed("journalctl -eu invidious.service | grep -o \"SigHelper: Using helper at 'tcp://127.0.0.1:2999'\"") + activate_specialisation("nginx-sig-helper") + machine.wait_for_unit("invidious-sig-helper.service") + # we can't really test the sig helper that well without internet connection... + # invidious does connect to the sig helper though and crashes when the sig helper is not available + machine.wait_for_open_port(80) + curl_assert_status_code("http://invidious.example.com/search", 200) + machine.succeed("journalctl -eu invidious.service | grep -o \"SigHelper: Using helper at 'tcp://127.0.0.1:2999'\"") - postgres_tcp.wait_for_unit("postgresql.service") - activate_specialisation("postgres-tcp") - machine.wait_for_open_port(port) - curl_assert_status_code(f"{url}/search", 200) - ''; - } -) + postgres_tcp.wait_for_unit("postgresql.service") + activate_specialisation("postgres-tcp") + machine.wait_for_open_port(port) + curl_assert_status_code(f"{url}/search", 200) + ''; +} diff --git a/nixos/tests/iodine.nix b/nixos/tests/iodine.nix index 573f6d7aebeb..04ed8dcfaa53 100644 --- a/nixos/tests/iodine.nix +++ b/nixos/tests/iodine.nix @@ -1,66 +1,64 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - domain = "whatever.example.com"; - password = "false;foo;exit;withspecialcharacters"; - in - { - name = "iodine"; - nodes = { - server = - { ... }: +{ pkgs, ... }: +let + domain = "whatever.example.com"; + password = "false;foo;exit;withspecialcharacters"; +in +{ + name = "iodine"; + nodes = { + server = + { ... }: - { - networking.firewall = { - allowedUDPPorts = [ 53 ]; - trustedInterfaces = [ "dns0" ]; - }; - boot.kernel.sysctl = { - "net.ipv4.ip_forward" = 1; - "net.ipv6.ip_forward" = 1; - }; - - services.iodine.server = { - enable = true; - ip = "10.53.53.1/24"; - passwordFile = "${builtins.toFile "password" password}"; - inherit domain; - }; - - # test resource: accessible only via tunnel - services.openssh = { - enable = true; - openFirewall = false; - }; + { + networking.firewall = { + allowedUDPPorts = [ 53 ]; + trustedInterfaces = [ "dns0" ]; + }; + boot.kernel.sysctl = { + "net.ipv4.ip_forward" = 1; + "net.ipv6.ip_forward" = 1; }; - client = - { ... 
}: - { - services.iodine.clients.testClient = { - # test that ProtectHome is "read-only" - passwordFile = "/root/pw"; - relay = "server"; - server = domain; - }; - systemd.tmpfiles.rules = [ - "f /root/pw 0666 root root - ${password}" - ]; - environment.systemPackages = [ - pkgs.nagiosPluginsOfficial - ]; + services.iodine.server = { + enable = true; + ip = "10.53.53.1/24"; + passwordFile = "${builtins.toFile "password" password}"; + inherit domain; }; - }; + # test resource: accessible only via tunnel + services.openssh = { + enable = true; + openFirewall = false; + }; + }; - testScript = '' - start_all() + client = + { ... }: + { + services.iodine.clients.testClient = { + # test that ProtectHome is "read-only" + passwordFile = "/root/pw"; + relay = "server"; + server = domain; + }; + systemd.tmpfiles.rules = [ + "f /root/pw 0666 root root - ${password}" + ]; + environment.systemPackages = [ + pkgs.nagiosPluginsOfficial + ]; + }; - server.wait_for_unit("sshd") - server.wait_for_unit("iodined") - client.wait_for_unit("iodine-testClient") + }; - client.succeed("check_ssh -H 10.53.53.1") - ''; - } -) + testScript = '' + start_all() + + server.wait_for_unit("sshd") + server.wait_for_unit("iodined") + client.wait_for_unit("iodine-testClient") + + client.succeed("check_ssh -H 10.53.53.1") + ''; +} diff --git a/nixos/tests/iosched.nix b/nixos/tests/iosched.nix index 92870e3ae603..c113f23f1d6d 100644 --- a/nixos/tests/iosched.nix +++ b/nixos/tests/iosched.nix @@ -1,71 +1,69 @@ -import ./make-test-python.nix ( - { - pkgs, - ... - }: - let - qemu-img = pkgs.lib.getExe' pkgs.vmTools.qemu "qemu-img"; - empty = pkgs.runCommand "empty.qcow2" { } '' - ${qemu-img} create -f qcow2 "$out" 32M - ''; - in - { - name = "iosched"; - meta.maintainers = with pkgs.lib.maintainers; [ mvs ]; +{ + pkgs, + ... 
+}: +let + qemu-img = pkgs.lib.getExe' pkgs.vmTools.qemu "qemu-img"; + empty = pkgs.runCommand "empty.qcow2" { } '' + ${qemu-img} create -f qcow2 "$out" 32M + ''; +in +{ + name = "iosched"; + meta.maintainers = with pkgs.lib.maintainers; [ mvs ]; - nodes.machine = { - virtualisation.qemu.options = [ - "-drive" - "id=sda,if=none,format=qcow2,readonly=on,file=${empty}" - "-drive" - "id=sdb,if=none,format=qcow2,readonly=on,file=${empty}" - "-drive" - "id=nvme0n1,if=none,format=qcow2,readonly=on,file=${empty}" - "-drive" - "id=mmcblk0,if=none,format=qcow2,file=./mmcblk0.qcow2" - "-device" - "virtio-scsi-pci,id=scsi0" - "-device" - "sdhci-pci" - "-device" - "scsi-hd,rotation_rate=1,bus=scsi0.0,drive=sda" - "-device" - "scsi-hd,rotation_rate=7200,bus=scsi0.0,drive=sdb" - "-device" - "sd-card,drive=mmcblk0" - "-device" - "nvme,serial=deadbeef,drive=nvme0n1" - ]; + nodes.machine = { + virtualisation.qemu.options = [ + "-drive" + "id=sda,if=none,format=qcow2,readonly=on,file=${empty}" + "-drive" + "id=sdb,if=none,format=qcow2,readonly=on,file=${empty}" + "-drive" + "id=nvme0n1,if=none,format=qcow2,readonly=on,file=${empty}" + "-drive" + "id=mmcblk0,if=none,format=qcow2,file=./mmcblk0.qcow2" + "-device" + "virtio-scsi-pci,id=scsi0" + "-device" + "sdhci-pci" + "-device" + "scsi-hd,rotation_rate=1,bus=scsi0.0,drive=sda" + "-device" + "scsi-hd,rotation_rate=7200,bus=scsi0.0,drive=sdb" + "-device" + "sd-card,drive=mmcblk0" + "-device" + "nvme,serial=deadbeef,drive=nvme0n1" + ]; - hardware.block = { - defaultScheduler = "none"; - defaultSchedulerRotational = "mq-deadline"; - scheduler = { - "nvme[0-9]*" = "kyber"; - "mmcblk[0-9]*" = "bfq"; - }; + hardware.block = { + defaultScheduler = "none"; + defaultSchedulerRotational = "mq-deadline"; + scheduler = { + "nvme[0-9]*" = "kyber"; + "mmcblk[0-9]*" = "bfq"; }; }; + }; - testScript = '' - import subprocess + testScript = '' + import subprocess - def check_scheduler(dev, scheduler): - machine.succeed("grep -F -q '[{}]' /sys/block/{}/queue/scheduler".format(scheduler, dev)) + def check_scheduler(dev, scheduler): + machine.succeed("grep -F -q '[{}]' /sys/block/{}/queue/scheduler".format(scheduler, dev)) - subprocess.check_call([ - "${qemu-img}", "create", "-f", "qcow2", "vm-state-machine/mmcblk0.qcow2", "32M" - ]) + subprocess.check_call([ + "${qemu-img}", "create", "-f", "qcow2", "vm-state-machine/mmcblk0.qcow2", "32M" + ]) - machine.start() - machine.succeed("udevadm verify --no-style") - check_scheduler("sda", "none") - check_scheduler("sdb", "mq-deadline") - check_scheduler("nvme0n1", "kyber") - check_scheduler("mmcblk0", "bfq") + machine.start() + machine.succeed("udevadm verify --no-style") + check_scheduler("sda", "none") + check_scheduler("sdb", "mq-deadline") + check_scheduler("nvme0n1", "kyber") + check_scheduler("mmcblk0", "bfq") - machine.succeed("tmp=\"$(mktemp)\"; losetup /dev/loop0 \"$tmp\"") - check_scheduler("loop0", "none") - ''; - } -) + machine.succeed("tmp=\"$(mktemp)\"; losetup /dev/loop0 \"$tmp\"") + check_scheduler("loop0", "none") + ''; +} diff --git a/nixos/tests/ipv6.nix b/nixos/tests/ipv6.nix index cc0bf895c5e3..ca25c0b4497f 100644 --- a/nixos/tests/ipv6.nix +++ b/nixos/tests/ipv6.nix @@ -1,133 +1,131 @@ # Test of IPv6 functionality in NixOS, including whether router # solicication/advertisement using radvd works. -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "ipv6"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; +{ pkgs, lib, ... 
}: +{ + name = "ipv6"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; + + nodes = { + # We use lib.mkForce here to remove the interface configuration + # provided by makeTest, so that the interfaces are all configured + # implicitly. + + # This client should use privacy extensions fully, having a + # completely-default network configuration. + client_defaults.networking.interfaces = lib.mkForce { }; + + # Both of these clients should obtain temporary addresses, but + # not use them as the default source IP. We thus run the same + # checks against them — but the configuration resulting in this + # behaviour is different. + + # Here, by using an altered default value for the global setting... + client_global_setting = { + networking.interfaces = lib.mkForce { }; + networking.tempAddresses = "enabled"; + }; + # and here, by setting this on the interface explicitly. + client_interface_setting = { + networking.tempAddresses = "disabled"; + networking.interfaces = lib.mkForce { + eth1.tempAddress = "enabled"; + }; }; - nodes = { - # We use lib.mkForce here to remove the interface configuration - # provided by makeTest, so that the interfaces are all configured - # implicitly. - - # This client should use privacy extensions fully, having a - # completely-default network configuration. - client_defaults.networking.interfaces = lib.mkForce { }; - - # Both of these clients should obtain temporary addresses, but - # not use them as the default source IP. We thus run the same - # checks against them — but the configuration resulting in this - # behaviour is different. - - # Here, by using an altered default value for the global setting... - client_global_setting = { - networking.interfaces = lib.mkForce { }; - networking.tempAddresses = "enabled"; - }; - # and here, by setting this on the interface explicitly. - client_interface_setting = { - networking.tempAddresses = "disabled"; - networking.interfaces = lib.mkForce { - eth1.tempAddress = "enabled"; - }; - }; - - server = { - services.httpd.enable = true; - services.httpd.adminAddr = "foo@example.org"; - networking.firewall.allowedTCPPorts = [ 80 ]; - # disable testing driver's default IPv6 address. - networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ ]; - }; - - router = - { ... }: - { - services.radvd.enable = true; - services.radvd.config = '' - interface eth1 { - AdvSendAdvert on; - # ULA prefix (RFC 4193). - prefix fd60:cc69:b537:1::/64 { }; - }; - ''; - }; + server = { + services.httpd.enable = true; + services.httpd.adminAddr = "foo@example.org"; + networking.firewall.allowedTCPPorts = [ 80 ]; + # disable testing driver's default IPv6 address. + networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ ]; }; - testScript = '' - import re + router = + { ... }: + { + services.radvd.enable = true; + services.radvd.config = '' + interface eth1 { + AdvSendAdvert on; + # ULA prefix (RFC 4193). + prefix fd60:cc69:b537:1::/64 { }; + }; + ''; + }; + }; - # Start the router first so that it respond to router solicitations. - router.wait_for_unit("radvd") + testScript = '' + import re - clients = [client_defaults, client_global_setting, client_interface_setting] + # Start the router first so that it respond to router solicitations. 
+ router.wait_for_unit("radvd") - start_all() + clients = [client_defaults, client_global_setting, client_interface_setting] - for client in clients: - client.wait_for_unit("network.target") - server.wait_for_unit("network.target") - server.wait_for_unit("httpd.service") + start_all() - # Wait until the given interface has a non-tentative address of - # the desired scope (i.e. has completed Duplicate Address - # Detection). - def wait_for_address(machine, iface, scope, temporary=False): - temporary_flag = "temporary" if temporary else "-temporary" - cmd = f"ip -o -6 addr show dev {iface} scope {scope} -tentative {temporary_flag}" + for client in clients: + client.wait_for_unit("network.target") + server.wait_for_unit("network.target") + server.wait_for_unit("httpd.service") - machine.wait_until_succeeds(f"[ `{cmd} | wc -l` -eq 1 ]") - output = machine.succeed(cmd) - ip = re.search(r"inet6 ([0-9a-f:]{2,})/", output).group(1) + # Wait until the given interface has a non-tentative address of + # the desired scope (i.e. has completed Duplicate Address + # Detection). + def wait_for_address(machine, iface, scope, temporary=False): + temporary_flag = "temporary" if temporary else "-temporary" + cmd = f"ip -o -6 addr show dev {iface} scope {scope} -tentative {temporary_flag}" - if temporary: - scope = scope + " temporary" - machine.log(f"{scope} address on {iface} is {ip}") - return ip + machine.wait_until_succeeds(f"[ `{cmd} | wc -l` -eq 1 ]") + output = machine.succeed(cmd) + ip = re.search(r"inet6 ([0-9a-f:]{2,})/", output).group(1) + + if temporary: + scope = scope + " temporary" + machine.log(f"{scope} address on {iface} is {ip}") + return ip - with subtest("Loopback address can be pinged"): - client_defaults.succeed("ping -c 1 ::1 >&2") - client_defaults.fail("ping -c 1 2001:db8:: >&2") + with subtest("Loopback address can be pinged"): + client_defaults.succeed("ping -c 1 ::1 >&2") + client_defaults.fail("ping -c 1 2001:db8:: >&2") - with subtest("Local link addresses can be obtained and pinged"): - for client in clients: - client_ip = wait_for_address(client, "eth1", "link") - server_ip = wait_for_address(server, "eth1", "link") - client.succeed(f"ping -c 1 {client_ip}%eth1 >&2") - client.succeed(f"ping -c 1 {server_ip}%eth1 >&2") + with subtest("Local link addresses can be obtained and pinged"): + for client in clients: + client_ip = wait_for_address(client, "eth1", "link") + server_ip = wait_for_address(server, "eth1", "link") + client.succeed(f"ping -c 1 {client_ip}%eth1 >&2") + client.succeed(f"ping -c 1 {server_ip}%eth1 >&2") - with subtest("Global addresses can be obtained, pinged, and reached via http"): - for client in clients: - client_ip = wait_for_address(client, "eth1", "global") - server_ip = wait_for_address(server, "eth1", "global") - client.succeed(f"ping -c 1 {client_ip} >&2") - client.succeed(f"ping -c 1 {server_ip} >&2") - client.succeed(f"curl --fail -g http://[{server_ip}]") - client.fail(f"curl --fail -g http://[{client_ip}]") + with subtest("Global addresses can be obtained, pinged, and reached via http"): + for client in clients: + client_ip = wait_for_address(client, "eth1", "global") + server_ip = wait_for_address(server, "eth1", "global") + client.succeed(f"ping -c 1 {client_ip} >&2") + client.succeed(f"ping -c 1 {server_ip} >&2") + client.succeed(f"curl --fail -g http://[{server_ip}]") + client.fail(f"curl --fail -g http://[{client_ip}]") - with subtest( - "Privacy extensions: Global temporary address is used as default source address" - ): - ip = 
wait_for_address(client_defaults, "eth1", "global", temporary=True) - # Default route should have "src " in it - client_defaults.succeed(f"ip route get 2001:db8:: | grep 'src {ip}'") + with subtest( + "Privacy extensions: Global temporary address is used as default source address" + ): + ip = wait_for_address(client_defaults, "eth1", "global", temporary=True) + # Default route should have "src " in it + client_defaults.succeed(f"ip route get 2001:db8:: | grep 'src {ip}'") - for client, setting_desc in ( - (client_global_setting, "global"), - (client_interface_setting, "interface"), - ): - with subtest(f'Privacy extensions: "enabled" through {setting_desc} setting)'): - # We should be obtaining both a temporary address and an EUI-64 address... - ip = wait_for_address(client, "eth1", "global") - assert "ff:fe" in ip - ip_temp = wait_for_address(client, "eth1", "global", temporary=True) - # But using the EUI-64 one. - client.succeed(f"ip route get 2001:db8:: | grep 'src {ip}'") - ''; - } -) + for client, setting_desc in ( + (client_global_setting, "global"), + (client_interface_setting, "interface"), + ): + with subtest(f'Privacy extensions: "enabled" through {setting_desc} setting)'): + # We should be obtaining both a temporary address and an EUI-64 address... + ip = wait_for_address(client, "eth1", "global") + assert "ff:fe" in ip + ip_temp = wait_for_address(client, "eth1", "global", temporary=True) + # But using the EUI-64 one. + client.succeed(f"ip route get 2001:db8:: | grep 'src {ip}'") + ''; +} diff --git a/nixos/tests/iscsi-multipath-root.nix b/nixos/tests/iscsi-multipath-root.nix index 56f9e1ae7cc1..45b1baf40c61 100644 --- a/nixos/tests/iscsi-multipath-root.nix +++ b/nixos/tests/iscsi-multipath-root.nix @@ -1,298 +1,296 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - initiatorName = "iqn.2020-08.org.linux-iscsi.initiatorhost:example"; - targetName = "iqn.2003-01.org.linux-iscsi.target.x8664:sn.acf8fd9c23af"; - in - { - name = "iscsi"; - meta = { - maintainers = pkgs.lib.teams.deshaw.members; - }; +{ pkgs, lib, ... }: +let + initiatorName = "iqn.2020-08.org.linux-iscsi.initiatorhost:example"; + targetName = "iqn.2003-01.org.linux-iscsi.target.x8664:sn.acf8fd9c23af"; +in +{ + name = "iscsi"; + meta = { + maintainers = pkgs.lib.teams.deshaw.members; + }; - nodes = { - target = - { - config, - pkgs, - lib, - ... - }: - { - virtualisation.vlans = [ - 1 - 2 - ]; - services.target = { - enable = true; - config = { - fabric_modules = [ ]; - storage_objects = [ - { - dev = "/dev/vdb"; - name = "test"; - plugin = "block"; - write_back = true; - wwn = "92b17c3f-6b40-4168-b082-ceeb7b495522"; - } - ]; - targets = [ - { - fabric = "iscsi"; - tpgs = [ - { - enable = true; - attributes = { - authentication = 0; - generate_node_acls = 1; - }; - luns = [ - { - alias = "94dfe06967"; - alua_tg_pt_gp_name = "default_tg_pt_gp"; - index = 0; - storage_object = "/backstores/block/test"; - } - ]; - node_acls = [ - { - mapped_luns = [ - { - alias = "d42f5bdf8a"; - index = 0; - tpg_lun = 0; - write_protect = false; - } - ]; - node_wwn = initiatorName; - } - ]; - portals = [ - { - ip_address = "0.0.0.0"; - iser = false; - offload = false; - port = 3260; - } - ]; - tag = 1; - } - ]; - wwn = targetName; - } - ]; - }; - }; - - networking.firewall.allowedTCPPorts = [ 3260 ]; - networking.firewall.allowedUDPPorts = [ 3260 ]; - - virtualisation.memorySize = 2048; - virtualisation.emptyDiskImages = [ 2048 ]; - }; - - initiatorAuto = - { - nodes, - config, - pkgs, - ... 
- }: - { - virtualisation.vlans = [ - 1 - 2 - ]; - - services.multipath = { - enable = true; - defaults = '' - find_multipaths yes - user_friendly_names yes - ''; - pathGroups = [ + nodes = { + target = + { + config, + pkgs, + lib, + ... + }: + { + virtualisation.vlans = [ + 1 + 2 + ]; + services.target = { + enable = true; + config = { + fabric_modules = [ ]; + storage_objects = [ { - alias = 123456; - wwid = "3600140592b17c3f6b404168b082ceeb7"; + dev = "/dev/vdb"; + name = "test"; + plugin = "block"; + write_back = true; + wwn = "92b17c3f-6b40-4168-b082-ceeb7b495522"; + } + ]; + targets = [ + { + fabric = "iscsi"; + tpgs = [ + { + enable = true; + attributes = { + authentication = 0; + generate_node_acls = 1; + }; + luns = [ + { + alias = "94dfe06967"; + alua_tg_pt_gp_name = "default_tg_pt_gp"; + index = 0; + storage_object = "/backstores/block/test"; + } + ]; + node_acls = [ + { + mapped_luns = [ + { + alias = "d42f5bdf8a"; + index = 0; + tpg_lun = 0; + write_protect = false; + } + ]; + node_wwn = initiatorName; + } + ]; + portals = [ + { + ip_address = "0.0.0.0"; + iser = false; + offload = false; + port = 3260; + } + ]; + tag = 1; + } + ]; + wwn = targetName; } ]; }; + }; - services.openiscsi = { - enable = true; - enableAutoLoginOut = true; - discoverPortal = "target"; - name = initiatorName; - }; + networking.firewall.allowedTCPPorts = [ 3260 ]; + networking.firewall.allowedUDPPorts = [ 3260 ]; - environment.systemPackages = with pkgs; [ - xfsprogs + virtualisation.memorySize = 2048; + virtualisation.emptyDiskImages = [ 2048 ]; + }; + + initiatorAuto = + { + nodes, + config, + pkgs, + ... + }: + { + virtualisation.vlans = [ + 1 + 2 + ]; + + services.multipath = { + enable = true; + defaults = '' + find_multipaths yes + user_friendly_names yes + ''; + pathGroups = [ + { + alias = 123456; + wwid = "3600140592b17c3f6b404168b082ceeb7"; + } ]; + }; - environment.etc."initiator-root-disk-closure".source = - nodes.initiatorRootDisk.config.system.build.toplevel; + services.openiscsi = { + enable = true; + enableAutoLoginOut = true; + discoverPortal = "target"; + name = initiatorName; + }; - nix.settings = { - substituters = lib.mkForce [ ]; - hashed-mirrors = null; - connect-timeout = 1; + environment.systemPackages = with pkgs; [ + xfsprogs + ]; + + environment.etc."initiator-root-disk-closure".source = + nodes.initiatorRootDisk.config.system.build.toplevel; + + nix.settings = { + substituters = lib.mkForce [ ]; + hashed-mirrors = null; + connect-timeout = 1; + }; + }; + + initiatorRootDisk = + { + config, + pkgs, + modulesPath, + lib, + ... + }: + { + boot.initrd.network.enable = true; + boot.loader.grub.enable = false; + + boot.kernelParams = lib.mkOverride 5 ([ + "boot.shell_on_fail" + "console=tty1" + "ip=192.168.1.1:::255.255.255.0::ens9:none" + "ip=192.168.2.1:::255.255.255.0::ens10:none" + ]); + + # defaults to true, puts some code in the initrd that tries to mount an overlayfs on /nix/store + virtualisation.writableStore = false; + virtualisation.vlans = [ + 1 + 2 + ]; + + services.multipath = { + enable = true; + defaults = '' + find_multipaths yes + user_friendly_names yes + ''; + pathGroups = [ + { + alias = 123456; + wwid = "3600140592b17c3f6b404168b082ceeb7"; + } + ]; + }; + + fileSystems = lib.mkOverride 5 { + "/" = { + fsType = "xfs"; + device = "/dev/mapper/123456"; + options = [ "_netdev" ]; }; }; - initiatorRootDisk = - { - config, - pkgs, - modulesPath, - lib, - ... 
- }: - { - boot.initrd.network.enable = true; - boot.loader.grub.enable = false; + boot.initrd.extraFiles."etc/multipath/wwids".source = + pkgs.writeText "wwids" "/3600140592b17c3f6b404168b082ceeb7/"; - boot.kernelParams = lib.mkOverride 5 ([ - "boot.shell_on_fail" - "console=tty1" - "ip=192.168.1.1:::255.255.255.0::ens9:none" - "ip=192.168.2.1:::255.255.255.0::ens10:none" - ]); - - # defaults to true, puts some code in the initrd that tries to mount an overlayfs on /nix/store - virtualisation.writableStore = false; - virtualisation.vlans = [ - 1 - 2 - ]; - - services.multipath = { - enable = true; - defaults = '' - find_multipaths yes - user_friendly_names yes - ''; - pathGroups = [ - { - alias = 123456; - wwid = "3600140592b17c3f6b404168b082ceeb7"; - } - ]; - }; - - fileSystems = lib.mkOverride 5 { - "/" = { - fsType = "xfs"; - device = "/dev/mapper/123456"; - options = [ "_netdev" ]; - }; - }; - - boot.initrd.extraFiles."etc/multipath/wwids".source = - pkgs.writeText "wwids" "/3600140592b17c3f6b404168b082ceeb7/"; - - boot.iscsi-initiator = { - discoverPortal = "target"; - name = initiatorName; - target = targetName; - extraIscsiCommands = '' - iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login - ''; - }; + boot.iscsi-initiator = { + discoverPortal = "target"; + name = initiatorName; + target = targetName; + extraIscsiCommands = '' + iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login + ''; }; + }; - }; + }; - testScript = - { nodes, ... }: - '' - target.start() - target.wait_for_unit("iscsi-target.service") + testScript = + { nodes, ... }: + '' + target.start() + target.wait_for_unit("iscsi-target.service") - initiatorAuto.start() + initiatorAuto.start() - initiatorAuto.wait_for_unit("iscsid.service") - initiatorAuto.wait_for_unit("iscsi.service") - initiatorAuto.get_unit_info("iscsi") + initiatorAuto.wait_for_unit("iscsid.service") + initiatorAuto.wait_for_unit("iscsi.service") + initiatorAuto.get_unit_info("iscsi") - # Expecting this to fail since we should already know about 192.168.1.3 - initiatorAuto.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.1.3 --login") - # Expecting this to succeed since we don't yet know about 192.168.2.3 - initiatorAuto.succeed("iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login") + # Expecting this to fail since we should already know about 192.168.1.3 + initiatorAuto.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.1.3 --login") + # Expecting this to succeed since we don't yet know about 192.168.2.3 + initiatorAuto.succeed("iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login") - # /dev/sda is provided by iscsi on target - initiatorAuto.succeed("set -x; while ! test -e /dev/sda; do sleep 1; done") + # /dev/sda is provided by iscsi on target + initiatorAuto.succeed("set -x; while ! 
test -e /dev/sda; do sleep 1; done") - initiatorAuto.succeed("mkfs.xfs /dev/sda") - initiatorAuto.succeed("mkdir /mnt") + initiatorAuto.succeed("mkfs.xfs /dev/sda") + initiatorAuto.succeed("mkdir /mnt") - # Start by verifying /dev/sda and /dev/sdb are both the same disk - initiatorAuto.succeed("mount /dev/sda /mnt") - initiatorAuto.succeed("touch /mnt/hi") - initiatorAuto.succeed("umount /mnt") + # Start by verifying /dev/sda and /dev/sdb are both the same disk + initiatorAuto.succeed("mount /dev/sda /mnt") + initiatorAuto.succeed("touch /mnt/hi") + initiatorAuto.succeed("umount /mnt") - initiatorAuto.succeed("mount /dev/sdb /mnt") - initiatorAuto.succeed("test -e /mnt/hi") - initiatorAuto.succeed("umount /mnt") + initiatorAuto.succeed("mount /dev/sdb /mnt") + initiatorAuto.succeed("test -e /mnt/hi") + initiatorAuto.succeed("umount /mnt") - initiatorAuto.succeed("systemctl restart multipathd") - initiatorAuto.succeed("systemd-cat multipath -ll") + initiatorAuto.succeed("systemctl restart multipathd") + initiatorAuto.succeed("systemd-cat multipath -ll") - # Install our RootDisk machine to 123456, the alias to the device that multipath is now managing - initiatorAuto.succeed("mount /dev/mapper/123456 /mnt") - initiatorAuto.succeed("mkdir -p /mnt/etc/{multipath,iscsi}") - initiatorAuto.succeed("cp -r /etc/multipath/wwids /mnt/etc/multipath/wwids") - initiatorAuto.succeed("cp -r /etc/iscsi/{nodes,send_targets} /mnt/etc/iscsi") - initiatorAuto.succeed( - "nixos-install --no-bootloader --no-root-passwd --system /etc/initiator-root-disk-closure" - ) - initiatorAuto.succeed("umount /mnt") - initiatorAuto.shutdown() + # Install our RootDisk machine to 123456, the alias to the device that multipath is now managing + initiatorAuto.succeed("mount /dev/mapper/123456 /mnt") + initiatorAuto.succeed("mkdir -p /mnt/etc/{multipath,iscsi}") + initiatorAuto.succeed("cp -r /etc/multipath/wwids /mnt/etc/multipath/wwids") + initiatorAuto.succeed("cp -r /etc/iscsi/{nodes,send_targets} /mnt/etc/iscsi") + initiatorAuto.succeed( + "nixos-install --no-bootloader --no-root-passwd --system /etc/initiator-root-disk-closure" + ) + initiatorAuto.succeed("umount /mnt") + initiatorAuto.shutdown() - initiatorRootDisk.start() - initiatorRootDisk.wait_for_unit("multi-user.target") - initiatorRootDisk.wait_for_unit("iscsid") + initiatorRootDisk.start() + initiatorRootDisk.wait_for_unit("multi-user.target") + initiatorRootDisk.wait_for_unit("iscsid") - # Log in over both nodes - initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.1.3 --login") - initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login") - initiatorRootDisk.succeed("systemctl restart multipathd") - initiatorRootDisk.succeed("systemd-cat multipath -ll") + # Log in over both nodes + initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.1.3 --login") + initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login") + initiatorRootDisk.succeed("systemctl restart multipathd") + initiatorRootDisk.succeed("systemd-cat multipath -ll") - # Verify we can write and sync the root disk - initiatorRootDisk.succeed("mkdir /scratch") - initiatorRootDisk.succeed("touch /scratch/both-up") - initiatorRootDisk.succeed("sync /scratch") + # Verify we can write and sync the root disk + initiatorRootDisk.succeed("mkdir /scratch") + initiatorRootDisk.succeed("touch /scratch/both-up") + initiatorRootDisk.succeed("sync /scratch") - # Verify we can write to the root 
with ens9 (sda, 192.168.1.3) down - initiatorRootDisk.succeed("ip link set ens9 down") - initiatorRootDisk.succeed("touch /scratch/ens9-down") - initiatorRootDisk.succeed("sync /scratch") - initiatorRootDisk.succeed("ip link set ens9 up") + # Verify we can write to the root with ens9 (sda, 192.168.1.3) down + initiatorRootDisk.succeed("ip link set ens9 down") + initiatorRootDisk.succeed("touch /scratch/ens9-down") + initiatorRootDisk.succeed("sync /scratch") + initiatorRootDisk.succeed("ip link set ens9 up") - # todo: better way to wait until multipath notices the link is back - initiatorRootDisk.succeed("sleep 5") - initiatorRootDisk.succeed("touch /scratch/both-down") - initiatorRootDisk.succeed("sync /scratch") + # todo: better way to wait until multipath notices the link is back + initiatorRootDisk.succeed("sleep 5") + initiatorRootDisk.succeed("touch /scratch/both-down") + initiatorRootDisk.succeed("sync /scratch") - # Verify we can write to the root with ens10 (sdb, 192.168.2.3) down - initiatorRootDisk.succeed("ip link set ens10 down") - initiatorRootDisk.succeed("touch /scratch/ens10-down") - initiatorRootDisk.succeed("sync /scratch") - initiatorRootDisk.succeed("ip link set ens10 up") - initiatorRootDisk.succeed("touch /scratch/ens10-down") - initiatorRootDisk.succeed("sync /scratch") + # Verify we can write to the root with ens10 (sdb, 192.168.2.3) down + initiatorRootDisk.succeed("ip link set ens10 down") + initiatorRootDisk.succeed("touch /scratch/ens10-down") + initiatorRootDisk.succeed("sync /scratch") + initiatorRootDisk.succeed("ip link set ens10 up") + initiatorRootDisk.succeed("touch /scratch/ens10-down") + initiatorRootDisk.succeed("sync /scratch") - initiatorRootDisk.succeed("ip link set ens9 up") - initiatorRootDisk.succeed("ip link set ens10 up") - initiatorRootDisk.shutdown() + initiatorRootDisk.succeed("ip link set ens9 up") + initiatorRootDisk.succeed("ip link set ens10 up") + initiatorRootDisk.shutdown() - # Verify we can boot with the target's eth1 down, forcing - # it to multipath via the second link - target.succeed("ip link set eth1 down") - initiatorRootDisk.start() - initiatorRootDisk.wait_for_unit("multi-user.target") - initiatorRootDisk.wait_for_unit("iscsid") - initiatorRootDisk.succeed("test -e /scratch/both-up") - ''; - } -) + # Verify we can boot with the target's eth1 down, forcing + # it to multipath via the second link + target.succeed("ip link set eth1 down") + initiatorRootDisk.start() + initiatorRootDisk.wait_for_unit("multi-user.target") + initiatorRootDisk.wait_for_unit("iscsid") + initiatorRootDisk.succeed("test -e /scratch/both-up") + ''; +} diff --git a/nixos/tests/iscsi-root.nix b/nixos/tests/iscsi-root.nix index ab0ab9f89e9b..9d0b0765b311 100644 --- a/nixos/tests/iscsi-root.nix +++ b/nixos/tests/iscsi-root.nix @@ -1,182 +1,180 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - initiatorName = "iqn.2020-08.org.linux-iscsi.initiatorhost:example"; - targetName = "iqn.2003-01.org.linux-iscsi.target.x8664:sn.acf8fd9c23af"; - in - { - name = "iscsi"; - meta = { - maintainers = lib.teams.deshaw.members ++ lib.teams.helsinki-systems.members; - }; +{ pkgs, lib, ... }: +let + initiatorName = "iqn.2020-08.org.linux-iscsi.initiatorhost:example"; + targetName = "iqn.2003-01.org.linux-iscsi.target.x8664:sn.acf8fd9c23af"; +in +{ + name = "iscsi"; + meta = { + maintainers = lib.teams.deshaw.members ++ lib.teams.helsinki-systems.members; + }; - nodes = { - target = - { - config, - pkgs, - lib, - ... 
- }: - { - services.target = { - enable = true; - config = { - fabric_modules = [ ]; - storage_objects = [ - { - dev = "/dev/vdb"; - name = "test"; - plugin = "block"; - write_back = true; - wwn = "92b17c3f-6b40-4168-b082-ceeb7b495522"; - } - ]; - targets = [ - { - fabric = "iscsi"; - tpgs = [ - { - enable = true; - attributes = { - authentication = 0; - generate_node_acls = 1; - }; - luns = [ - { - alias = "94dfe06967"; - alua_tg_pt_gp_name = "default_tg_pt_gp"; - index = 0; - storage_object = "/backstores/block/test"; - } - ]; - node_acls = [ - { - mapped_luns = [ - { - alias = "d42f5bdf8a"; - index = 0; - tpg_lun = 0; - write_protect = false; - } - ]; - node_wwn = initiatorName; - } - ]; - portals = [ - { - ip_address = "[::]"; - iser = false; - offload = false; - port = 3260; - } - ]; - tag = 1; - } - ]; - wwn = targetName; - } - ]; - }; - }; - - networking.firewall.allowedTCPPorts = [ 3260 ]; - networking.firewall.allowedUDPPorts = [ 3260 ]; - - virtualisation.memorySize = 2048; - virtualisation.emptyDiskImages = [ 2048 ]; - }; - - initiatorAuto = - { - nodes, - config, - pkgs, - ... - }: - { - services.openiscsi = { - enable = true; - enableAutoLoginOut = true; - discoverPortal = "target"; - name = initiatorName; - }; - - environment.systemPackages = with pkgs; [ - xfsprogs - ]; - - system.extraDependencies = [ nodes.initiatorRootDisk.system.build.toplevel ]; - - nix.settings = { - substituters = lib.mkForce [ ]; - hashed-mirrors = null; - connect-timeout = 1; + nodes = { + target = + { + config, + pkgs, + lib, + ... + }: + { + services.target = { + enable = true; + config = { + fabric_modules = [ ]; + storage_objects = [ + { + dev = "/dev/vdb"; + name = "test"; + plugin = "block"; + write_back = true; + wwn = "92b17c3f-6b40-4168-b082-ceeb7b495522"; + } + ]; + targets = [ + { + fabric = "iscsi"; + tpgs = [ + { + enable = true; + attributes = { + authentication = 0; + generate_node_acls = 1; + }; + luns = [ + { + alias = "94dfe06967"; + alua_tg_pt_gp_name = "default_tg_pt_gp"; + index = 0; + storage_object = "/backstores/block/test"; + } + ]; + node_acls = [ + { + mapped_luns = [ + { + alias = "d42f5bdf8a"; + index = 0; + tpg_lun = 0; + write_protect = false; + } + ]; + node_wwn = initiatorName; + } + ]; + portals = [ + { + ip_address = "[::]"; + iser = false; + offload = false; + port = 3260; + } + ]; + tag = 1; + } + ]; + wwn = targetName; + } + ]; }; }; - initiatorRootDisk = - { - config, - pkgs, - modulesPath, - lib, - ... - }: - { - boot.loader.grub.enable = false; - boot.kernelParams = lib.mkOverride 5 ([ - "boot.shell_on_fail" - "console=tty1" - "ip=${config.networking.primaryIPAddress}:::255.255.255.0::eth1:none" - ]); + networking.firewall.allowedTCPPorts = [ 3260 ]; + networking.firewall.allowedUDPPorts = [ 3260 ]; - # defaults to true, puts some code in the initrd that tries to mount an overlayfs on /nix/store - virtualisation.writableStore = false; + virtualisation.memorySize = 2048; + virtualisation.emptyDiskImages = [ 2048 ]; + }; - fileSystems = lib.mkOverride 5 { - "/" = { - fsType = "xfs"; - device = "/dev/sda"; - options = [ "_netdev" ]; - }; - }; + initiatorAuto = + { + nodes, + config, + pkgs, + ... 
+ }: + { + services.openiscsi = { + enable = true; + enableAutoLoginOut = true; + discoverPortal = "target"; + name = initiatorName; + }; - boot.iscsi-initiator = { - discoverPortal = "target"; - name = initiatorName; - target = targetName; + environment.systemPackages = with pkgs; [ + xfsprogs + ]; + + system.extraDependencies = [ nodes.initiatorRootDisk.system.build.toplevel ]; + + nix.settings = { + substituters = lib.mkForce [ ]; + hashed-mirrors = null; + connect-timeout = 1; + }; + }; + + initiatorRootDisk = + { + config, + pkgs, + modulesPath, + lib, + ... + }: + { + boot.loader.grub.enable = false; + boot.kernelParams = lib.mkOverride 5 ([ + "boot.shell_on_fail" + "console=tty1" + "ip=${config.networking.primaryIPAddress}:::255.255.255.0::eth1:none" + ]); + + # defaults to true, puts some code in the initrd that tries to mount an overlayfs on /nix/store + virtualisation.writableStore = false; + + fileSystems = lib.mkOverride 5 { + "/" = { + fsType = "xfs"; + device = "/dev/sda"; + options = [ "_netdev" ]; }; }; - }; - testScript = - { nodes, ... }: - '' - target.start() - target.wait_for_unit("iscsi-target.service") + boot.iscsi-initiator = { + discoverPortal = "target"; + name = initiatorName; + target = targetName; + }; + }; + }; - initiatorAuto.start() + testScript = + { nodes, ... }: + '' + target.start() + target.wait_for_unit("iscsi-target.service") - initiatorAuto.wait_for_unit("iscsid.service") - initiatorAuto.wait_for_unit("iscsi.service") - initiatorAuto.get_unit_info("iscsi") + initiatorAuto.start() - initiatorAuto.succeed("set -x; while ! test -e /dev/sda; do sleep 1; done") + initiatorAuto.wait_for_unit("iscsid.service") + initiatorAuto.wait_for_unit("iscsi.service") + initiatorAuto.get_unit_info("iscsi") - initiatorAuto.succeed("mkfs.xfs /dev/sda") - initiatorAuto.succeed("mkdir /mnt && mount /dev/sda /mnt") - initiatorAuto.succeed( - "nixos-install --no-bootloader --no-root-passwd --system ${nodes.initiatorRootDisk.config.system.build.toplevel}" - ) - initiatorAuto.succeed("umount /mnt && rmdir /mnt") - initiatorAuto.shutdown() + initiatorAuto.succeed("set -x; while ! test -e /dev/sda; do sleep 1; done") - initiatorRootDisk.start() - initiatorRootDisk.wait_for_unit("multi-user.target") - initiatorRootDisk.wait_for_unit("iscsid") - initiatorRootDisk.succeed("touch test") - initiatorRootDisk.shutdown() - ''; - } -) + initiatorAuto.succeed("mkfs.xfs /dev/sda") + initiatorAuto.succeed("mkdir /mnt && mount /dev/sda /mnt") + initiatorAuto.succeed( + "nixos-install --no-bootloader --no-root-passwd --system ${nodes.initiatorRootDisk.config.system.build.toplevel}" + ) + initiatorAuto.succeed("umount /mnt && rmdir /mnt") + initiatorAuto.shutdown() + + initiatorRootDisk.start() + initiatorRootDisk.wait_for_unit("multi-user.target") + initiatorRootDisk.wait_for_unit("iscsid") + initiatorRootDisk.succeed("touch test") + initiatorRootDisk.shutdown() + ''; +} diff --git a/nixos/tests/isolate.nix b/nixos/tests/isolate.nix index c6f83240ad08..fe9ea18a2c6c 100644 --- a/nixos/tests/isolate.nix +++ b/nixos/tests/isolate.nix @@ -1,40 +1,38 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "isolate"; - meta.maintainers = with lib.maintainers; [ virchau13 ]; +{ lib, ... }: +{ + name = "isolate"; + meta.maintainers = with lib.maintainers; [ virchau13 ]; - nodes.machine = - { ... }: - { - security.isolate = { - enable = true; - }; + nodes.machine = + { ... 
}: + { + security.isolate = { + enable = true; }; + }; - testScript = '' - bash_path = machine.succeed('realpath $(which bash)').strip() - sleep_path = machine.succeed('realpath $(which sleep)').strip() - def sleep_test(walltime, sleeptime): - return f'isolate --no-default-dirs --wall-time {walltime} ' + \ - f'--dir=/box={box_path} --dir=/nix=/nix --run -- ' + \ - f"{bash_path} -c 'exec -a sleep {sleep_path} {sleeptime}'" + testScript = '' + bash_path = machine.succeed('realpath $(which bash)').strip() + sleep_path = machine.succeed('realpath $(which sleep)').strip() + def sleep_test(walltime, sleeptime): + return f'isolate --no-default-dirs --wall-time {walltime} ' + \ + f'--dir=/box={box_path} --dir=/nix=/nix --run -- ' + \ + f"{bash_path} -c 'exec -a sleep {sleep_path} {sleeptime}'" - def sleep_test_cg(walltime, sleeptime): - return f'isolate --cg --no-default-dirs --wall-time {walltime} ' + \ - f'--dir=/box={box_path} --dir=/nix=/nix --processes=2 --run -- ' + \ - f"{bash_path} -c '( exec -a sleep {sleep_path} {sleeptime} )'" + def sleep_test_cg(walltime, sleeptime): + return f'isolate --cg --no-default-dirs --wall-time {walltime} ' + \ + f'--dir=/box={box_path} --dir=/nix=/nix --processes=2 --run -- ' + \ + f"{bash_path} -c '( exec -a sleep {sleep_path} {sleeptime} )'" - with subtest("without cgroups"): - box_path = machine.succeed('isolate --init').strip() - machine.succeed(sleep_test(1, 0.5)) - machine.fail(sleep_test(0.5, 1)) - machine.succeed('isolate --cleanup') - with subtest("with cgroups"): - box_path = machine.succeed('isolate --cg --init').strip() - machine.succeed(sleep_test_cg(1, 0.5)) - machine.fail(sleep_test_cg(0.5, 1)) - machine.succeed('isolate --cg --cleanup') - ''; - } -) + with subtest("without cgroups"): + box_path = machine.succeed('isolate --init').strip() + machine.succeed(sleep_test(1, 0.5)) + machine.fail(sleep_test(0.5, 1)) + machine.succeed('isolate --cleanup') + with subtest("with cgroups"): + box_path = machine.succeed('isolate --cg --init').strip() + machine.succeed(sleep_test_cg(1, 0.5)) + machine.fail(sleep_test_cg(0.5, 1)) + machine.succeed('isolate --cg --cleanup') + ''; +} diff --git a/nixos/tests/isso.nix b/nixos/tests/isso.nix index f3af293bf75b..a5ed88044b26 100644 --- a/nixos/tests/isso.nix +++ b/nixos/tests/isso.nix @@ -1,36 +1,34 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "isso"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; +{ pkgs, ... }: +{ + name = "isso"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; - nodes.machine = - { config, pkgs, ... }: - { - services.isso = { - enable = true; - settings = { - general = { - dbpath = "/var/lib/isso/comments.db"; - host = "http://localhost"; - }; + nodes.machine = + { config, pkgs, ... 
}: + { + services.isso = { + enable = true; + settings = { + general = { + dbpath = "/var/lib/isso/comments.db"; + host = "http://localhost"; }; }; }; + }; - testScript = - let - port = 8080; - in - '' - machine.wait_for_unit("isso.service") + testScript = + let + port = 8080; + in + '' + machine.wait_for_unit("isso.service") - machine.wait_for_open_port(${toString port}) + machine.wait_for_open_port(${toString port}) - machine.succeed("curl --fail http://localhost:${toString port}/?uri") - machine.succeed("curl --fail http://localhost:${toString port}/js/embed.min.js") - ''; - } -) + machine.succeed("curl --fail http://localhost:${toString port}/?uri") + machine.succeed("curl --fail http://localhost:${toString port}/js/embed.min.js") + ''; +} diff --git a/nixos/tests/jackett.nix b/nixos/tests/jackett.nix index 11931c7c96bd..9ce8209f49dd 100644 --- a/nixos/tests/jackett.nix +++ b/nixos/tests/jackett.nix @@ -1,25 +1,23 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - let - jackettPort = 9117; - in - { - name = "jackett"; - meta.maintainers = with lib.maintainers; [ etu ]; +let + jackettPort = 9117; +in +{ + name = "jackett"; + meta.maintainers = with lib.maintainers; [ etu ]; - nodes.machine = - { pkgs, ... }: - { - services.jackett.enable = true; - services.jackett.port = jackettPort; - }; + nodes.machine = + { pkgs, ... }: + { + services.jackett.enable = true; + services.jackett.port = jackettPort; + }; - testScript = '' - machine.start() - machine.wait_for_unit("jackett.service") - machine.wait_for_open_port(${toString jackettPort}) - machine.succeed("curl --fail http://localhost:${toString jackettPort}/") - ''; - } -) + testScript = '' + machine.start() + machine.wait_for_unit("jackett.service") + machine.wait_for_open_port(${toString jackettPort}) + machine.succeed("curl --fail http://localhost:${toString jackettPort}/") + ''; +} diff --git a/nixos/tests/jellyfin.nix b/nixos/tests/jellyfin.nix index 2b879ff83751..6b4a962e029f 100644 --- a/nixos/tests/jellyfin.nix +++ b/nixos/tests/jellyfin.nix @@ -1,159 +1,157 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - { - name = "jellyfin"; - meta.maintainers = with lib.maintainers; [ minijackson ]; +{ + name = "jellyfin"; + meta.maintainers = with lib.maintainers; [ minijackson ]; - nodes.machine = - { ... }: - { - services.jellyfin.enable = true; - environment.systemPackages = with pkgs; [ ffmpeg ]; + nodes.machine = + { ... 
}: + { + services.jellyfin.enable = true; + environment.systemPackages = with pkgs; [ ffmpeg ]; + }; + + # Documentation of the Jellyfin API: https://api.jellyfin.org/ + # Beware, this link can be resource intensive + testScript = + let + payloads = { + auth = pkgs.writeText "auth.json" ( + builtins.toJSON { + Username = "jellyfin"; + } + ); + empty = pkgs.writeText "empty.json" (builtins.toJSON { }); }; + in + '' + import json + from urllib.parse import urlencode - # Documentation of the Jellyfin API: https://api.jellyfin.org/ - # Beware, this link can be resource intensive - testScript = - let - payloads = { - auth = pkgs.writeText "auth.json" ( - builtins.toJSON { - Username = "jellyfin"; - } - ); - empty = pkgs.writeText "empty.json" (builtins.toJSON { }); - }; - in - '' - import json - from urllib.parse import urlencode + machine.wait_for_unit("jellyfin.service") + machine.wait_for_open_port(8096) + machine.succeed("curl --fail http://localhost:8096/") - machine.wait_for_unit("jellyfin.service") - machine.wait_for_open_port(8096) - machine.succeed("curl --fail http://localhost:8096/") + machine.wait_until_succeeds("curl --fail http://localhost:8096/health | grep Healthy") - machine.wait_until_succeeds("curl --fail http://localhost:8096/health | grep Healthy") - - auth_header = 'MediaBrowser Client="NixOS Integration Tests", DeviceId="1337", Device="Apple II", Version="20.09"' + auth_header = 'MediaBrowser Client="NixOS Integration Tests", DeviceId="1337", Device="Apple II", Version="20.09"' - def api_get(path): - return f"curl --fail 'http://localhost:8096{path}' -H 'X-Emby-Authorization:{auth_header}'" + def api_get(path): + return f"curl --fail 'http://localhost:8096{path}' -H 'X-Emby-Authorization:{auth_header}'" - def api_post(path, json_file=None): - if json_file: - return f"curl --fail -X post 'http://localhost:8096{path}' -d '@{json_file}' -H Content-Type:application/json -H 'X-Emby-Authorization:{auth_header}'" - else: - return f"curl --fail -X post 'http://localhost:8096{path}' -H 'X-Emby-Authorization:{auth_header}'" + def api_post(path, json_file=None): + if json_file: + return f"curl --fail -X post 'http://localhost:8096{path}' -d '@{json_file}' -H Content-Type:application/json -H 'X-Emby-Authorization:{auth_header}'" + else: + return f"curl --fail -X post 'http://localhost:8096{path}' -H 'X-Emby-Authorization:{auth_header}'" - with machine.nested("Wizard completes"): - machine.wait_until_succeeds(api_get("/Startup/Configuration")) - machine.succeed(api_get("/Startup/FirstUser")) - machine.succeed(api_post("/Startup/Complete")) + with machine.nested("Wizard completes"): + machine.wait_until_succeeds(api_get("/Startup/Configuration")) + machine.succeed(api_get("/Startup/FirstUser")) + machine.succeed(api_post("/Startup/Complete")) - with machine.nested("Can login"): - auth_result_str = machine.succeed( - api_post( - "/Users/AuthenticateByName", - "${payloads.auth}", - ) - ) - auth_result = json.loads(auth_result_str) - auth_token = auth_result["AccessToken"] - auth_header += f", Token={auth_token}" + with machine.nested("Can login"): + auth_result_str = machine.succeed( + api_post( + "/Users/AuthenticateByName", + "${payloads.auth}", + ) + ) + auth_result = json.loads(auth_result_str) + auth_token = auth_result["AccessToken"] + auth_header += f", Token={auth_token}" - sessions_result_str = machine.succeed(api_get("/Sessions")) - sessions_result = json.loads(sessions_result_str) + sessions_result_str = machine.succeed(api_get("/Sessions")) + sessions_result = 
json.loads(sessions_result_str) - this_session = [ - session for session in sessions_result if session["DeviceId"] == "1337" - ] - if len(this_session) != 1: - raise Exception("Session not created") + this_session = [ + session for session in sessions_result if session["DeviceId"] == "1337" + ] + if len(this_session) != 1: + raise Exception("Session not created") - me_str = machine.succeed(api_get("/Users/Me")) - me = json.loads(me_str)["Id"] + me_str = machine.succeed(api_get("/Users/Me")) + me = json.loads(me_str)["Id"] - with machine.nested("Can add library"): - tempdir = machine.succeed("mktemp -d -p /var/lib/jellyfin").strip() - machine.succeed(f"chmod 755 '{tempdir}'") + with machine.nested("Can add library"): + tempdir = machine.succeed("mktemp -d -p /var/lib/jellyfin").strip() + machine.succeed(f"chmod 755 '{tempdir}'") - # Generate a dummy video that we can test later - videofile = f"{tempdir}/Big Buck Bunny (2008) [1080p].mkv" - machine.succeed(f"ffmpeg -f lavfi -i testsrc2=duration=5 '{videofile}'") + # Generate a dummy video that we can test later + videofile = f"{tempdir}/Big Buck Bunny (2008) [1080p].mkv" + machine.succeed(f"ffmpeg -f lavfi -i testsrc2=duration=5 '{videofile}'") - add_folder_query = urlencode( - { - "name": "My Library", - "collectionType": "Movies", - "paths": tempdir, - "refreshLibrary": "true", - } - ) + add_folder_query = urlencode( + { + "name": "My Library", + "collectionType": "Movies", + "paths": tempdir, + "refreshLibrary": "true", + } + ) - machine.succeed( - api_post( - f"/Library/VirtualFolders?{add_folder_query}", - "${payloads.empty}", - ) - ) + machine.succeed( + api_post( + f"/Library/VirtualFolders?{add_folder_query}", + "${payloads.empty}", + ) + ) - def is_refreshed(_): - folders_str = machine.succeed(api_get("/Library/VirtualFolders")) - folders = json.loads(folders_str) - print(folders) - return all(folder["RefreshStatus"] == "Idle" for folder in folders) + def is_refreshed(_): + folders_str = machine.succeed(api_get("/Library/VirtualFolders")) + folders = json.loads(folders_str) + print(folders) + return all(folder["RefreshStatus"] == "Idle" for folder in folders) - retry(is_refreshed) + retry(is_refreshed) - with machine.nested("Can identify videos"): - items = [] + with machine.nested("Can identify videos"): + items = [] - # For some reason, having the folder refreshed doesn't mean the - # movie was scanned - def has_movie(_): - global items + # For some reason, having the folder refreshed doesn't mean the + # movie was scanned + def has_movie(_): + global items - items_str = machine.succeed( - api_get(f"/Users/{me}/Items?IncludeItemTypes=Movie&Recursive=true") - ) - items = json.loads(items_str)["Items"] + items_str = machine.succeed( + api_get(f"/Users/{me}/Items?IncludeItemTypes=Movie&Recursive=true") + ) + items = json.loads(items_str)["Items"] - return len(items) == 1 + return len(items) == 1 - retry(has_movie) + retry(has_movie) - video = items[0]["Id"] + video = items[0]["Id"] - item_info_str = machine.succeed(api_get(f"/Users/{me}/Items/{video}")) - item_info = json.loads(item_info_str) + item_info_str = machine.succeed(api_get(f"/Users/{me}/Items/{video}")) + item_info = json.loads(item_info_str) - if item_info["Name"] != "Big Buck Bunny": - raise Exception("Jellyfin failed to properly identify file") + if item_info["Name"] != "Big Buck Bunny": + raise Exception("Jellyfin failed to properly identify file") - with machine.nested("Can read videos"): - media_source_id = item_info["MediaSources"][0]["Id"] + with 
machine.nested("Can read videos"): + media_source_id = item_info["MediaSources"][0]["Id"] - machine.succeed( - "ffmpeg" - + f" -headers 'X-Emby-Authorization:{auth_header}'" - + f" -i http://localhost:8096/Videos/{video}/master.m3u8?mediaSourceId={media_source_id}" - + " /tmp/test.mkv" - ) + machine.succeed( + "ffmpeg" + + f" -headers 'X-Emby-Authorization:{auth_header}'" + + f" -i http://localhost:8096/Videos/{video}/master.m3u8?mediaSourceId={media_source_id}" + + " /tmp/test.mkv" + ) - duration = machine.succeed( - "ffprobe /tmp/test.mkv" - + " -show_entries format=duration" - + " -of compact=print_section=0:nokey=1" - ) + duration = machine.succeed( + "ffprobe /tmp/test.mkv" + + " -show_entries format=duration" + + " -of compact=print_section=0:nokey=1" + ) - if duration.strip() != "5.000000": - raise Exception("Downloaded video has wrong duration") - ''; - } -) + if duration.strip() != "5.000000": + raise Exception("Downloaded video has wrong duration") + ''; +} diff --git a/nixos/tests/jenkins-cli.nix b/nixos/tests/jenkins-cli.nix index fdd2767cf041..69167ffa3a6f 100644 --- a/nixos/tests/jenkins-cli.nix +++ b/nixos/tests/jenkins-cli.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - rec { - name = "jenkins-cli"; - meta = with pkgs.lib.maintainers; { - maintainers = [ pamplemousse ]; - }; +{ pkgs, ... }: +rec { + name = "jenkins-cli"; + meta = with pkgs.lib.maintainers; { + maintainers = [ pamplemousse ]; + }; - nodes = { - machine = - { ... }: - { - services.jenkins = { - enable = true; - withCLI = true; - }; + nodes = { + machine = + { ... }: + { + services.jenkins = { + enable = true; + withCLI = true; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("jenkins") + machine.wait_for_unit("jenkins") - assert "JENKINS_URL" in machine.succeed("env") - assert "http://0.0.0.0:8080" in machine.succeed("echo $JENKINS_URL") + assert "JENKINS_URL" in machine.succeed("env") + assert "http://0.0.0.0:8080" in machine.succeed("echo $JENKINS_URL") - machine.succeed( - "jenkins-cli -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword)" - ) - ''; - } -) + machine.succeed( + "jenkins-cli -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword)" + ) + ''; +} diff --git a/nixos/tests/jenkins.nix b/nixos/tests/jenkins.nix index cd82214fb3ac..7a8e4ea4d0b4 100644 --- a/nixos/tests/jenkins.nix +++ b/nixos/tests/jenkins.nix @@ -4,140 +4,138 @@ # 3. jenkins service not started on slave node # 4. declarative jobs can be added and removed -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "jenkins"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - bjornfor - domenkozar - ]; - }; +{ pkgs, ... }: +{ + name = "jenkins"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + bjornfor + domenkozar + ]; + }; - nodes = { + nodes = { - master = - { ... }: - { - services.jenkins = { + master = + { ... 
}: + { + services.jenkins = { + enable = true; + jobBuilder = { enable = true; - jobBuilder = { - enable = true; - nixJobs = [ - { - job = { - name = "job-1"; - builders = [ - { - shell = '' - echo "Running job-1" - ''; - } - ]; - }; - } + nixJobs = [ + { + job = { + name = "job-1"; + builders = [ + { + shell = '' + echo "Running job-1" + ''; + } + ]; + }; + } - { - job = { - name = "folder-1"; - project-type = "folder"; - }; - } + { + job = { + name = "folder-1"; + project-type = "folder"; + }; + } - { - job = { - name = "folder-1/job-2"; - builders = [ - { - shell = '' - echo "Running job-2" - ''; - } - ]; - }; - } - ]; - }; + { + job = { + name = "folder-1/job-2"; + builders = [ + { + shell = '' + echo "Running job-2" + ''; + } + ]; + }; + } + ]; }; - - specialisation.noJenkinsJobs.configuration = { - services.jenkins.jobBuilder.nixJobs = pkgs.lib.mkForce [ ]; - }; - - # should have no effect - services.jenkinsSlave.enable = true; - - users.users.jenkins.extraGroups = [ "users" ]; - - systemd.services.jenkins.serviceConfig.TimeoutStartSec = "6min"; - - # Increase disk space to prevent this issue: - # - # WARNING h.n.DiskSpaceMonitorDescriptor#markNodeOfflineOrOnline: Making Built-In Node offline temporarily due to the lack of disk space - virtualisation.diskSize = 2 * 1024; }; - slave = - { ... }: - { - services.jenkinsSlave.enable = true; - - users.users.jenkins.extraGroups = [ "users" ]; + specialisation.noJenkinsJobs.configuration = { + services.jenkins.jobBuilder.nixJobs = pkgs.lib.mkForce [ ]; }; - }; + # should have no effect + services.jenkinsSlave.enable = true; - testScript = - { nodes, ... }: - let - configWithoutJobs = "${nodes.master.system.build.toplevel}/specialisation/noJenkinsJobs"; - jenkinsPort = nodes.master.services.jenkins.port; - jenkinsUrl = "http://localhost:${toString jenkinsPort}"; - in - '' - start_all() + users.users.jenkins.extraGroups = [ "users" ]; - master.wait_for_unit("default.target") + systemd.services.jenkins.serviceConfig.TimeoutStartSec = "6min"; - assert "Authentication required" in master.succeed("curl http://localhost:8080") + # Increase disk space to prevent this issue: + # + # WARNING h.n.DiskSpaceMonitorDescriptor#markNodeOfflineOrOnline: Making Built-In Node offline temporarily due to the lack of disk space + virtualisation.diskSize = 2 * 1024; + }; - for host in master, slave: - groups = host.succeed("sudo -u jenkins groups") - assert "jenkins" in groups - assert "users" in groups + slave = + { ... }: + { + services.jenkinsSlave.enable = true; - slave.fail("systemctl is-enabled jenkins.service") + users.users.jenkins.extraGroups = [ "users" ]; + }; - slave.succeed("java -fullversion") + }; - with subtest("jobs are declarative"): - # Check that jobs are created on disk. - master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/job-1/config.xml") - master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/config.xml") - master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml") + testScript = + { nodes, ... }: + let + configWithoutJobs = "${nodes.master.system.build.toplevel}/specialisation/noJenkinsJobs"; + jenkinsPort = nodes.master.services.jenkins.port; + jenkinsUrl = "http://localhost:${toString jenkinsPort}"; + in + '' + start_all() - # Verify that jenkins also sees the jobs. 
- out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs") - jobs = [x.strip() for x in out.splitlines()] - # Seeing jobs inside folders requires the Folders plugin - # (https://plugins.jenkins.io/cloudbees-folder/), which we don't have - # in this vanilla jenkins install, so limit ourself to non-folder jobs. - assert jobs == ['job-1'], f"jobs != ['job-1']: {jobs}" + master.wait_for_unit("default.target") - master.succeed( - "${configWithoutJobs}/bin/switch-to-configuration test >&2" - ) + assert "Authentication required" in master.succeed("curl http://localhost:8080") - # Check that jobs are removed from disk. - master.wait_until_fails("test -f /var/lib/jenkins/jobs/job-1/config.xml") - master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/config.xml") - master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml") + for host in master, slave: + groups = host.succeed("sudo -u jenkins groups") + assert "jenkins" in groups + assert "users" in groups - # Verify that jenkins also sees the jobs as removed. - out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs") - jobs = [x.strip() for x in out.splitlines()] - assert jobs == [], f"jobs != []: {jobs}" - ''; - } -) + slave.fail("systemctl is-enabled jenkins.service") + + slave.succeed("java -fullversion") + + with subtest("jobs are declarative"): + # Check that jobs are created on disk. + master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/job-1/config.xml") + master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/config.xml") + master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml") + + # Verify that jenkins also sees the jobs. + out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs") + jobs = [x.strip() for x in out.splitlines()] + # Seeing jobs inside folders requires the Folders plugin + # (https://plugins.jenkins.io/cloudbees-folder/), which we don't have + # in this vanilla jenkins install, so limit ourself to non-folder jobs. + assert jobs == ['job-1'], f"jobs != ['job-1']: {jobs}" + + master.succeed( + "${configWithoutJobs}/bin/switch-to-configuration test >&2" + ) + + # Check that jobs are removed from disk. + master.wait_until_fails("test -f /var/lib/jenkins/jobs/job-1/config.xml") + master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/config.xml") + master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml") + + # Verify that jenkins also sees the jobs as removed. + out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs") + jobs = [x.strip() for x in out.splitlines()] + assert jobs == [], f"jobs != []: {jobs}" + ''; +} diff --git a/nixos/tests/jibri.nix b/nixos/tests/jibri.nix index d42e82bf1f47..2bfc41ed188d 100644 --- a/nixos/tests/jibri.nix +++ b/nixos/tests/jibri.nix @@ -1,74 +1,72 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "jibri"; - meta = with pkgs.lib; { - maintainers = teams.jitsi.members; - }; +{ pkgs, ... }: +{ + name = "jibri"; + meta = with pkgs.lib; { + maintainers = teams.jitsi.members; + }; - nodes.machine = - { config, pkgs, ... }: - { - virtualisation.memorySize = 5120; + nodes.machine = + { config, pkgs, ... 
}: + { + virtualisation.memorySize = 5120; - services.jitsi-meet = { - enable = true; - hostName = "machine"; - jibri.enable = true; - }; - services.jibri.ignoreCert = true; - services.jitsi-videobridge.openFirewall = true; + services.jitsi-meet = { + enable = true; + hostName = "machine"; + jibri.enable = true; + }; + services.jibri.ignoreCert = true; + services.jitsi-videobridge.openFirewall = true; - networking.firewall.allowedTCPPorts = [ - 80 - 443 - ]; + networking.firewall.allowedTCPPorts = [ + 80 + 443 + ]; - services.nginx.virtualHosts.machine = { - enableACME = true; - forceSSL = true; - }; - - security.acme.defaults.email = "me@example.org"; - security.acme.acceptTerms = true; - security.acme.defaults.server = "https://example.com"; # self-signed only + services.nginx.virtualHosts.machine = { + enableACME = true; + forceSSL = true; }; - testScript = '' - machine.wait_for_unit("jitsi-videobridge2.service") - machine.wait_for_unit("jicofo.service") - machine.wait_for_unit("nginx.service") - machine.wait_for_unit("prosody.service") - machine.wait_for_unit("jibri.service") + security.acme.defaults.email = "me@example.org"; + security.acme.acceptTerms = true; + security.acme.defaults.server = "https://example.com"; # self-signed only + }; - machine.wait_until_succeeds( - "journalctl -b -u prosody -o cat | grep -q 'Authenticated as focus@auth.machine'", timeout=31 - ) - machine.wait_until_succeeds( - "journalctl -b -u prosody -o cat | grep -q 'Authenticated as jvb@auth.machine'", timeout=32 - ) - machine.wait_until_succeeds( - "journalctl -b -u prosody -o cat | grep -q 'Authenticated as jibri@auth.machine'", timeout=33 - ) - machine.wait_until_succeeds( - "cat /var/log/jitsi/jibri/log.0.txt | grep -q 'Joined MUC: jibribrewery@internal.auth.machine'", timeout=34 - ) + testScript = '' + machine.wait_for_unit("jitsi-videobridge2.service") + machine.wait_for_unit("jicofo.service") + machine.wait_for_unit("nginx.service") + machine.wait_for_unit("prosody.service") + machine.wait_for_unit("jibri.service") - assert '"busyStatus":"IDLE","health":{"healthStatus":"HEALTHY"' in machine.succeed( - "curl -X GET http://machine:2222/jibri/api/v1.0/health" - ) - machine.succeed( - """curl -H "Content-Type: application/json" -X POST http://localhost:2222/jibri/api/v1.0/startService -d '{"sessionId": "RecordTest","callParams":{"callUrlInfo":{"baseUrl": "https://machine","callName": "TestCall"}},"callLoginParams":{"domain": "recorder.machine", "username": "recorder", "password": "'"$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"'" },"sinkType": "file"}'""" - ) - machine.wait_until_succeeds( - "cat /var/log/jitsi/jibri/log.0.txt | grep -q 'File recording service transitioning from state Starting up to Running'", timeout=35 - ) - machine.succeed( - """sleep 15 && curl -H "Content-Type: application/json" -X POST http://localhost:2222/jibri/api/v1.0/stopService -d '{"sessionId": "RecordTest","callParams":{"callUrlInfo":{"baseUrl": "https://machine","callName": "TestCall"}},"callLoginParams":{"domain": "recorder.machine", "username": "recorder", "password": "'"$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"'" },"sinkType": "file"}'""" - ) - machine.wait_until_succeeds( - "cat /var/log/jitsi/jibri/log.0.txt | grep -q 'Finalize script finished with exit value 0'", timeout=36 - ) - ''; - } -) + machine.wait_until_succeeds( + "journalctl -b -u prosody -o cat | grep -q 'Authenticated as focus@auth.machine'", timeout=31 + ) + machine.wait_until_succeeds( + "journalctl -b -u prosody -o cat | grep -q 
'Authenticated as jvb@auth.machine'", timeout=32 + ) + machine.wait_until_succeeds( + "journalctl -b -u prosody -o cat | grep -q 'Authenticated as jibri@auth.machine'", timeout=33 + ) + machine.wait_until_succeeds( + "cat /var/log/jitsi/jibri/log.0.txt | grep -q 'Joined MUC: jibribrewery@internal.auth.machine'", timeout=34 + ) + + assert '"busyStatus":"IDLE","health":{"healthStatus":"HEALTHY"' in machine.succeed( + "curl -X GET http://machine:2222/jibri/api/v1.0/health" + ) + machine.succeed( + """curl -H "Content-Type: application/json" -X POST http://localhost:2222/jibri/api/v1.0/startService -d '{"sessionId": "RecordTest","callParams":{"callUrlInfo":{"baseUrl": "https://machine","callName": "TestCall"}},"callLoginParams":{"domain": "recorder.machine", "username": "recorder", "password": "'"$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"'" },"sinkType": "file"}'""" + ) + machine.wait_until_succeeds( + "cat /var/log/jitsi/jibri/log.0.txt | grep -q 'File recording service transitioning from state Starting up to Running'", timeout=35 + ) + machine.succeed( + """sleep 15 && curl -H "Content-Type: application/json" -X POST http://localhost:2222/jibri/api/v1.0/stopService -d '{"sessionId": "RecordTest","callParams":{"callUrlInfo":{"baseUrl": "https://machine","callName": "TestCall"}},"callLoginParams":{"domain": "recorder.machine", "username": "recorder", "password": "'"$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"'" },"sinkType": "file"}'""" + ) + machine.wait_until_succeeds( + "cat /var/log/jitsi/jibri/log.0.txt | grep -q 'Finalize script finished with exit value 0'", timeout=36 + ) + ''; +} diff --git a/nixos/tests/jirafeau.nix b/nixos/tests/jirafeau.nix index 6cace65ed6f3..ebbd637a13ec 100644 --- a/nixos/tests/jirafeau.nix +++ b/nixos/tests/jirafeau.nix @@ -1,24 +1,22 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "jirafeau"; - meta.maintainers = [ ]; +{ + name = "jirafeau"; + meta.maintainers = [ ]; - nodes.machine = - { pkgs, ... }: - { - services.jirafeau = { - enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.jirafeau = { + enable = true; }; + }; - testScript = '' - machine.start() - machine.wait_for_unit("phpfpm-jirafeau.service") - machine.wait_for_unit("nginx.service") - machine.wait_for_open_port(80) - machine.succeed("curl -sSfL http://localhost/ | grep 'Jirafeau'") - ''; - } -) + testScript = '' + machine.start() + machine.wait_for_unit("phpfpm-jirafeau.service") + machine.wait_for_unit("nginx.service") + machine.wait_for_open_port(80) + machine.succeed("curl -sSfL http://localhost/ | grep 'Jirafeau'") + ''; +} diff --git a/nixos/tests/jitsi-meet.nix b/nixos/tests/jitsi-meet.nix index a9934700ecf3..2181ed863a88 100644 --- a/nixos/tests/jitsi-meet.nix +++ b/nixos/tests/jitsi-meet.nix @@ -1,80 +1,78 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "jitsi-meet"; - meta = with pkgs.lib; { - maintainers = teams.jitsi.members; - }; +{ pkgs, ... }: +{ + name = "jitsi-meet"; + meta = with pkgs.lib; { + maintainers = teams.jitsi.members; + }; - nodes = { - client = - { nodes, pkgs, ... }: - { + nodes = { + client = + { nodes, pkgs, ... }: + { + }; + server = + { config, pkgs, ... }: + { + services.jitsi-meet = { + enable = true; + hostName = "server"; }; - server = - { config, pkgs, ... 
}: - { - services.jitsi-meet = { - enable = true; - hostName = "server"; - }; - services.jitsi-videobridge.openFirewall = true; + services.jitsi-videobridge.openFirewall = true; - networking.firewall.allowedTCPPorts = [ - 80 - 443 - ]; + networking.firewall.allowedTCPPorts = [ + 80 + 443 + ]; - services.nginx.virtualHosts.server = { - enableACME = true; - forceSSL = true; - }; + services.nginx.virtualHosts.server = { + enableACME = true; + forceSSL = true; + }; - security.acme.acceptTerms = true; - security.acme.defaults.email = "me@example.org"; - security.acme.defaults.server = "https://example.com"; # self-signed only + security.acme.acceptTerms = true; + security.acme.defaults.email = "me@example.org"; + security.acme.defaults.server = "https://example.com"; # self-signed only - specialisation.caddy = { - inheritParentConfig = true; - configuration = { - services.jitsi-meet = { - caddy.enable = true; - nginx.enable = false; - }; - services.caddy.virtualHosts.${config.services.jitsi-meet.hostName}.extraConfig = '' - tls internal - ''; + specialisation.caddy = { + inheritParentConfig = true; + configuration = { + services.jitsi-meet = { + caddy.enable = true; + nginx.enable = false; }; + services.caddy.virtualHosts.${config.services.jitsi-meet.hostName}.extraConfig = '' + tls internal + ''; }; }; - }; + }; + }; - testScript = - { nodes, ... }: - '' - server.wait_for_unit("jitsi-videobridge2.service") - server.wait_for_unit("jicofo.service") - server.wait_for_unit("nginx.service") - server.wait_for_unit("prosody.service") + testScript = + { nodes, ... }: + '' + server.wait_for_unit("jitsi-videobridge2.service") + server.wait_for_unit("jicofo.service") + server.wait_for_unit("nginx.service") + server.wait_for_unit("prosody.service") - server.wait_until_succeeds( - "journalctl -b -u prosody -o cat | grep -q 'Authenticated as focus@auth.server'" - ) - server.wait_until_succeeds( - "journalctl -b -u prosody -o cat | grep -q 'Authenticated as jvb@auth.server'" - ) + server.wait_until_succeeds( + "journalctl -b -u prosody -o cat | grep -q 'Authenticated as focus@auth.server'" + ) + server.wait_until_succeeds( + "journalctl -b -u prosody -o cat | grep -q 'Authenticated as jvb@auth.server'" + ) - client.wait_for_unit("network.target") + client.wait_for_unit("network.target") - def client_curl(): - assert "Jitsi Meet" in client.succeed("curl -sSfkL http://server/") + def client_curl(): + assert "Jitsi Meet" in client.succeed("curl -sSfkL http://server/") - client_curl() + client_curl() - with subtest("Testing backup service"): - server.succeed("${nodes.server.system.build.toplevel}/specialisation/caddy/bin/switch-to-configuration test") - server.wait_for_unit("caddy.service") - client_curl() - ''; - } -) + with subtest("Testing backup service"): + server.succeed("${nodes.server.system.build.toplevel}/specialisation/caddy/bin/switch-to-configuration test") + server.wait_for_unit("caddy.service") + client_curl() + ''; +} diff --git a/nixos/tests/jotta-cli.nix b/nixos/tests/jotta-cli.nix index 27f576f93a53..caf3ebeaad78 100644 --- a/nixos/tests/jotta-cli.nix +++ b/nixos/tests/jotta-cli.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { +{ pkgs, ... }: +{ - name = "jotta-cli"; - meta.maintainers = with pkgs.lib.maintainers; [ evenbrenden ]; + name = "jotta-cli"; + meta.maintainers = with pkgs.lib.maintainers; [ evenbrenden ]; - nodes.machine = - { pkgs, ... 
}: - { - services.jotta-cli.enable = true; - imports = [ ./common/user-account.nix ]; - }; + nodes.machine = + { pkgs, ... }: + { + services.jotta-cli.enable = true; + imports = [ ./common/user-account.nix ]; + }; - testScript = - { nodes, ... }: - let - uid = toString nodes.machine.users.users.alice.uid; - in - '' - machine.start() + testScript = + { nodes, ... }: + let + uid = toString nodes.machine.users.users.alice.uid; + in + '' + machine.start() - machine.succeed("loginctl enable-linger alice") - machine.wait_for_unit("user@${uid}.service") + machine.succeed("loginctl enable-linger alice") + machine.wait_for_unit("user@${uid}.service") - machine.wait_for_unit("jottad.service", "alice") - machine.wait_for_open_unix_socket("/run/user/${uid}/jottad/jottad.socket") + machine.wait_for_unit("jottad.service", "alice") + machine.wait_for_open_unix_socket("/run/user/${uid}/jottad/jottad.socket") - # "jotta-cli version" should fail if jotta-cli cannot connect to jottad - machine.succeed('XDG_RUNTIME_DIR=/run/user/${uid} su alice -c "jotta-cli version"') - ''; - } -) + # "jotta-cli version" should fail if jotta-cli cannot connect to jottad + machine.succeed('XDG_RUNTIME_DIR=/run/user/${uid} su alice -c "jotta-cli version"') + ''; +} diff --git a/nixos/tests/kanidm-provisioning.nix b/nixos/tests/kanidm-provisioning.nix index 8f0ca0ec0859..c6e32f35ed08 100644 --- a/nixos/tests/kanidm-provisioning.nix +++ b/nixos/tests/kanidm-provisioning.nix @@ -1,522 +1,520 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - certs = import ./common/acme/server/snakeoil-certs.nix; - serverDomain = certs.domain; +{ pkgs, ... }: +let + certs = import ./common/acme/server/snakeoil-certs.nix; + serverDomain = certs.domain; - # copy certs to store to work around mount namespacing - certsPath = pkgs.runCommandNoCC "snakeoil-certs" { } '' - mkdir $out - cp ${certs."${serverDomain}".cert} $out/snakeoil.crt - cp ${certs."${serverDomain}".key} $out/snakeoil.key - ''; + # copy certs to store to work around mount namespacing + certsPath = pkgs.runCommandNoCC "snakeoil-certs" { } '' + mkdir $out + cp ${certs."${serverDomain}".cert} $out/snakeoil.crt + cp ${certs."${serverDomain}".key} $out/snakeoil.key + ''; - provisionAdminPassword = "very-strong-password-for-admin"; - provisionIdmAdminPassword = "very-strong-password-for-idm-admin"; - provisionIdmAdminPassword2 = "very-strong-alternative-password-for-idm-admin"; - in - { - name = "kanidm-provisioning"; - meta.maintainers = with pkgs.lib.maintainers; [ oddlama ]; + provisionAdminPassword = "very-strong-password-for-admin"; + provisionIdmAdminPassword = "very-strong-password-for-idm-admin"; + provisionIdmAdminPassword2 = "very-strong-alternative-password-for-idm-admin"; +in +{ + name = "kanidm-provisioning"; + meta.maintainers = with pkgs.lib.maintainers; [ oddlama ]; - nodes.provision = - { pkgs, lib, ... }: - { - services.kanidm = { - package = pkgs.kanidmWithSecretProvisioning_1_6; - enableServer = true; - serverSettings = { - origin = "https://${serverDomain}"; - domain = serverDomain; - bindaddress = "[::]:443"; - ldapbindaddress = "[::1]:636"; - tls_chain = "${certsPath}/snakeoil.crt"; - tls_key = "${certsPath}/snakeoil.key"; - }; - # So we can check whether provisioning did what we wanted - enableClient = true; - clientSettings = { - uri = "https://${serverDomain}"; - verify_ca = true; - verify_hostnames = true; + nodes.provision = + { pkgs, lib, ... 
}: + { + services.kanidm = { + package = pkgs.kanidmWithSecretProvisioning_1_6; + enableServer = true; + serverSettings = { + origin = "https://${serverDomain}"; + domain = serverDomain; + bindaddress = "[::]:443"; + ldapbindaddress = "[::1]:636"; + tls_chain = "${certsPath}/snakeoil.crt"; + tls_key = "${certsPath}/snakeoil.key"; + }; + # So we can check whether provisioning did what we wanted + enableClient = true; + clientSettings = { + uri = "https://${serverDomain}"; + verify_ca = true; + verify_hostnames = true; + }; + }; + + specialisation.credentialProvision.configuration = + { ... }: + { + services.kanidm.provision = lib.mkForce { + enable = true; + adminPasswordFile = pkgs.writeText "admin-pw" provisionAdminPassword; + idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword; }; }; - specialisation.credentialProvision.configuration = - { ... }: - { - services.kanidm.provision = lib.mkForce { - enable = true; - adminPasswordFile = pkgs.writeText "admin-pw" provisionAdminPassword; - idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword; - }; + specialisation.changedCredential.configuration = + { ... }: + { + services.kanidm.provision = lib.mkForce { + enable = true; + idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword2; }; + }; - specialisation.changedCredential.configuration = - { ... }: - { - services.kanidm.provision = lib.mkForce { - enable = true; - idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword2; + specialisation.addEntities.configuration = + { ... }: + { + services.kanidm.provision = lib.mkForce { + enable = true; + # Test whether credential recovery works without specific idmAdmin password + #idmAdminPasswordFile = + + groups.supergroup1 = { + members = [ "testgroup1" ]; }; - }; - specialisation.addEntities.configuration = - { ... 
}: - { - services.kanidm.provision = lib.mkForce { - enable = true; - # Test whether credential recovery works without specific idmAdmin password - #idmAdminPasswordFile = + groups.testgroup1 = { }; - groups.supergroup1 = { - members = [ "testgroup1" ]; - }; + persons.testuser1 = { + displayName = "Test User"; + legalName = "Jane Doe"; + mailAddresses = [ "jane.doe@example.com" ]; + groups = [ + "testgroup1" + "service1-access" + ]; + }; - groups.testgroup1 = { }; + persons.testuser2 = { + displayName = "Powerful Test User"; + legalName = "Ryouiki Tenkai"; + groups = [ "service1-admin" ]; + }; - persons.testuser1 = { - displayName = "Test User"; - legalName = "Jane Doe"; - mailAddresses = [ "jane.doe@example.com" ]; - groups = [ - "testgroup1" - "service1-access" - ]; - }; - - persons.testuser2 = { - displayName = "Powerful Test User"; - legalName = "Ryouiki Tenkai"; - groups = [ "service1-admin" ]; - }; - - groups.service1-access = { }; - groups.service1-admin = { }; - systems.oauth2.service1 = { - displayName = "Service One"; - originUrl = "https://one.example.com/"; - originLanding = "https://one.example.com/landing"; - basicSecretFile = pkgs.writeText "bs-service1" "very-strong-secret-for-service1"; - scopeMaps.service1-access = [ - "openid" - "email" - "profile" - ]; - supplementaryScopeMaps.service1-admin = [ "admin" ]; - claimMaps.groups = { - valuesByGroup.service1-admin = [ "admin" ]; - }; - }; - - systems.oauth2.service2 = { - displayName = "Service Two"; - originUrl = "https://two.example.com/"; - originLanding = "https://landing2.example.com/"; - # Test not setting secret - # basicSecretFile = - allowInsecureClientDisablePkce = true; - preferShortUsername = true; + groups.service1-access = { }; + groups.service1-admin = { }; + systems.oauth2.service1 = { + displayName = "Service One"; + originUrl = "https://one.example.com/"; + originLanding = "https://one.example.com/landing"; + basicSecretFile = pkgs.writeText "bs-service1" "very-strong-secret-for-service1"; + scopeMaps.service1-access = [ + "openid" + "email" + "profile" + ]; + supplementaryScopeMaps.service1-admin = [ "admin" ]; + claimMaps.groups = { + valuesByGroup.service1-admin = [ "admin" ]; }; }; - }; - specialisation.changeAttributes.configuration = - { ... 
}: - { - services.kanidm.provision = lib.mkForce { - enable = true; - # Changing admin credentials at any time should not be a problem: - idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword; - - groups.supergroup1 = { - #members = ["testgroup1"]; - }; - - groups.testgroup1 = { }; - - persons.testuser1 = { - displayName = "Test User (changed)"; - legalName = "Jane Doe (changed)"; - mailAddresses = [ - "jane.doe@example.com" - "second.doe@example.com" - ]; - groups = [ - #"testgroup1" - "service1-access" - ]; - }; - - persons.testuser2 = { - displayName = "Powerful Test User (changed)"; - legalName = "Ryouiki Tenkai (changed)"; - groups = [ "service1-admin" ]; - }; - - groups.service1-access = { }; - groups.service1-admin = { }; - systems.oauth2.service1 = { - displayName = "Service One (changed)"; - # multiple origin urls - originUrl = [ - "https://changed-one.example.com/" - "https://changed-one.example.org/" - ]; - originLanding = "https://changed-one.example.com/landing-changed"; - basicSecretFile = pkgs.writeText "bs-service1" "changed-very-strong-secret-for-service1"; - scopeMaps.service1-access = [ - "openid" - "email" - #"profile" - ]; - supplementaryScopeMaps.service1-admin = [ "adminchanged" ]; - claimMaps.groups = { - valuesByGroup.service1-admin = [ "adminchanged" ]; - }; - }; - - systems.oauth2.service2 = { - displayName = "Service Two (changed)"; - originUrl = "https://changed-two.example.com/"; - originLanding = "https://changed-landing2.example.com/"; - # Test not setting secret - # basicSecretFile = - allowInsecureClientDisablePkce = false; - preferShortUsername = false; - }; + systems.oauth2.service2 = { + displayName = "Service Two"; + originUrl = "https://two.example.com/"; + originLanding = "https://landing2.example.com/"; + # Test not setting secret + # basicSecretFile = + allowInsecureClientDisablePkce = true; + preferShortUsername = true; }; }; + }; - specialisation.removeAttributes.configuration = - { ... }: - { - services.kanidm.provision = lib.mkForce { - enable = true; - idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword; + specialisation.changeAttributes.configuration = + { ... 
}: + { + services.kanidm.provision = lib.mkForce { + enable = true; + # Changing admin credentials at any time should not be a problem: + idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword; - groups.supergroup1 = { }; + groups.supergroup1 = { + #members = ["testgroup1"]; + }; - persons.testuser1 = { - displayName = "Test User (changed)"; - }; + groups.testgroup1 = { }; - persons.testuser2 = { - displayName = "Powerful Test User (changed)"; - groups = [ "service1-admin" ]; - }; + persons.testuser1 = { + displayName = "Test User (changed)"; + legalName = "Jane Doe (changed)"; + mailAddresses = [ + "jane.doe@example.com" + "second.doe@example.com" + ]; + groups = [ + #"testgroup1" + "service1-access" + ]; + }; - groups.service1-access = { }; - groups.service1-admin = { }; - systems.oauth2.service1 = { - displayName = "Service One (changed)"; - originUrl = "https://changed-one.example.com/"; - originLanding = "https://changed-one.example.com/landing-changed"; - basicSecretFile = pkgs.writeText "bs-service1" "changed-very-strong-secret-for-service1"; - # Removing maps requires setting them to the empty list - scopeMaps.service1-access = [ ]; - supplementaryScopeMaps.service1-admin = [ ]; - }; + persons.testuser2 = { + displayName = "Powerful Test User (changed)"; + legalName = "Ryouiki Tenkai (changed)"; + groups = [ "service1-admin" ]; + }; - systems.oauth2.service2 = { - displayName = "Service Two (changed)"; - originUrl = "https://changed-two.example.com/"; - originLanding = "https://changed-landing2.example.com/"; + groups.service1-access = { }; + groups.service1-admin = { }; + systems.oauth2.service1 = { + displayName = "Service One (changed)"; + # multiple origin urls + originUrl = [ + "https://changed-one.example.com/" + "https://changed-one.example.org/" + ]; + originLanding = "https://changed-one.example.com/landing-changed"; + basicSecretFile = pkgs.writeText "bs-service1" "changed-very-strong-secret-for-service1"; + scopeMaps.service1-access = [ + "openid" + "email" + #"profile" + ]; + supplementaryScopeMaps.service1-admin = [ "adminchanged" ]; + claimMaps.groups = { + valuesByGroup.service1-admin = [ "adminchanged" ]; }; }; - }; - specialisation.removeEntities.configuration = - { ... }: - { - services.kanidm.provision = lib.mkForce { - enable = true; - idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword; + systems.oauth2.service2 = { + displayName = "Service Two (changed)"; + originUrl = "https://changed-two.example.com/"; + originLanding = "https://changed-landing2.example.com/"; + # Test not setting secret + # basicSecretFile = + allowInsecureClientDisablePkce = false; + preferShortUsername = false; }; }; + }; - security.pki.certificateFiles = [ certs.ca.cert ]; + specialisation.removeAttributes.configuration = + { ... }: + { + services.kanidm.provision = lib.mkForce { + enable = true; + idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword; - networking.hosts."::1" = [ serverDomain ]; - networking.firewall.allowedTCPPorts = [ 443 ]; + groups.supergroup1 = { }; - users.users.kanidm.shell = pkgs.bashInteractive; + persons.testuser1 = { + displayName = "Test User (changed)"; + }; - environment.systemPackages = with pkgs; [ - kanidm - openldap - ripgrep - jq - ]; - }; + persons.testuser2 = { + displayName = "Powerful Test User (changed)"; + groups = [ "service1-admin" ]; + }; - testScript = - { nodes, ... }: - let - # We need access to the config file in the test script. 
- filteredConfig = pkgs.lib.converge (pkgs.lib.filterAttrsRecursive ( - _: v: v != null - )) nodes.provision.services.kanidm.serverSettings; - serverConfigFile = (pkgs.formats.toml { }).generate "server.toml" filteredConfig; + groups.service1-access = { }; + groups.service1-admin = { }; + systems.oauth2.service1 = { + displayName = "Service One (changed)"; + originUrl = "https://changed-one.example.com/"; + originLanding = "https://changed-one.example.com/landing-changed"; + basicSecretFile = pkgs.writeText "bs-service1" "changed-very-strong-secret-for-service1"; + # Removing maps requires setting them to the empty list + scopeMaps.service1-access = [ ]; + supplementaryScopeMaps.service1-admin = [ ]; + }; - specialisations = "${nodes.provision.system.build.toplevel}/specialisation"; - in - '' - import re + systems.oauth2.service2 = { + displayName = "Service Two (changed)"; + originUrl = "https://changed-two.example.com/"; + originLanding = "https://changed-landing2.example.com/"; + }; + }; + }; - def assert_contains(haystack, needle): - if needle not in haystack: - print("The haystack that will cause the following exception is:") - print("---") - print(haystack) - print("---") - raise Exception(f"Expected string '{needle}' was not found") + specialisation.removeEntities.configuration = + { ... }: + { + services.kanidm.provision = lib.mkForce { + enable = true; + idmAdminPasswordFile = pkgs.writeText "idm-admin-pw" provisionIdmAdminPassword; + }; + }; - def assert_matches(haystack, expr): - if not re.search(expr, haystack): - print("The haystack that will cause the following exception is:") - print("---") - print(haystack) - print("---") - raise Exception(f"Expected regex '{expr}' did not match") + security.pki.certificateFiles = [ certs.ca.cert ]; - def assert_lacks(haystack, needle): - if needle in haystack: - print("The haystack that will cause the following exception is:") - print("---") - print(haystack, end="") - print("---") - raise Exception(f"Unexpected string '{needle}' was found") + networking.hosts."::1" = [ serverDomain ]; + networking.firewall.allowedTCPPorts = [ 443 ]; - provision.start() + users.users.kanidm.shell = pkgs.bashInteractive; - def provision_login(pw): - provision.wait_for_unit("kanidm.service") - provision.wait_until_succeeds("curl -Lsf https://${serverDomain} | grep Kanidm") - if pw is None: - pw = provision.succeed("su - kanidm -c 'kanidmd recover-account -c ${serverConfigFile} idm_admin 2>&1 | rg -o \'[A-Za-z0-9]{48}\' '").strip().removeprefix("'").removesuffix("'") - out = provision.succeed(f"KANIDM_PASSWORD={pw} kanidm login -D idm_admin") - assert_contains(out, "Login Success for idm_admin") + environment.systemPackages = with pkgs; [ + kanidm + openldap + ripgrep + jq + ]; + }; - with subtest("Test Provisioning - setup"): - provision_login(None) - provision.succeed("kanidm logout -D idm_admin") + testScript = + { nodes, ... }: + let + # We need access to the config file in the test script. 
+ filteredConfig = pkgs.lib.converge (pkgs.lib.filterAttrsRecursive ( + _: v: v != null + )) nodes.provision.services.kanidm.serverSettings; + serverConfigFile = (pkgs.formats.toml { }).generate "server.toml" filteredConfig; - with subtest("Test Provisioning - credentialProvision"): - provision.succeed('${specialisations}/credentialProvision/bin/switch-to-configuration test') - provision_login("${provisionIdmAdminPassword}") + specialisations = "${nodes.provision.system.build.toplevel}/specialisation"; + in + '' + import re - # Make sure neither password is logged - provision.fail("journalctl --since -10m --unit kanidm.service --grep '${provisionAdminPassword}'") - provision.fail("journalctl --since -10m --unit kanidm.service --grep '${provisionIdmAdminPassword}'") + def assert_contains(haystack, needle): + if needle not in haystack: + print("The haystack that will cause the following exception is:") + print("---") + print(haystack) + print("---") + raise Exception(f"Expected string '{needle}' was not found") - # Test provisioned admin pw - out = provision.succeed("KANIDM_PASSWORD=${provisionAdminPassword} kanidm login -D admin") - assert_contains(out, "Login Success for admin") - provision.succeed("kanidm logout -D admin") - provision.succeed("kanidm logout -D idm_admin") + def assert_matches(haystack, expr): + if not re.search(expr, haystack): + print("The haystack that will cause the following exception is:") + print("---") + print(haystack) + print("---") + raise Exception(f"Expected regex '{expr}' did not match") - with subtest("Test Provisioning - changedCredential"): - provision.succeed('${specialisations}/changedCredential/bin/switch-to-configuration test') - provision_login("${provisionIdmAdminPassword2}") - provision.succeed("kanidm logout -D idm_admin") + def assert_lacks(haystack, needle): + if needle in haystack: + print("The haystack that will cause the following exception is:") + print("---") + print(haystack, end="") + print("---") + raise Exception(f"Unexpected string '{needle}' was found") - with subtest("Test Provisioning - addEntities"): - provision.succeed('${specialisations}/addEntities/bin/switch-to-configuration test') - # Unspecified idm admin password - provision_login(None) + provision.start() - out = provision.succeed("kanidm group get testgroup1") - assert_contains(out, "name: testgroup1") + def provision_login(pw): + provision.wait_for_unit("kanidm.service") + provision.wait_until_succeeds("curl -Lsf https://${serverDomain} | grep Kanidm") + if pw is None: + pw = provision.succeed("su - kanidm -c 'kanidmd recover-account -c ${serverConfigFile} idm_admin 2>&1 | rg -o \'[A-Za-z0-9]{48}\' '").strip().removeprefix("'").removesuffix("'") + out = provision.succeed(f"KANIDM_PASSWORD={pw} kanidm login -D idm_admin") + assert_contains(out, "Login Success for idm_admin") - out = provision.succeed("kanidm group get supergroup1") - assert_contains(out, "name: supergroup1") - assert_contains(out, "member: testgroup1") + with subtest("Test Provisioning - setup"): + provision_login(None) + provision.succeed("kanidm logout -D idm_admin") - out = provision.succeed("kanidm person get testuser1") - assert_contains(out, "name: testuser1") - assert_contains(out, "displayname: Test User") - assert_contains(out, "legalname: Jane Doe") - assert_contains(out, "mail: jane.doe@example.com") - assert_contains(out, "memberof: testgroup1") - assert_contains(out, "memberof: service1-access") + with subtest("Test Provisioning - credentialProvision"): + 
provision.succeed('${specialisations}/credentialProvision/bin/switch-to-configuration test') + provision_login("${provisionIdmAdminPassword}") - out = provision.succeed("kanidm person get testuser2") - assert_contains(out, "name: testuser2") - assert_contains(out, "displayname: Powerful Test User") - assert_contains(out, "legalname: Ryouiki Tenkai") - assert_contains(out, "memberof: service1-admin") - assert_lacks(out, "mail:") + # Make sure neither password is logged + provision.fail("journalctl --since -10m --unit kanidm.service --grep '${provisionAdminPassword}'") + provision.fail("journalctl --since -10m --unit kanidm.service --grep '${provisionIdmAdminPassword}'") - out = provision.succeed("kanidm group get service1-access") - assert_contains(out, "name: service1-access") + # Test provisioned admin pw + out = provision.succeed("KANIDM_PASSWORD=${provisionAdminPassword} kanidm login -D admin") + assert_contains(out, "Login Success for admin") + provision.succeed("kanidm logout -D admin") + provision.succeed("kanidm logout -D idm_admin") - out = provision.succeed("kanidm group get service1-admin") - assert_contains(out, "name: service1-admin") + with subtest("Test Provisioning - changedCredential"): + provision.succeed('${specialisations}/changedCredential/bin/switch-to-configuration test') + provision_login("${provisionIdmAdminPassword2}") + provision.succeed("kanidm logout -D idm_admin") - out = provision.succeed("kanidm system oauth2 get service1") - assert_contains(out, "name: service1") - assert_contains(out, "displayname: Service One") - assert_contains(out, "oauth2_rs_origin: https://one.example.com/") - assert_contains(out, "oauth2_rs_origin_landing: https://one.example.com/landing") - assert_matches(out, 'oauth2_rs_scope_map: service1-access.*{"email", "openid", "profile"}') - assert_matches(out, 'oauth2_rs_sup_scope_map: service1-admin.*{"admin"}') - assert_matches(out, 'oauth2_rs_claim_map: groups:.*"admin"') + with subtest("Test Provisioning - addEntities"): + provision.succeed('${specialisations}/addEntities/bin/switch-to-configuration test') + # Unspecified idm admin password + provision_login(None) - out = provision.succeed("kanidm system oauth2 show-basic-secret service1") - assert_contains(out, "very-strong-secret-for-service1") + out = provision.succeed("kanidm group get testgroup1") + assert_contains(out, "name: testgroup1") - out = provision.succeed("kanidm system oauth2 get service2") - assert_contains(out, "name: service2") - assert_contains(out, "displayname: Service Two") - assert_contains(out, "oauth2_rs_origin: https://two.example.com/") - assert_contains(out, "oauth2_rs_origin_landing: https://landing2.example.com/") - assert_contains(out, "oauth2_allow_insecure_client_disable_pkce: true") - assert_contains(out, "oauth2_prefer_short_username: true") + out = provision.succeed("kanidm group get supergroup1") + assert_contains(out, "name: supergroup1") + assert_contains(out, "member: testgroup1") - provision.succeed("kanidm logout -D idm_admin") + out = provision.succeed("kanidm person get testuser1") + assert_contains(out, "name: testuser1") + assert_contains(out, "displayname: Test User") + assert_contains(out, "legalname: Jane Doe") + assert_contains(out, "mail: jane.doe@example.com") + assert_contains(out, "memberof: testgroup1") + assert_contains(out, "memberof: service1-access") - with subtest("Test Provisioning - changeAttributes"): - provision.succeed('${specialisations}/changeAttributes/bin/switch-to-configuration test') - 
provision_login("${provisionIdmAdminPassword}") + out = provision.succeed("kanidm person get testuser2") + assert_contains(out, "name: testuser2") + assert_contains(out, "displayname: Powerful Test User") + assert_contains(out, "legalname: Ryouiki Tenkai") + assert_contains(out, "memberof: service1-admin") + assert_lacks(out, "mail:") - out = provision.succeed("kanidm group get testgroup1") - assert_contains(out, "name: testgroup1") + out = provision.succeed("kanidm group get service1-access") + assert_contains(out, "name: service1-access") - out = provision.succeed("kanidm group get supergroup1") - assert_contains(out, "name: supergroup1") - assert_lacks(out, "member: testgroup1") + out = provision.succeed("kanidm group get service1-admin") + assert_contains(out, "name: service1-admin") - out = provision.succeed("kanidm person get testuser1") - assert_contains(out, "name: testuser1") - assert_contains(out, "displayname: Test User (changed)") - assert_contains(out, "legalname: Jane Doe (changed)") - assert_contains(out, "mail: jane.doe@example.com") - assert_contains(out, "mail: second.doe@example.com") - assert_lacks(out, "memberof: testgroup1") - assert_contains(out, "memberof: service1-access") + out = provision.succeed("kanidm system oauth2 get service1") + assert_contains(out, "name: service1") + assert_contains(out, "displayname: Service One") + assert_contains(out, "oauth2_rs_origin: https://one.example.com/") + assert_contains(out, "oauth2_rs_origin_landing: https://one.example.com/landing") + assert_matches(out, 'oauth2_rs_scope_map: service1-access.*{"email", "openid", "profile"}') + assert_matches(out, 'oauth2_rs_sup_scope_map: service1-admin.*{"admin"}') + assert_matches(out, 'oauth2_rs_claim_map: groups:.*"admin"') - out = provision.succeed("kanidm person get testuser2") - assert_contains(out, "name: testuser2") - assert_contains(out, "displayname: Powerful Test User (changed)") - assert_contains(out, "legalname: Ryouiki Tenkai (changed)") - assert_contains(out, "memberof: service1-admin") - assert_lacks(out, "mail:") + out = provision.succeed("kanidm system oauth2 show-basic-secret service1") + assert_contains(out, "very-strong-secret-for-service1") - out = provision.succeed("kanidm group get service1-access") - assert_contains(out, "name: service1-access") + out = provision.succeed("kanidm system oauth2 get service2") + assert_contains(out, "name: service2") + assert_contains(out, "displayname: Service Two") + assert_contains(out, "oauth2_rs_origin: https://two.example.com/") + assert_contains(out, "oauth2_rs_origin_landing: https://landing2.example.com/") + assert_contains(out, "oauth2_allow_insecure_client_disable_pkce: true") + assert_contains(out, "oauth2_prefer_short_username: true") - out = provision.succeed("kanidm group get service1-admin") - assert_contains(out, "name: service1-admin") + provision.succeed("kanidm logout -D idm_admin") - out = provision.succeed("kanidm system oauth2 get service1") - assert_contains(out, "name: service1") - assert_contains(out, "displayname: Service One (changed)") - assert_contains(out, "oauth2_rs_origin: https://changed-one.example.com/") - assert_contains(out, "oauth2_rs_origin: https://changed-one.example.org/") - assert_contains(out, "oauth2_rs_origin_landing: https://changed-one.example.com/landing") - assert_matches(out, 'oauth2_rs_scope_map: service1-access.*{"email", "openid"}') - assert_matches(out, 'oauth2_rs_sup_scope_map: service1-admin.*{"adminchanged"}') - assert_matches(out, 'oauth2_rs_claim_map: 
groups:.*"adminchanged"') + with subtest("Test Provisioning - changeAttributes"): + provision.succeed('${specialisations}/changeAttributes/bin/switch-to-configuration test') + provision_login("${provisionIdmAdminPassword}") - out = provision.succeed("kanidm system oauth2 show-basic-secret service1") - assert_contains(out, "changed-very-strong-secret-for-service1") + out = provision.succeed("kanidm group get testgroup1") + assert_contains(out, "name: testgroup1") - out = provision.succeed("kanidm system oauth2 get service2") - assert_contains(out, "name: service2") - assert_contains(out, "displayname: Service Two (changed)") - assert_contains(out, "oauth2_rs_origin: https://changed-two.example.com/") - assert_contains(out, "oauth2_rs_origin_landing: https://changed-landing2.example.com/") - assert_lacks(out, "oauth2_allow_insecure_client_disable_pkce: true") - assert_lacks(out, "oauth2_prefer_short_username: true") + out = provision.succeed("kanidm group get supergroup1") + assert_contains(out, "name: supergroup1") + assert_lacks(out, "member: testgroup1") - provision.succeed("kanidm logout -D idm_admin") + out = provision.succeed("kanidm person get testuser1") + assert_contains(out, "name: testuser1") + assert_contains(out, "displayname: Test User (changed)") + assert_contains(out, "legalname: Jane Doe (changed)") + assert_contains(out, "mail: jane.doe@example.com") + assert_contains(out, "mail: second.doe@example.com") + assert_lacks(out, "memberof: testgroup1") + assert_contains(out, "memberof: service1-access") - with subtest("Test Provisioning - removeAttributes"): - provision.succeed('${specialisations}/removeAttributes/bin/switch-to-configuration test') - provision_login("${provisionIdmAdminPassword}") + out = provision.succeed("kanidm person get testuser2") + assert_contains(out, "name: testuser2") + assert_contains(out, "displayname: Powerful Test User (changed)") + assert_contains(out, "legalname: Ryouiki Tenkai (changed)") + assert_contains(out, "memberof: service1-admin") + assert_lacks(out, "mail:") - out = provision.succeed("kanidm group get testgroup1") - assert_lacks(out, "name: testgroup1") + out = provision.succeed("kanidm group get service1-access") + assert_contains(out, "name: service1-access") - out = provision.succeed("kanidm group get supergroup1") - assert_contains(out, "name: supergroup1") - assert_lacks(out, "member: testgroup1") + out = provision.succeed("kanidm group get service1-admin") + assert_contains(out, "name: service1-admin") - out = provision.succeed("kanidm person get testuser1") - assert_contains(out, "name: testuser1") - assert_contains(out, "displayname: Test User (changed)") - assert_lacks(out, "legalname: Jane Doe (changed)") - assert_lacks(out, "mail: jane.doe@example.com") - assert_lacks(out, "mail: second.doe@example.com") - assert_lacks(out, "memberof: testgroup1") - assert_lacks(out, "memberof: service1-access") + out = provision.succeed("kanidm system oauth2 get service1") + assert_contains(out, "name: service1") + assert_contains(out, "displayname: Service One (changed)") + assert_contains(out, "oauth2_rs_origin: https://changed-one.example.com/") + assert_contains(out, "oauth2_rs_origin: https://changed-one.example.org/") + assert_contains(out, "oauth2_rs_origin_landing: https://changed-one.example.com/landing") + assert_matches(out, 'oauth2_rs_scope_map: service1-access.*{"email", "openid"}') + assert_matches(out, 'oauth2_rs_sup_scope_map: service1-admin.*{"adminchanged"}') + assert_matches(out, 'oauth2_rs_claim_map: 
groups:.*"adminchanged"') - out = provision.succeed("kanidm person get testuser2") - assert_contains(out, "name: testuser2") - assert_contains(out, "displayname: Powerful Test User (changed)") - assert_lacks(out, "legalname: Ryouiki Tenkai (changed)") - assert_contains(out, "memberof: service1-admin") - assert_lacks(out, "mail:") + out = provision.succeed("kanidm system oauth2 show-basic-secret service1") + assert_contains(out, "changed-very-strong-secret-for-service1") - out = provision.succeed("kanidm group get service1-access") - assert_contains(out, "name: service1-access") + out = provision.succeed("kanidm system oauth2 get service2") + assert_contains(out, "name: service2") + assert_contains(out, "displayname: Service Two (changed)") + assert_contains(out, "oauth2_rs_origin: https://changed-two.example.com/") + assert_contains(out, "oauth2_rs_origin_landing: https://changed-landing2.example.com/") + assert_lacks(out, "oauth2_allow_insecure_client_disable_pkce: true") + assert_lacks(out, "oauth2_prefer_short_username: true") - out = provision.succeed("kanidm group get service1-admin") - assert_contains(out, "name: service1-admin") + provision.succeed("kanidm logout -D idm_admin") - out = provision.succeed("kanidm system oauth2 get service1") - assert_contains(out, "name: service1") - assert_contains(out, "displayname: Service One (changed)") - assert_contains(out, "oauth2_rs_origin: https://changed-one.example.com/") - assert_lacks(out, "oauth2_rs_origin: https://changed-one.example.org/") - assert_contains(out, "oauth2_rs_origin_landing: https://changed-one.example.com/landing") - assert_lacks(out, "oauth2_rs_scope_map") - assert_lacks(out, "oauth2_rs_sup_scope_map") - assert_lacks(out, "oauth2_rs_claim_map") + with subtest("Test Provisioning - removeAttributes"): + provision.succeed('${specialisations}/removeAttributes/bin/switch-to-configuration test') + provision_login("${provisionIdmAdminPassword}") - out = provision.succeed("kanidm system oauth2 show-basic-secret service1") - assert_contains(out, "changed-very-strong-secret-for-service1") + out = provision.succeed("kanidm group get testgroup1") + assert_lacks(out, "name: testgroup1") - out = provision.succeed("kanidm system oauth2 get service2") - assert_contains(out, "name: service2") - assert_contains(out, "displayname: Service Two (changed)") - assert_contains(out, "oauth2_rs_origin: https://changed-two.example.com/") - assert_contains(out, "oauth2_rs_origin_landing: https://changed-landing2.example.com/") - assert_lacks(out, "oauth2_allow_insecure_client_disable_pkce: true") - assert_lacks(out, "oauth2_prefer_short_username: true") + out = provision.succeed("kanidm group get supergroup1") + assert_contains(out, "name: supergroup1") + assert_lacks(out, "member: testgroup1") - provision.succeed("kanidm logout -D idm_admin") + out = provision.succeed("kanidm person get testuser1") + assert_contains(out, "name: testuser1") + assert_contains(out, "displayname: Test User (changed)") + assert_lacks(out, "legalname: Jane Doe (changed)") + assert_lacks(out, "mail: jane.doe@example.com") + assert_lacks(out, "mail: second.doe@example.com") + assert_lacks(out, "memberof: testgroup1") + assert_lacks(out, "memberof: service1-access") - with subtest("Test Provisioning - removeEntities"): - provision.succeed('${specialisations}/removeEntities/bin/switch-to-configuration test') - provision_login("${provisionIdmAdminPassword}") + out = provision.succeed("kanidm person get testuser2") + assert_contains(out, "name: testuser2") + 
assert_contains(out, "displayname: Powerful Test User (changed)") + assert_lacks(out, "legalname: Ryouiki Tenkai (changed)") + assert_contains(out, "memberof: service1-admin") + assert_lacks(out, "mail:") - out = provision.succeed("kanidm group get testgroup1") - assert_lacks(out, "name: testgroup1") + out = provision.succeed("kanidm group get service1-access") + assert_contains(out, "name: service1-access") - out = provision.succeed("kanidm group get supergroup1") - assert_lacks(out, "name: supergroup1") + out = provision.succeed("kanidm group get service1-admin") + assert_contains(out, "name: service1-admin") - out = provision.succeed("kanidm person get testuser1") - assert_lacks(out, "name: testuser1") + out = provision.succeed("kanidm system oauth2 get service1") + assert_contains(out, "name: service1") + assert_contains(out, "displayname: Service One (changed)") + assert_contains(out, "oauth2_rs_origin: https://changed-one.example.com/") + assert_lacks(out, "oauth2_rs_origin: https://changed-one.example.org/") + assert_contains(out, "oauth2_rs_origin_landing: https://changed-one.example.com/landing") + assert_lacks(out, "oauth2_rs_scope_map") + assert_lacks(out, "oauth2_rs_sup_scope_map") + assert_lacks(out, "oauth2_rs_claim_map") - out = provision.succeed("kanidm person get testuser2") - assert_lacks(out, "name: testuser2") + out = provision.succeed("kanidm system oauth2 show-basic-secret service1") + assert_contains(out, "changed-very-strong-secret-for-service1") - out = provision.succeed("kanidm group get service1-access") - assert_lacks(out, "name: service1-access") + out = provision.succeed("kanidm system oauth2 get service2") + assert_contains(out, "name: service2") + assert_contains(out, "displayname: Service Two (changed)") + assert_contains(out, "oauth2_rs_origin: https://changed-two.example.com/") + assert_contains(out, "oauth2_rs_origin_landing: https://changed-landing2.example.com/") + assert_lacks(out, "oauth2_allow_insecure_client_disable_pkce: true") + assert_lacks(out, "oauth2_prefer_short_username: true") - out = provision.succeed("kanidm group get service1-admin") - assert_lacks(out, "name: service1-admin") + provision.succeed("kanidm logout -D idm_admin") - out = provision.succeed("kanidm system oauth2 get service1") - assert_lacks(out, "name: service1") + with subtest("Test Provisioning - removeEntities"): + provision.succeed('${specialisations}/removeEntities/bin/switch-to-configuration test') + provision_login("${provisionIdmAdminPassword}") - out = provision.succeed("kanidm system oauth2 get service2") - assert_lacks(out, "name: service2") + out = provision.succeed("kanidm group get testgroup1") + assert_lacks(out, "name: testgroup1") - provision.succeed("kanidm logout -D idm_admin") - ''; - } -) + out = provision.succeed("kanidm group get supergroup1") + assert_lacks(out, "name: supergroup1") + + out = provision.succeed("kanidm person get testuser1") + assert_lacks(out, "name: testuser1") + + out = provision.succeed("kanidm person get testuser2") + assert_lacks(out, "name: testuser2") + + out = provision.succeed("kanidm group get service1-access") + assert_lacks(out, "name: service1-access") + + out = provision.succeed("kanidm group get service1-admin") + assert_lacks(out, "name: service1-admin") + + out = provision.succeed("kanidm system oauth2 get service1") + assert_lacks(out, "name: service1") + + out = provision.succeed("kanidm system oauth2 get service2") + assert_lacks(out, "name: service2") + + provision.succeed("kanidm logout -D idm_admin") + ''; +} 
diff --git a/nixos/tests/kanidm.nix b/nixos/tests/kanidm.nix index 69dac0de6865..f2f4981da276 100644 --- a/nixos/tests/kanidm.nix +++ b/nixos/tests/kanidm.nix @@ -1,154 +1,152 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - certs = import ./common/acme/server/snakeoil-certs.nix; - serverDomain = certs.domain; +{ pkgs, ... }: +let + certs = import ./common/acme/server/snakeoil-certs.nix; + serverDomain = certs.domain; - testCredentials = { - password = "Password1_cZPEwpCWvrReripJmAZdmVIZd8HHoHcl"; + testCredentials = { + password = "Password1_cZPEwpCWvrReripJmAZdmVIZd8HHoHcl"; + }; + + # copy certs to store to work around mount namespacing + certsPath = pkgs.runCommandNoCC "snakeoil-certs" { } '' + mkdir $out + cp ${certs."${serverDomain}".cert} $out/snakeoil.crt + cp ${certs."${serverDomain}".key} $out/snakeoil.key + ''; +in +{ + name = "kanidm"; + meta.maintainers = with pkgs.lib.maintainers; [ + Flakebi + oddlama + ]; + + nodes.server = + { pkgs, ... }: + { + services.kanidm = { + package = pkgs.kanidm_1_6; + enableServer = true; + serverSettings = { + origin = "https://${serverDomain}"; + domain = serverDomain; + bindaddress = "[::]:443"; + ldapbindaddress = "[::1]:636"; + tls_chain = "${certsPath}/snakeoil.crt"; + tls_key = "${certsPath}/snakeoil.key"; + }; + }; + + security.pki.certificateFiles = [ certs.ca.cert ]; + + networking.hosts."::1" = [ serverDomain ]; + networking.firewall.allowedTCPPorts = [ 443 ]; + + users.users.kanidm.shell = pkgs.bashInteractive; + + environment.systemPackages = with pkgs; [ + kanidm + openldap + ripgrep + ]; }; - # copy certs to store to work around mount namespacing - certsPath = pkgs.runCommandNoCC "snakeoil-certs" { } '' - mkdir $out - cp ${certs."${serverDomain}".cert} $out/snakeoil.crt - cp ${certs."${serverDomain}".key} $out/snakeoil.key + nodes.client = + { nodes, ... }: + { + services.kanidm = { + package = pkgs.kanidm_1_6; + enableClient = true; + clientSettings = { + uri = "https://${serverDomain}"; + verify_ca = true; + verify_hostnames = true; + }; + enablePam = true; + unixSettings = { + pam_allowed_login_groups = [ "shell" ]; + }; + }; + + networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ serverDomain ]; + + security.pki.certificateFiles = [ certs.ca.cert ]; + }; + + testScript = + { nodes, ... }: + let + ldapBaseDN = builtins.concatStringsSep "," ( + map (s: "dc=" + s) (pkgs.lib.splitString "." serverDomain) + ); + + # We need access to the config file in the test script. 
+ filteredConfig = pkgs.lib.converge (pkgs.lib.filterAttrsRecursive ( + _: v: v != null + )) nodes.server.services.kanidm.serverSettings; + serverConfigFile = (pkgs.formats.toml { }).generate "server.toml" filteredConfig; + in + '' + server.start() + client.start() + server.wait_for_unit("kanidm.service") + client.systemctl("start network-online.target") + client.wait_for_unit("network-online.target") + + with subtest("Test HTTP interface"): + server.wait_until_succeeds("curl -Lsf https://${serverDomain} | grep Kanidm") + + with subtest("Test LDAP interface"): + server.succeed("ldapsearch -H ldaps://${serverDomain}:636 -b '${ldapBaseDN}' -x '(name=test)'") + + with subtest("Recover idm_admin account"): + idm_admin_password = server.succeed("su - kanidm -c 'kanidmd recover-account -c ${serverConfigFile} idm_admin 2>&1 | rg -o \'[A-Za-z0-9]{48}\' '").strip().removeprefix("'").removesuffix("'") + + with subtest("Test CLI login"): + client.wait_until_tty_matches("1", "login: ") + client.send_chars("root\n") + client.send_chars("kanidm login -D idm_admin\n") + client.wait_until_tty_matches("1", "Enter password: ") + client.send_chars(f"{idm_admin_password}\n") + client.wait_until_tty_matches("1", "Login Success for idm_admin") + + with subtest("Test unixd connection"): + client.wait_for_unit("kanidm-unixd.service") + client.wait_for_file("/run/kanidm-unixd/sock") + client.wait_until_succeeds("kanidm-unix status | grep online") + + with subtest("Test user creation"): + client.wait_for_unit("getty@tty1.service") + client.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + client.succeed("kanidm person create testuser TestUser") + client.succeed("kanidm person posix set --shell \"$SHELL\" testuser") + client.send_chars("kanidm person posix set-password testuser\n") + client.wait_until_tty_matches("1", "Enter new") + client.send_chars("${testCredentials.password}\n") + client.wait_until_tty_matches("1", "Reenter") + client.send_chars("${testCredentials.password}\n") + output = client.succeed("getent passwd testuser") + assert "TestUser" in output + client.succeed("kanidm group create shell") + client.succeed("kanidm group posix set shell") + client.succeed("kanidm group add-members shell testuser") + + with subtest("Test user login"): + client.send_key("alt-f2") + client.wait_until_succeeds("[ $(fgconsole) = 2 ]") + client.wait_for_unit("getty@tty2.service") + client.wait_until_succeeds("pgrep -f 'agetty.*tty2'") + client.wait_until_tty_matches("2", "login: ") + client.send_chars("testuser\n") + client.wait_until_tty_matches("2", "login: testuser") + client.wait_until_succeeds("pgrep login") + client.wait_until_tty_matches("2", "Password: ") + client.send_chars("${testCredentials.password}\n") + client.wait_until_succeeds("systemctl is-active user@$(id -u testuser).service") + client.send_chars("touch done\n") + client.wait_for_file("/home/testuser@${serverDomain}/done") + + server.shutdown() + client.shutdown() ''; - in - { - name = "kanidm"; - meta.maintainers = with pkgs.lib.maintainers; [ - Flakebi - oddlama - ]; - - nodes.server = - { pkgs, ... 
}: - { - services.kanidm = { - package = pkgs.kanidm_1_6; - enableServer = true; - serverSettings = { - origin = "https://${serverDomain}"; - domain = serverDomain; - bindaddress = "[::]:443"; - ldapbindaddress = "[::1]:636"; - tls_chain = "${certsPath}/snakeoil.crt"; - tls_key = "${certsPath}/snakeoil.key"; - }; - }; - - security.pki.certificateFiles = [ certs.ca.cert ]; - - networking.hosts."::1" = [ serverDomain ]; - networking.firewall.allowedTCPPorts = [ 443 ]; - - users.users.kanidm.shell = pkgs.bashInteractive; - - environment.systemPackages = with pkgs; [ - kanidm - openldap - ripgrep - ]; - }; - - nodes.client = - { nodes, ... }: - { - services.kanidm = { - package = pkgs.kanidm_1_6; - enableClient = true; - clientSettings = { - uri = "https://${serverDomain}"; - verify_ca = true; - verify_hostnames = true; - }; - enablePam = true; - unixSettings = { - pam_allowed_login_groups = [ "shell" ]; - }; - }; - - networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ serverDomain ]; - - security.pki.certificateFiles = [ certs.ca.cert ]; - }; - - testScript = - { nodes, ... }: - let - ldapBaseDN = builtins.concatStringsSep "," ( - map (s: "dc=" + s) (pkgs.lib.splitString "." serverDomain) - ); - - # We need access to the config file in the test script. - filteredConfig = pkgs.lib.converge (pkgs.lib.filterAttrsRecursive ( - _: v: v != null - )) nodes.server.services.kanidm.serverSettings; - serverConfigFile = (pkgs.formats.toml { }).generate "server.toml" filteredConfig; - in - '' - server.start() - client.start() - server.wait_for_unit("kanidm.service") - client.systemctl("start network-online.target") - client.wait_for_unit("network-online.target") - - with subtest("Test HTTP interface"): - server.wait_until_succeeds("curl -Lsf https://${serverDomain} | grep Kanidm") - - with subtest("Test LDAP interface"): - server.succeed("ldapsearch -H ldaps://${serverDomain}:636 -b '${ldapBaseDN}' -x '(name=test)'") - - with subtest("Recover idm_admin account"): - idm_admin_password = server.succeed("su - kanidm -c 'kanidmd recover-account -c ${serverConfigFile} idm_admin 2>&1 | rg -o \'[A-Za-z0-9]{48}\' '").strip().removeprefix("'").removesuffix("'") - - with subtest("Test CLI login"): - client.wait_until_tty_matches("1", "login: ") - client.send_chars("root\n") - client.send_chars("kanidm login -D idm_admin\n") - client.wait_until_tty_matches("1", "Enter password: ") - client.send_chars(f"{idm_admin_password}\n") - client.wait_until_tty_matches("1", "Login Success for idm_admin") - - with subtest("Test unixd connection"): - client.wait_for_unit("kanidm-unixd.service") - client.wait_for_file("/run/kanidm-unixd/sock") - client.wait_until_succeeds("kanidm-unix status | grep online") - - with subtest("Test user creation"): - client.wait_for_unit("getty@tty1.service") - client.wait_until_succeeds("pgrep -f 'agetty.*tty1'") - client.succeed("kanidm person create testuser TestUser") - client.succeed("kanidm person posix set --shell \"$SHELL\" testuser") - client.send_chars("kanidm person posix set-password testuser\n") - client.wait_until_tty_matches("1", "Enter new") - client.send_chars("${testCredentials.password}\n") - client.wait_until_tty_matches("1", "Reenter") - client.send_chars("${testCredentials.password}\n") - output = client.succeed("getent passwd testuser") - assert "TestUser" in output - client.succeed("kanidm group create shell") - client.succeed("kanidm group posix set shell") - client.succeed("kanidm group add-members shell testuser") - - with subtest("Test user login"): - 
client.send_key("alt-f2") - client.wait_until_succeeds("[ $(fgconsole) = 2 ]") - client.wait_for_unit("getty@tty2.service") - client.wait_until_succeeds("pgrep -f 'agetty.*tty2'") - client.wait_until_tty_matches("2", "login: ") - client.send_chars("testuser\n") - client.wait_until_tty_matches("2", "login: testuser") - client.wait_until_succeeds("pgrep login") - client.wait_until_tty_matches("2", "Password: ") - client.send_chars("${testCredentials.password}\n") - client.wait_until_succeeds("systemctl is-active user@$(id -u testuser).service") - client.send_chars("touch done\n") - client.wait_for_file("/home/testuser@${serverDomain}/done") - - server.shutdown() - client.shutdown() - ''; - } -) +} diff --git a/nixos/tests/karma.nix b/nixos/tests/karma.nix index 2e80aa97fbb5..7d21d99bc505 100644 --- a/nixos/tests/karma.nix +++ b/nixos/tests/karma.nix @@ -1,89 +1,87 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "karma"; - nodes = { - server = - { ... }: - { - services.prometheus.alertmanager = { - enable = true; - logLevel = "debug"; - port = 9093; - openFirewall = true; - configuration = { - global = { - resolve_timeout = "1m"; - }; - route = { - # Root route node - receiver = "test"; - group_by = [ "..." ]; - continue = false; - group_wait = "1s"; - group_interval = "15s"; - repeat_interval = "24h"; - }; - receivers = [ +{ lib, pkgs, ... }: +{ + name = "karma"; + nodes = { + server = + { ... }: + { + services.prometheus.alertmanager = { + enable = true; + logLevel = "debug"; + port = 9093; + openFirewall = true; + configuration = { + global = { + resolve_timeout = "1m"; + }; + route = { + # Root route node + receiver = "test"; + group_by = [ "..." ]; + continue = false; + group_wait = "1s"; + group_interval = "15s"; + repeat_interval = "24h"; + }; + receivers = [ + { + name = "test"; + webhook_configs = [ + { + url = "http://localhost:1234"; + send_resolved = true; + max_alerts = 0; + } + ]; + } + ]; + }; + }; + services.karma = { + enable = true; + openFirewall = true; + settings = { + listen = { + address = "0.0.0.0"; + port = 8081; + }; + alertmanager = { + servers = [ { - name = "test"; - webhook_configs = [ - { - url = "http://localhost:1234"; - send_resolved = true; - max_alerts = 0; - } - ]; + name = "alertmanager"; + uri = "https://127.0.0.1:9093"; } ]; }; - }; - services.karma = { - enable = true; - openFirewall = true; - settings = { - listen = { - address = "0.0.0.0"; - port = 8081; - }; - alertmanager = { - servers = [ - { - name = "alertmanager"; - uri = "https://127.0.0.1:9093"; - } - ]; - }; - karma.name = "test-dashboard"; - log.config = true; - log.requests = true; - log.timestamp = true; - }; + karma.name = "test-dashboard"; + log.config = true; + log.requests = true; + log.timestamp = true; }; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - with subtest("Wait for server to come up"): + with subtest("Wait for server to come up"): - server.wait_for_unit("alertmanager.service") - server.wait_for_unit("karma.service") + server.wait_for_unit("alertmanager.service") + server.wait_for_unit("karma.service") - server.sleep(5) # wait for both services to settle + server.sleep(5) # wait for both services to settle - server.wait_for_open_port(9093) - server.wait_for_open_port(8081) + server.wait_for_open_port(9093) + server.wait_for_open_port(8081) - with subtest("Test alertmanager readiness"): - server.succeed("curl -s http://127.0.0.1:9093/-/ready") + with subtest("Test alertmanager readiness"): + server.succeed("curl -s 
http://127.0.0.1:9093/-/ready") - # Karma only starts serving the dashboard once it has established connectivity to all alertmanagers in its config - # Therefore, this will fail if karma isn't able to reach alertmanager - server.succeed("curl -s http://127.0.0.1:8081") + # Karma only starts serving the dashboard once it has established connectivity to all alertmanagers in its config + # Therefore, this will fail if karma isn't able to reach alertmanager + server.succeed("curl -s http://127.0.0.1:8081") - server.shutdown() - ''; - } -) + server.shutdown() + ''; +} diff --git a/nixos/tests/kavita.nix b/nixos/tests/kavita.nix index f9f51ebb80f8..6749c98f4d83 100644 --- a/nixos/tests/kavita.nix +++ b/nixos/tests/kavita.nix @@ -1,47 +1,45 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "kavita"; - meta = with pkgs.lib.maintainers; { - maintainers = [ misterio77 ]; - }; +{ pkgs, ... }: +{ + name = "kavita"; + meta = with pkgs.lib.maintainers; { + maintainers = [ misterio77 ]; + }; - nodes = { - kavita = - { config, pkgs, ... }: - { - services.kavita = { - enable = true; - tokenKeyFile = builtins.toFile "kavita.key" "d26ba694b455271a8872415830fb7b5c58f8da98f9ef7f58b2ca4c34bd406512"; - }; + nodes = { + kavita = + { config, pkgs, ... }: + { + services.kavita = { + enable = true; + tokenKeyFile = builtins.toFile "kavita.key" "d26ba694b455271a8872415830fb7b5c58f8da98f9ef7f58b2ca4c34bd406512"; }; - }; + }; + }; - testScript = - let - regUrl = "http://kavita:5000/api/Account/register"; - loginUrl = "http://kavita:5000/api/Account/login"; - localeUrl = "http://kavita:5000/api/locale"; - in - '' - import json + testScript = + let + regUrl = "http://kavita:5000/api/Account/register"; + loginUrl = "http://kavita:5000/api/Account/login"; + localeUrl = "http://kavita:5000/api/locale"; + in + '' + import json - kavita.start - kavita.wait_for_unit("kavita.service") + kavita.start + kavita.wait_for_unit("kavita.service") - # Check that static assets are working - kavita.wait_until_succeeds("curl http://kavita:5000/site.webmanifest | grep Kavita") + # Check that static assets are working + kavita.wait_until_succeeds("curl http://kavita:5000/site.webmanifest | grep Kavita") - # Check that registration is working - kavita.succeed("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""") - # But only for the first one - kavita.fail("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""") + # Check that registration is working + kavita.succeed("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""") + # But only for the first one + kavita.fail("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""") - # Log in and retrieve token - session = json.loads(kavita.succeed("""curl -fX POST ${loginUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""")) - # Check list of locales - locales = json.loads(kavita.succeed(f"curl -fX GET ${localeUrl} -H 'Authorization: Bearer {session['token']}'")) - assert len(locales) > 0, "expected a list of locales" - ''; - } -) + # Log in and retrieve token + session = json.loads(kavita.succeed("""curl -fX POST ${loginUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""")) + # Check list of locales + locales = json.loads(kavita.succeed(f"curl -fX GET ${localeUrl} -H 'Authorization: Bearer {session['token']}'")) + assert len(locales) > 0, "expected a list of 
locales" + ''; +} diff --git a/nixos/tests/kbd-setfont-decompress.nix b/nixos/tests/kbd-setfont-decompress.nix index 07ecf2c8979d..a6ad57d46fd2 100644 --- a/nixos/tests/kbd-setfont-decompress.nix +++ b/nixos/tests/kbd-setfont-decompress.nix @@ -1,23 +1,21 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "kbd-setfont-decompress"; +{ lib, pkgs, ... }: +{ + name = "kbd-setfont-decompress"; - meta.maintainers = with lib.maintainers; [ oxalica ]; + meta.maintainers = with lib.maintainers; [ oxalica ]; - nodes.machine = { ... }: { }; + nodes.machine = { ... }: { }; - testScript = '' - machine.succeed("gzip -cd ${pkgs.terminus_font}/share/consolefonts/ter-v16b.psf.gz >font.psf") - machine.succeed("gzip font.psf.gz") - machine.succeed("bzip2 font.psf.bz2") - machine.succeed("xz font.psf.xz") - machine.succeed("zstd font.psf.zst") - # setfont returns 0 even on error. - assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.gz 2>&1") == "" - assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.bz2 2>&1") == "" - assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.xz 2>&1") == "" - assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.zst 2>&1") == "" - ''; - } -) + testScript = '' + machine.succeed("gzip -cd ${pkgs.terminus_font}/share/consolefonts/ter-v16b.psf.gz >font.psf") + machine.succeed("gzip font.psf.gz") + machine.succeed("bzip2 font.psf.bz2") + machine.succeed("xz font.psf.xz") + machine.succeed("zstd font.psf.zst") + # setfont returns 0 even on error. + assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.gz 2>&1") == "" + assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.bz2 2>&1") == "" + assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.xz 2>&1") == "" + assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.zst 2>&1") == "" + ''; +} diff --git a/nixos/tests/kbd-update-search-paths-patch.nix b/nixos/tests/kbd-update-search-paths-patch.nix index da2cf99b44a6..7c1c36174f8a 100644 --- a/nixos/tests/kbd-update-search-paths-patch.nix +++ b/nixos/tests/kbd-update-search-paths-patch.nix @@ -1,24 +1,22 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "kbd-update-search-paths-patch"; +{ pkgs, ... }: +{ + name = "kbd-update-search-paths-patch"; - nodes.machine = - { pkgs, options, ... }: - { - console = { - packages = options.console.packages.default ++ [ pkgs.terminus_font ]; - }; + nodes.machine = + { pkgs, options, ... }: + { + console = { + packages = options.console.packages.default ++ [ pkgs.terminus_font ]; }; + }; - testScript = '' - command = "${pkgs.kbd}/bin/setfont ter-112n 2>&1" - (status, out) = machine.execute(command) - import re - pattern = re.compile(r".*Unable to find file:.*") - match = pattern.match(out) - if match: - raise Exception("command `{}` failed".format(command)) - ''; - } -) + testScript = '' + command = "${pkgs.kbd}/bin/setfont ter-112n 2>&1" + (status, out) = machine.execute(command) + import re + pattern = re.compile(r".*Unable to find file:.*") + match = pattern.match(out) + if match: + raise Exception("command `{}` failed".format(command)) + ''; +} diff --git a/nixos/tests/keepalived.nix b/nixos/tests/keepalived.nix index ba75dde8c5dc..d5ffa6e9838d 100644 --- a/nixos/tests/keepalived.nix +++ b/nixos/tests/keepalived.nix @@ -1,50 +1,48 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "keepalived"; - meta.maintainers = [ lib.maintainers.raitobezarius ]; +{ pkgs, lib, ... 
}: +{ + name = "keepalived"; + meta.maintainers = [ lib.maintainers.raitobezarius ]; - nodes = { - node1 = - { pkgs, ... }: - { - services.keepalived.enable = true; - services.keepalived.openFirewall = true; - services.keepalived.vrrpInstances.test = { - interface = "eth1"; - state = "MASTER"; - priority = 50; - virtualIps = [ { addr = "192.168.1.200"; } ]; - virtualRouterId = 1; - }; - environment.systemPackages = [ pkgs.tcpdump ]; + nodes = { + node1 = + { pkgs, ... }: + { + services.keepalived.enable = true; + services.keepalived.openFirewall = true; + services.keepalived.vrrpInstances.test = { + interface = "eth1"; + state = "MASTER"; + priority = 50; + virtualIps = [ { addr = "192.168.1.200"; } ]; + virtualRouterId = 1; }; - node2 = - { pkgs, ... }: - { - services.keepalived.enable = true; - services.keepalived.openFirewall = true; - services.keepalived.vrrpInstances.test = { - interface = "eth1"; - state = "MASTER"; - priority = 100; - virtualIps = [ { addr = "192.168.1.200"; } ]; - virtualRouterId = 1; - }; - environment.systemPackages = [ pkgs.tcpdump ]; + environment.systemPackages = [ pkgs.tcpdump ]; + }; + node2 = + { pkgs, ... }: + { + services.keepalived.enable = true; + services.keepalived.openFirewall = true; + services.keepalived.vrrpInstances.test = { + interface = "eth1"; + state = "MASTER"; + priority = 100; + virtualIps = [ { addr = "192.168.1.200"; } ]; + virtualRouterId = 1; }; - }; + environment.systemPackages = [ pkgs.tcpdump ]; + }; + }; - testScript = '' - # wait for boot time delay to pass - for node in [node1, node2]: - node.wait_until_succeeds( - "systemctl show -p LastTriggerUSecMonotonic keepalived-boot-delay.timer | grep -vq 'LastTriggerUSecMonotonic=0'" - ) - node.wait_for_unit("keepalived") - node2.wait_until_succeeds("ip addr show dev eth1 | grep -q 192.168.1.200") - node1.fail("ip addr show dev eth1 | grep -q 192.168.1.200") - node1.succeed("ping -c1 192.168.1.200") - ''; - } -) + testScript = '' + # wait for boot time delay to pass + for node in [node1, node2]: + node.wait_until_succeeds( + "systemctl show -p LastTriggerUSecMonotonic keepalived-boot-delay.timer | grep -vq 'LastTriggerUSecMonotonic=0'" + ) + node.wait_for_unit("keepalived") + node2.wait_until_succeeds("ip addr show dev eth1 | grep -q 192.168.1.200") + node1.fail("ip addr show dev eth1 | grep -q 192.168.1.200") + node1.succeed("ping -c1 192.168.1.200") + ''; +} diff --git a/nixos/tests/keepassxc.nix b/nixos/tests/keepassxc.nix index c2d68e29b798..1cdbdb445c3d 100644 --- a/nixos/tests/keepassxc.nix +++ b/nixos/tests/keepassxc.nix @@ -1,94 +1,92 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "keepassxc"; - meta = with pkgs.lib.maintainers; { - maintainers = [ turion ]; - timeout = 1800; - }; +{ + name = "keepassxc"; + meta = with pkgs.lib.maintainers; { + maintainers = [ turion ]; + timeout = 1800; + }; - nodes.machine = - { ... }: + nodes.machine = + { ... 
}: - { - imports = [ - ./common/user-account.nix - ./common/x11.nix - ]; + { + imports = [ + ./common/user-account.nix + ./common/x11.nix + ]; - services.xserver.enable = true; + services.xserver.enable = true; - # for better OCR - environment.etc."icewm/prefoverride".text = '' - ColorActiveTitleBar = "rgb:FF/FF/FF" - ''; + # for better OCR + environment.etc."icewm/prefoverride".text = '' + ColorActiveTitleBar = "rgb:FF/FF/FF" + ''; - # Regression test for https://github.com/NixOS/nixpkgs/issues/163482 - qt = { - enable = true; - platformTheme = "gnome"; - style = "adwaita-dark"; - }; - - test-support.displayManager.auto.user = "alice"; - environment.systemPackages = with pkgs; [ - keepassxc - xdotool - ]; + # Regression test for https://github.com/NixOS/nixpkgs/issues/163482 + qt = { + enable = true; + platformTheme = "gnome"; + style = "adwaita-dark"; }; - enableOCR = true; + test-support.displayManager.auto.user = "alice"; + environment.systemPackages = with pkgs; [ + keepassxc + xdotool + ]; + }; - testScript = - { nodes, ... }: - let - aliceDo = cmd: ''machine.succeed("su - alice -c '${cmd}' >&2 &");''; - in - '' - with subtest("Ensure X starts"): - start_all() - machine.wait_for_x() + enableOCR = true; - with subtest("Can create database and entry with CLI"): - ${aliceDo "keepassxc-cli db-create --set-key-file foo.keyfile foo.kdbx"} - ${aliceDo "keepassxc-cli add --no-password -k foo.keyfile foo.kdbx bar"} + testScript = + { nodes, ... }: + let + aliceDo = cmd: ''machine.succeed("su - alice -c '${cmd}' >&2 &");''; + in + '' + with subtest("Ensure X starts"): + start_all() + machine.wait_for_x() - with subtest("Ensure KeePassXC starts"): - # start KeePassXC window - ${aliceDo "keepassxc >&2 &"} + with subtest("Can create database and entry with CLI"): + ${aliceDo "keepassxc-cli db-create --set-key-file foo.keyfile foo.kdbx"} + ${aliceDo "keepassxc-cli add --no-password -k foo.keyfile foo.kdbx bar"} - machine.wait_for_text("KeePassXC ${pkgs.keepassxc.version}") - machine.screenshot("KeePassXC") + with subtest("Ensure KeePassXC starts"): + # start KeePassXC window + ${aliceDo "keepassxc >&2 &"} - with subtest("Can open existing database"): - machine.send_key("ctrl-o") - machine.sleep(5) - # Regression #163482: keepassxc did not crash - machine.succeed("ps -e | grep keepassxc") - machine.wait_for_text("Open database") - machine.send_key("ret") + machine.wait_for_text("KeePassXC ${pkgs.keepassxc.version}") + machine.screenshot("KeePassXC") - # Wait for the enter password screen to appear. - machine.wait_for_text("/home/alice/foo.kdbx") + with subtest("Can open existing database"): + machine.send_key("ctrl-o") + machine.sleep(5) + # Regression #163482: keepassxc did not crash + machine.succeed("ps -e | grep keepassxc") + machine.wait_for_text("Open database") + machine.send_key("ret") - # Click on "I have key file" button to open keyfile dialog - machine.send_key("tab") - machine.send_key("tab") - machine.send_key("tab") - machine.send_key("ret") + # Wait for the enter password screen to appear. 
+ machine.wait_for_text("/home/alice/foo.kdbx") - # Select keyfile - machine.wait_for_text("Select key file") - machine.send_chars("/home/alice/foo.keyfile") - machine.send_key("ret") + # Click on "I have key file" button to open keyfile dialog + machine.send_key("tab") + machine.send_key("tab") + machine.send_key("tab") + machine.send_key("ret") - # Open database - machine.wait_for_text("foo.kdbx \\[Locked] - KeePassXC") - machine.send_key("ret") + # Select keyfile + machine.wait_for_text("Select key file") + machine.send_chars("/home/alice/foo.keyfile") + machine.send_key("ret") - # Database is unlocked (doesn't have "[Locked]" in the title anymore) - machine.wait_for_text("foo.kdbx - KeePassXC") - ''; - } -) + # Open database + machine.wait_for_text("foo.kdbx \\[Locked] - KeePassXC") + machine.send_key("ret") + + # Database is unlocked (doesn't have "[Locked]" in the title anymore) + machine.wait_for_text("foo.kdbx - KeePassXC") + ''; +} diff --git a/nixos/tests/kernel-latest-ath-user-regd.nix b/nixos/tests/kernel-latest-ath-user-regd.nix index ede82d7d340a..f60b42ac6741 100644 --- a/nixos/tests/kernel-latest-ath-user-regd.nix +++ b/nixos/tests/kernel-latest-ath-user-regd.nix @@ -1,20 +1,18 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "kernel-latest-ath-user-regd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ veehaitch ]; +{ pkgs, ... }: +{ + name = "kernel-latest-ath-user-regd"; + meta = with pkgs.lib.maintainers; { + maintainers = [ veehaitch ]; + }; + + nodes.machine = + { pkgs, ... }: + { + boot.kernelPackages = pkgs.linuxPackages_latest; + networking.wireless.athUserRegulatoryDomain = true; }; - nodes.machine = - { pkgs, ... }: - { - boot.kernelPackages = pkgs.linuxPackages_latest; - networking.wireless.athUserRegulatoryDomain = true; - }; - - testScript = '' - assert "CONFIG_ATH_USER_REGD=y" in machine.succeed("zcat /proc/config.gz") - ''; - } -) + testScript = '' + assert "CONFIG_ATH_USER_REGD=y" in machine.succeed("zcat /proc/config.gz") + ''; +} diff --git a/nixos/tests/keter.nix b/nixos/tests/keter.nix index d42b30d869d9..129ed25803cc 100644 --- a/nixos/tests/keter.nix +++ b/nixos/tests/keter.nix @@ -1,47 +1,45 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - port = 81; - in - { - name = "keter"; - meta = with pkgs.lib.maintainers; { - maintainers = [ jappie ]; - }; +{ pkgs, ... }: +let + port = 81; +in +{ + name = "keter"; + meta = with pkgs.lib.maintainers; { + maintainers = [ jappie ]; + }; - nodes.machine = - { config, pkgs, ... }: - { - services.keter = { - enable = true; + nodes.machine = + { config, pkgs, ... 
}: + { + services.keter = { + enable = true; - globalKeterConfig = { - cli-port = 123; # just adding this to test the freeform - listeners = [ - { - host = "*4"; - inherit port; - } - ]; - }; - bundle = { - appName = "test-bundle"; - domain = "localhost"; - executable = pkgs.writeShellScript "run" '' - ${pkgs.python3}/bin/python -m http.server $PORT - ''; - }; + globalKeterConfig = { + cli-port = 123; # just adding this to test the freeform + listeners = [ + { + host = "*4"; + inherit port; + } + ]; + }; + bundle = { + appName = "test-bundle"; + domain = "localhost"; + executable = pkgs.writeShellScript "run" '' + ${pkgs.python3}/bin/python -m http.server $PORT + ''; }; }; + }; - testScript = '' - machine.wait_for_unit("keter.service") + testScript = '' + machine.wait_for_unit("keter.service") - machine.wait_for_open_port(${toString port}) - machine.wait_for_console_text("Activating app test-bundle with hosts: localhost") + machine.wait_for_open_port(${toString port}) + machine.wait_for_console_text("Activating app test-bundle with hosts: localhost") - machine.succeed("curl --fail http://localhost:${toString port}/") - ''; - } -) + machine.succeed("curl --fail http://localhost:${toString port}/") + ''; +} diff --git a/nixos/tests/komga.nix b/nixos/tests/komga.nix index 22c63d8cac6b..8d3ef130a5cd 100644 --- a/nixos/tests/komga.nix +++ b/nixos/tests/komga.nix @@ -1,23 +1,21 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "komga"; - meta.maintainers = with lib.maintainers; [ govanify ]; +{ + name = "komga"; + meta.maintainers = with lib.maintainers; [ govanify ]; - nodes.machine = - { pkgs, ... }: - { - services.komga = { - enable = true; - settings.server.port = 1234; - }; + nodes.machine = + { pkgs, ... }: + { + services.komga = { + enable = true; + settings.server.port = 1234; }; + }; - testScript = '' - machine.wait_for_unit("komga.service") - machine.wait_for_open_port(1234) - machine.succeed("curl --fail http://localhost:1234/") - ''; - } -) + testScript = '' + machine.wait_for_unit("komga.service") + machine.wait_for_open_port(1234) + machine.succeed("curl --fail http://localhost:1234/") + ''; +} diff --git a/nixos/tests/ksm.nix b/nixos/tests/ksm.nix index 73ef3bdc8eb0..2268843268ff 100644 --- a/nixos/tests/ksm.nix +++ b/nixos/tests/ksm.nix @@ -1,25 +1,23 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "ksm"; - meta = with lib.maintainers; { - maintainers = [ rnhmjoj ]; +{ + name = "ksm"; + meta = with lib.maintainers; { + maintainers = [ rnhmjoj ]; + }; + + nodes.machine = + { ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; + + hardware.ksm.enable = true; + hardware.ksm.sleep = 300; }; - nodes.machine = - { ... }: - { - imports = [ ../modules/profiles/minimal.nix ]; - - hardware.ksm.enable = true; - hardware.ksm.sleep = 300; - }; - - testScript = '' - machine.start() - machine.wait_until_succeeds("test $(
Hello world
' > page.html") - machine.execute("Ladybird file://$(pwd)/page.html >&2 &") - machine.wait_for_window("Ladybird") - machine.sleep(5) - machine.wait_for_text("Hello world") - machine.screenshot("screen") - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.succeed("echo '
Hello world
' > page.html") + machine.execute("Ladybird file://$(pwd)/page.html >&2 &") + machine.wait_for_window("Ladybird") + machine.sleep(5) + machine.wait_for_text("Hello world") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/languagetool.nix b/nixos/tests/languagetool.nix index 35ab59b5d861..bbd6f3bf97c3 100644 --- a/nixos/tests/languagetool.nix +++ b/nixos/tests/languagetool.nix @@ -1,26 +1,24 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - port = 8082; - in - { - name = "languagetool"; - meta = with lib.maintainers; { - maintainers = [ fbeffa ]; +{ pkgs, lib, ... }: +let + port = 8082; +in +{ + name = "languagetool"; + meta = with lib.maintainers; { + maintainers = [ fbeffa ]; + }; + + nodes.machine = + { ... }: + { + services.languagetool.enable = true; + services.languagetool.port = port; }; - nodes.machine = - { ... }: - { - services.languagetool.enable = true; - services.languagetool.port = port; - }; - - testScript = '' - machine.start() - machine.wait_for_unit("languagetool.service") - machine.wait_for_open_port(${toString port}) - machine.wait_until_succeeds('curl -d "language=en-US" -d "text=a simple test" http://localhost:${toString port}/v2/check') - ''; - } -) + testScript = '' + machine.start() + machine.wait_for_unit("languagetool.service") + machine.wait_for_open_port(${toString port}) + machine.wait_until_succeeds('curl -d "language=en-US" -d "text=a simple test" http://localhost:${toString port}/v2/check') + ''; +} diff --git a/nixos/tests/lanraragi.nix b/nixos/tests/lanraragi.nix index 21a065845c64..ccc95a5e0483 100644 --- a/nixos/tests/lanraragi.nix +++ b/nixos/tests/lanraragi.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "lanraragi"; - meta.maintainers = with lib.maintainers; [ tomasajt ]; +{ pkgs, lib, ... }: +{ + name = "lanraragi"; + meta.maintainers = with lib.maintainers; [ tomasajt ]; - nodes = { - machine1 = - { pkgs, ... }: - { - services.lanraragi.enable = true; - }; - machine2 = - { pkgs, ... }: - { - services.lanraragi = { - enable = true; - passwordFile = pkgs.writeText "lrr-test-pass" '' - Ultra-secure-p@ssword-"with-spec1al\chars + nodes = { + machine1 = + { pkgs, ... }: + { + services.lanraragi.enable = true; + }; + machine2 = + { pkgs, ... 
}: + { + services.lanraragi = { + enable = true; + passwordFile = pkgs.writeText "lrr-test-pass" '' + Ultra-secure-p@ssword-"with-spec1al\chars + ''; + port = 4000; + redis = { + port = 4001; + passwordFile = pkgs.writeText "redis-lrr-test-pass" '' + 123-redis-PASS ''; - port = 4000; - redis = { - port = 4001; - passwordFile = pkgs.writeText "redis-lrr-test-pass" '' - 123-redis-PASS - ''; - }; }; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine1.wait_for_unit("lanraragi.service") - machine1.wait_until_succeeds("curl -f localhost:3000") - machine1.succeed("[ $(curl -o /dev/null -X post 'http://localhost:3000/login' --data-raw 'password=kamimamita' -w '%{http_code}') -eq 302 ]") + machine1.wait_for_unit("lanraragi.service") + machine1.wait_until_succeeds("curl -f localhost:3000") + machine1.succeed("[ $(curl -o /dev/null -X post 'http://localhost:3000/login' --data-raw 'password=kamimamita' -w '%{http_code}') -eq 302 ]") - machine2.wait_for_unit("lanraragi.service") - machine2.wait_until_succeeds("curl -f localhost:4000") - machine2.succeed("[ $(curl -o /dev/null -X post 'http://localhost:4000/login' --data-raw 'password=Ultra-secure-p@ssword-\"with-spec1al\\chars' -w '%{http_code}') -eq 302 ]") - ''; - } -) + machine2.wait_for_unit("lanraragi.service") + machine2.wait_until_succeeds("curl -f localhost:4000") + machine2.succeed("[ $(curl -o /dev/null -X post 'http://localhost:4000/login' --data-raw 'password=Ultra-secure-p@ssword-\"with-spec1al\\chars' -w '%{http_code}') -eq 302 ]") + ''; +} diff --git a/nixos/tests/leaps.nix b/nixos/tests/leaps.nix index 92847d83eaeb..dd26ca181a5a 100644 --- a/nixos/tests/leaps.nix +++ b/nixos/tests/leaps.nix @@ -1,32 +1,30 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "leaps"; - meta = with pkgs.lib.maintainers; { - maintainers = [ qknight ]; - }; +{ + name = "leaps"; + meta = with pkgs.lib.maintainers; { + maintainers = [ qknight ]; + }; - nodes = { - client = { }; + nodes = { + client = { }; - server = { - services.leaps = { - enable = true; - port = 6666; - path = "/leaps/"; - }; - networking.firewall.enable = false; + server = { + services.leaps = { + enable = true; + port = 6666; + path = "/leaps/"; }; + networking.firewall.enable = false; }; + }; - testScript = '' - start_all() - server.wait_for_open_port(6666) - client.wait_for_unit("network.target") - assert "leaps" in client.succeed( - "${pkgs.curl}/bin/curl -f http://server:6666/leaps/" - ) - ''; - } -) + testScript = '' + start_all() + server.wait_for_open_port(6666) + client.wait_for_unit("network.target") + assert "leaps" in client.succeed( + "${pkgs.curl}/bin/curl -f http://server:6666/leaps/" + ) + ''; +} diff --git a/nixos/tests/legit.nix b/nixos/tests/legit.nix index 405c69a52fad..d107e74fedd9 100644 --- a/nixos/tests/legit.nix +++ b/nixos/tests/legit.nix @@ -1,59 +1,57 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let - port = 5000; - scanPath = "/var/lib/legit"; - in - { - name = "legit-web"; - meta.maintainers = [ lib.maintainers.ratsclub ]; +{ lib, pkgs, ... }: +let + port = 5000; + scanPath = "/var/lib/legit"; +in +{ + name = "legit-web"; + meta.maintainers = [ lib.maintainers.ratsclub ]; - nodes = { - server = - { config, pkgs, ... }: - { - services.legit = { - enable = true; - settings = { - server.port = 5000; - repo = { inherit scanPath; }; - }; + nodes = { + server = + { config, pkgs, ... 
}: + { + services.legit = { + enable = true; + settings = { + server.port = 5000; + repo = { inherit scanPath; }; }; - - environment.systemPackages = [ pkgs.git ]; }; - }; - testScript = - { nodes, ... }: - let - strPort = builtins.toString port; - in - '' - start_all() + environment.systemPackages = [ pkgs.git ]; + }; + }; - server.wait_for_unit("network.target") - server.wait_for_unit("legit.service") + testScript = + { nodes, ... }: + let + strPort = builtins.toString port; + in + '' + start_all() - server.wait_until_succeeds( - "curl -f http://localhost:${strPort}" - ) + server.wait_for_unit("network.target") + server.wait_for_unit("legit.service") - server.succeed("${pkgs.writeShellScript "setup-legit-test-repo" '' - set -e - git init --bare -b master ${scanPath}/some-repo - git init -b master reference - cd reference - git remote add origin ${scanPath}/some-repo - date > date.txt - git add date.txt - git -c user.name=test -c user.email=test@localhost commit -m 'add date' - git push -u origin master - ''}") + server.wait_until_succeeds( + "curl -f http://localhost:${strPort}" + ) - server.wait_until_succeeds( - "curl -f http://localhost:${strPort}/some-repo" - ) - ''; - } -) + server.succeed("${pkgs.writeShellScript "setup-legit-test-repo" '' + set -e + git init --bare -b master ${scanPath}/some-repo + git init -b master reference + cd reference + git remote add origin ${scanPath}/some-repo + date > date.txt + git add date.txt + git -c user.name=test -c user.email=test@localhost commit -m 'add date' + git push -u origin master + ''}") + + server.wait_until_succeeds( + "curl -f http://localhost:${strPort}/some-repo" + ) + ''; +} diff --git a/nixos/tests/lemmy.nix b/nixos/tests/lemmy.nix index 5dc8cf107316..31eef526f29c 100644 --- a/nixos/tests/lemmy.nix +++ b/nixos/tests/lemmy.nix @@ -1,104 +1,102 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - uiPort = 1234; - backendPort = 5678; - lemmyNodeName = "server"; - in - { - name = "lemmy"; - meta = with lib.maintainers; { - maintainers = [ mightyiam ]; - }; +{ pkgs, lib, ... 
}: +let + uiPort = 1234; + backendPort = 5678; + lemmyNodeName = "server"; +in +{ + name = "lemmy"; + meta = with lib.maintainers; { + maintainers = [ mightyiam ]; + }; - nodes = { - client = { }; + nodes = { + client = { }; - "${lemmyNodeName}" = { - services.lemmy = { - enable = true; - ui.port = uiPort; - database.createLocally = true; - settings = { - hostname = "http://${lemmyNodeName}"; - port = backendPort; - # Without setup, the /feeds/* and /nodeinfo/* API endpoints won't return 200 - setup = { - admin_username = "mightyiam"; - site_name = "Lemmy FTW"; - admin_email = "mightyiam@example.com"; - }; + "${lemmyNodeName}" = { + services.lemmy = { + enable = true; + ui.port = uiPort; + database.createLocally = true; + settings = { + hostname = "http://${lemmyNodeName}"; + port = backendPort; + # Without setup, the /feeds/* and /nodeinfo/* API endpoints won't return 200 + setup = { + admin_username = "mightyiam"; + site_name = "Lemmy FTW"; + admin_email = "mightyiam@example.com"; }; - adminPasswordFile = /etc/lemmy-admin-password.txt; - caddy.enable = true; }; - - environment.etc."lemmy-admin-password.txt".text = "ThisIsWhatIUseEverywhereTryIt"; - - networking.firewall.allowedTCPPorts = [ 80 ]; - - # pict-rs seems to need more than 1025114112 bytes - virtualisation.memorySize = 2000; + adminPasswordFile = /etc/lemmy-admin-password.txt; + caddy.enable = true; }; + + environment.etc."lemmy-admin-password.txt".text = "ThisIsWhatIUseEverywhereTryIt"; + + networking.firewall.allowedTCPPorts = [ 80 ]; + + # pict-rs seems to need more than 1025114112 bytes + virtualisation.memorySize = 2000; }; + }; - testScript = '' - server = ${lemmyNodeName} + testScript = '' + server = ${lemmyNodeName} - with subtest("the merged config is secure"): - server.wait_for_unit("lemmy.service") - config_permissions = server.succeed("stat --format %A /run/lemmy/config.hjson").rstrip() - assert config_permissions == "-rw-------", f"merged config permissions {config_permissions} are insecure" - directory_permissions = server.succeed("stat --format %A /run/lemmy").rstrip() - assert directory_permissions[5] == directory_permissions[8] == "-", "merged config can be replaced" + with subtest("the merged config is secure"): + server.wait_for_unit("lemmy.service") + config_permissions = server.succeed("stat --format %A /run/lemmy/config.hjson").rstrip() + assert config_permissions == "-rw-------", f"merged config permissions {config_permissions} are insecure" + directory_permissions = server.succeed("stat --format %A /run/lemmy").rstrip() + assert directory_permissions[5] == directory_permissions[8] == "-", "merged config can be replaced" - with subtest("the backend starts and responds"): - server.wait_for_open_port(${toString backendPort}) - # wait until succeeds, it just needs few seconds for migrations, but lets give it 50s max - server.wait_until_succeeds("curl --fail localhost:${toString backendPort}/api/v3/site", 50) + with subtest("the backend starts and responds"): + server.wait_for_open_port(${toString backendPort}) + # wait until succeeds, it just needs few seconds for migrations, but lets give it 50s max + server.wait_until_succeeds("curl --fail localhost:${toString backendPort}/api/v3/site", 50) - with subtest("the UI starts and responds"): - server.wait_for_unit("lemmy-ui.service") - server.wait_for_open_port(${toString uiPort}) - server.succeed("curl --fail localhost:${toString uiPort}") + with subtest("the UI starts and responds"): + server.wait_for_unit("lemmy-ui.service") + 
server.wait_for_open_port(${toString uiPort}) + server.succeed("curl --fail localhost:${toString uiPort}") - with subtest("Lemmy-UI responds through the caddy reverse proxy"): - server.systemctl("start network-online.target") - server.wait_for_unit("network-online.target") - server.wait_for_unit("caddy.service") - server.wait_for_open_port(80) - body = server.execute("curl --fail --location ${lemmyNodeName}")[1] - assert "Lemmy" in body, f"String Lemmy not found in response for ${lemmyNodeName}: \n{body}" + with subtest("Lemmy-UI responds through the caddy reverse proxy"): + server.systemctl("start network-online.target") + server.wait_for_unit("network-online.target") + server.wait_for_unit("caddy.service") + server.wait_for_open_port(80) + body = server.execute("curl --fail --location ${lemmyNodeName}")[1] + assert "Lemmy" in body, f"String Lemmy not found in response for ${lemmyNodeName}: \n{body}" - with subtest("the server is exposed externally"): - client.systemctl("start network-online.target") - client.wait_for_unit("network-online.target") - client.succeed("curl -v --fail ${lemmyNodeName}") + with subtest("the server is exposed externally"): + client.systemctl("start network-online.target") + client.wait_for_unit("network-online.target") + client.succeed("curl -v --fail ${lemmyNodeName}") - with subtest("caddy correctly routes backend requests"): - # Make sure we are not hitting frontend - server.execute("systemctl stop lemmy-ui.service") + with subtest("caddy correctly routes backend requests"): + # Make sure we are not hitting frontend + server.execute("systemctl stop lemmy-ui.service") - def assert_http_code(url, expected_http_code, extra_curl_args=""): - _, http_code = server.execute(f'curl --location --silent -o /dev/null {extra_curl_args} --fail --write-out "%{{http_code}}" {url}') - assert http_code == str(expected_http_code), f"expected http code {expected_http_code}, got {http_code}" + def assert_http_code(url, expected_http_code, extra_curl_args=""): + _, http_code = server.execute(f'curl --location --silent -o /dev/null {extra_curl_args} --fail --write-out "%{{http_code}}" {url}') + assert http_code == str(expected_http_code), f"expected http code {expected_http_code}, got {http_code}" - # Caddy responds with HTTP code 502 if it cannot handle the requested path - assert_http_code("${lemmyNodeName}/obviously-wrong-path/", 502) + # Caddy responds with HTTP code 502 if it cannot handle the requested path + assert_http_code("${lemmyNodeName}/obviously-wrong-path/", 502) - assert_http_code("${lemmyNodeName}/static/js/client.js", 200) - assert_http_code("${lemmyNodeName}/api/v3/site", 200) + assert_http_code("${lemmyNodeName}/static/js/client.js", 200) + assert_http_code("${lemmyNodeName}/api/v3/site", 200) - # A 404 confirms that the request goes to the backend - # No path can return 200 until after we upload an image to pict-rs - assert_http_code("${lemmyNodeName}/pictrs/", 404) + # A 404 confirms that the request goes to the backend + # No path can return 200 until after we upload an image to pict-rs + assert_http_code("${lemmyNodeName}/pictrs/", 404) - assert_http_code("${lemmyNodeName}/feeds/all.xml", 200) - assert_http_code("${lemmyNodeName}/nodeinfo/2.0.json", 200) + assert_http_code("${lemmyNodeName}/feeds/all.xml", 200) + assert_http_code("${lemmyNodeName}/nodeinfo/2.0.json", 200) - assert_http_code("${lemmyNodeName}/some-other-made-up-path/", 404, "-X POST") - assert_http_code("${lemmyNodeName}/some-other-path", 404, "-H 'Accept: application/activity+json'") - 
assert_http_code("${lemmyNodeName}/some-other-path", 404, "-H 'Accept: application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"'") - ''; - } -) + assert_http_code("${lemmyNodeName}/some-other-made-up-path/", 404, "-X POST") + assert_http_code("${lemmyNodeName}/some-other-path", 404, "-H 'Accept: application/activity+json'") + assert_http_code("${lemmyNodeName}/some-other-path", 404, "-H 'Accept: application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"'") + ''; +} diff --git a/nixos/tests/libinput.nix b/nixos/tests/libinput.nix index 32858c80da85..47084fca53d2 100644 --- a/nixos/tests/libinput.nix +++ b/nixos/tests/libinput.nix @@ -1,41 +1,39 @@ -import ./make-test-python.nix ( - { ... }: +{ ... }: - { - name = "libinput"; +{ + name = "libinput"; - nodes.machine = - { ... }: - { - imports = [ - ./common/x11.nix - ./common/user-account.nix - ]; + nodes.machine = + { ... }: + { + imports = [ + ./common/x11.nix + ./common/user-account.nix + ]; - test-support.displayManager.auto.user = "alice"; + test-support.displayManager.auto.user = "alice"; - services.libinput = { - enable = true; - mouse = { - naturalScrolling = true; - leftHanded = true; - middleEmulation = false; - horizontalScrolling = false; - }; + services.libinput = { + enable = true; + mouse = { + naturalScrolling = true; + leftHanded = true; + middleEmulation = false; + horizontalScrolling = false; }; }; + }; - testScript = '' - def expect_xserver_option(option, value): - machine.succeed(f"""cat /var/log/X.0.log | grep -F 'Option "{option}" "{value}"'""") + testScript = '' + def expect_xserver_option(option, value): + machine.succeed(f"""cat /var/log/X.0.log | grep -F 'Option "{option}" "{value}"'""") - machine.start() - machine.wait_for_x() - machine.succeed("""cat /var/log/X.0.log | grep -F "Using input driver 'libinput'" """) - expect_xserver_option("NaturalScrolling", "on") - expect_xserver_option("LeftHanded", "on") - expect_xserver_option("MiddleEmulation", "off") - expect_xserver_option("HorizontalScrolling", "off") - ''; - } -) + machine.start() + machine.wait_for_x() + machine.succeed("""cat /var/log/X.0.log | grep -F "Using input driver 'libinput'" """) + expect_xserver_option("NaturalScrolling", "on") + expect_xserver_option("LeftHanded", "on") + expect_xserver_option("MiddleEmulation", "off") + expect_xserver_option("HorizontalScrolling", "off") + ''; +} diff --git a/nixos/tests/libresprite.nix b/nixos/tests/libresprite.nix index d03911788750..76417b80ee2e 100644 --- a/nixos/tests/libresprite.nix +++ b/nixos/tests/libresprite.nix @@ -1,34 +1,32 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "libresprite"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "libresprite"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = [ + pkgs.imagemagick + pkgs.libresprite + ]; }; - nodes.machine = - { config, pkgs, ... 
}: - { - imports = [ - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - environment.systemPackages = [ - pkgs.imagemagick - pkgs.libresprite - ]; - }; - - enableOCR = true; - - testScript = '' - machine.wait_for_x() - machine.succeed("convert -font DejaVu-Sans +antialias label:'IT WORKS' image.png") - machine.execute("libresprite image.png >&2 &") - machine.wait_for_window("LibreSprite ${pkgs.libresprite.version}-dev") - machine.wait_for_text("IT WORKS") - machine.screenshot("screen") - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.succeed("convert -font DejaVu-Sans +antialias label:'IT WORKS' image.png") + machine.execute("libresprite image.png >&2 &") + machine.wait_for_window("LibreSprite ${pkgs.libresprite.version}-dev") + machine.wait_for_text("IT WORKS") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/libuiohook.nix b/nixos/tests/libuiohook.nix index bd0cdcc7696a..34a200bd0349 100644 --- a/nixos/tests/libuiohook.nix +++ b/nixos/tests/libuiohook.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "libuiohook"; - meta = with lib.maintainers; { - maintainers = [ anoa ]; +{ pkgs, lib, ... }: +{ + name = "libuiohook"; + meta = with lib.maintainers; { + maintainers = [ anoa ]; + }; + + nodes.client = + { nodes, ... }: + let + user = nodes.client.config.users.users.alice; + in + { + imports = [ + ./common/user-account.nix + ./common/x11.nix + ]; + + environment.systemPackages = [ pkgs.libuiohook.test ]; + + test-support.displayManager.auto.user = user.name; }; - nodes.client = - { nodes, ... }: - let - user = nodes.client.config.users.users.alice; - in - { - imports = [ - ./common/user-account.nix - ./common/x11.nix - ]; - - environment.systemPackages = [ pkgs.libuiohook.test ]; - - test-support.displayManager.auto.user = user.name; - }; - - testScript = - { nodes, ... }: - let - user = nodes.client.config.users.users.alice; - in - '' - client.wait_for_x() - client.succeed("su - alice -c ${pkgs.libuiohook.test}/share/uiohook_tests >&2 &") - ''; - } -) + testScript = + { nodes, ... }: + let + user = nodes.client.config.users.users.alice; + in + '' + client.wait_for_x() + client.succeed("su - alice -c ${pkgs.libuiohook.test}/share/uiohook_tests >&2 &") + ''; +} diff --git a/nixos/tests/libvirtd.nix b/nixos/tests/libvirtd.nix index 3f996cdcde4a..d7df4c560177 100644 --- a/nixos/tests/libvirtd.nix +++ b/nixos/tests/libvirtd.nix @@ -1,78 +1,76 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "libvirtd"; - meta.maintainers = with pkgs.lib.maintainers; [ fpletz ]; +{ pkgs, ... }: +{ + name = "libvirtd"; + meta.maintainers = with pkgs.lib.maintainers; [ fpletz ]; - nodes = { - virthost = - { pkgs, ... }: - { - virtualisation = { - cores = 2; - memorySize = 2048; + nodes = { + virthost = + { pkgs, ... }: + { + virtualisation = { + cores = 2; + memorySize = 2048; - libvirtd.enable = true; - libvirtd.hooks.qemu.is_working = "${pkgs.writeShellScript "testHook.sh" '' - touch /tmp/qemu_hook_is_working - ''}"; - libvirtd.nss.enable = true; - }; - boot.supportedFilesystems = [ "zfs" ]; - networking.hostId = "deadbeef"; # needed for zfs - security.polkit.enable = true; - environment.systemPackages = with pkgs; [ virt-manager ]; - - # This adds `resolve` to the `hosts` line of /etc/nsswitch.conf; NSS modules placed after it - # will not be consulted. Therefore this tests that the libvirtd NSS modules will be - # be placed early enough for name resolution to work. 
- services.resolved.enable = true; + libvirtd.enable = true; + libvirtd.hooks.qemu.is_working = "${pkgs.writeShellScript "testHook.sh" '' + touch /tmp/qemu_hook_is_working + ''}"; + libvirtd.nss.enable = true; }; - }; + boot.supportedFilesystems = [ "zfs" ]; + networking.hostId = "deadbeef"; # needed for zfs + security.polkit.enable = true; + environment.systemPackages = with pkgs; [ virt-manager ]; - testScript = - let - nixosInstallISO = (import ../release.nix { }).iso_minimal.${pkgs.stdenv.hostPlatform.system}; - virshShutdownCmd = if pkgs.stdenv.hostPlatform.isx86_64 then "shutdown" else "destroy"; - in - '' - start_all() + # This adds `resolve` to the `hosts` line of /etc/nsswitch.conf; NSS modules placed after it + # will not be consulted. Therefore this tests that the libvirtd NSS modules will be + # be placed early enough for name resolution to work. + services.resolved.enable = true; + }; + }; + testScript = + let + nixosInstallISO = (import ../release.nix { }).iso_minimal.${pkgs.stdenv.hostPlatform.system}; + virshShutdownCmd = if pkgs.stdenv.hostPlatform.isx86_64 then "shutdown" else "destroy"; + in + '' + start_all() + + virthost.wait_for_unit("multi-user.target") + + with subtest("enable default network"): + virthost.succeed("virsh net-start default") + virthost.succeed("virsh net-autostart default") + virthost.succeed("virsh net-info default") + + with subtest("check if partition disk pools works with parted"): + virthost.succeed("fallocate -l100m /tmp/foo; losetup /dev/loop0 /tmp/foo; echo 'label: dos' | sfdisk /dev/loop0") + virthost.succeed("virsh pool-create-as foo disk --source-dev /dev/loop0 --target /dev") + virthost.succeed("virsh vol-create-as foo loop0p1 25MB") + virthost.succeed("virsh vol-create-as foo loop0p2 50MB") + + with subtest("check if virsh zfs pools work"): + virthost.succeed("fallocate -l100m /tmp/zfs; losetup /dev/loop1 /tmp/zfs;") + virthost.succeed("zpool create zfs_loop /dev/loop1") + virthost.succeed("virsh pool-define-as --name zfs_storagepool --source-name zfs_loop --type zfs") + virthost.succeed("virsh pool-start zfs_storagepool") + virthost.succeed("virsh vol-create-as zfs_storagepool disk1 25MB") + + with subtest("check if nixos install iso boots, network and autostart works"): + virthost.succeed( + "virt-install -n nixos --osinfo nixos-unstable --memory 1024 --graphics none --disk `find ${nixosInstallISO}/iso -type f | head -n1`,readonly=on --import --noautoconsole --autostart" + ) + virthost.succeed("virsh domstate nixos | grep running") + virthost.wait_until_succeeds("ping -c 1 nixos") + virthost.succeed("virsh ${virshShutdownCmd} nixos") + virthost.wait_until_succeeds("virsh domstate nixos | grep 'shut off'") + virthost.shutdown() virthost.wait_for_unit("multi-user.target") + virthost.wait_until_succeeds("ping -c 1 nixos") - with subtest("enable default network"): - virthost.succeed("virsh net-start default") - virthost.succeed("virsh net-autostart default") - virthost.succeed("virsh net-info default") - - with subtest("check if partition disk pools works with parted"): - virthost.succeed("fallocate -l100m /tmp/foo; losetup /dev/loop0 /tmp/foo; echo 'label: dos' | sfdisk /dev/loop0") - virthost.succeed("virsh pool-create-as foo disk --source-dev /dev/loop0 --target /dev") - virthost.succeed("virsh vol-create-as foo loop0p1 25MB") - virthost.succeed("virsh vol-create-as foo loop0p2 50MB") - - with subtest("check if virsh zfs pools work"): - virthost.succeed("fallocate -l100m /tmp/zfs; losetup /dev/loop1 /tmp/zfs;") - virthost.succeed("zpool 
create zfs_loop /dev/loop1") - virthost.succeed("virsh pool-define-as --name zfs_storagepool --source-name zfs_loop --type zfs") - virthost.succeed("virsh pool-start zfs_storagepool") - virthost.succeed("virsh vol-create-as zfs_storagepool disk1 25MB") - - with subtest("check if nixos install iso boots, network and autostart works"): - virthost.succeed( - "virt-install -n nixos --osinfo nixos-unstable --memory 1024 --graphics none --disk `find ${nixosInstallISO}/iso -type f | head -n1`,readonly=on --import --noautoconsole --autostart" - ) - virthost.succeed("virsh domstate nixos | grep running") - virthost.wait_until_succeeds("ping -c 1 nixos") - virthost.succeed("virsh ${virshShutdownCmd} nixos") - virthost.wait_until_succeeds("virsh domstate nixos | grep 'shut off'") - virthost.shutdown() - virthost.wait_for_unit("multi-user.target") - virthost.wait_until_succeeds("ping -c 1 nixos") - - with subtest("test if hooks are linked and run"): - virthost.succeed("ls /var/lib/libvirt/hooks/qemu.d/is_working") - virthost.succeed("ls /tmp/qemu_hook_is_working") - ''; - } -) + with subtest("test if hooks are linked and run"): + virthost.succeed("ls /var/lib/libvirt/hooks/qemu.d/is_working") + virthost.succeed("ls /tmp/qemu_hook_is_working") + ''; +} diff --git a/nixos/tests/lidarr.nix b/nixos/tests/lidarr.nix index 411d4a782390..8f32ce14673f 100644 --- a/nixos/tests/lidarr.nix +++ b/nixos/tests/lidarr.nix @@ -1,22 +1,20 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "lidarr"; - meta.maintainers = with lib.maintainers; [ etu ]; +{ + name = "lidarr"; + meta.maintainers = with lib.maintainers; [ etu ]; - nodes.machine = - { pkgs, ... }: - { - services.lidarr.enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.lidarr.enable = true; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("lidarr.service") - machine.wait_for_open_port(8686) - machine.succeed("curl --fail http://localhost:8686/") - ''; - } -) + machine.wait_for_unit("lidarr.service") + machine.wait_for_open_port(8686) + machine.succeed("curl --fail http://localhost:8686/") + ''; +} diff --git a/nixos/tests/lightdm.nix b/nixos/tests/lightdm.nix index 8490f0a6ffaa..3d51924af8de 100644 --- a/nixos/tests/lightdm.nix +++ b/nixos/tests/lightdm.nix @@ -1,36 +1,34 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "lightdm"; - meta = with pkgs.lib.maintainers; { - maintainers = [ aszlig ]; +{ pkgs, ... }: +{ + name = "lightdm"; + meta = with pkgs.lib.maintainers; { + maintainers = [ aszlig ]; + }; + + nodes.machine = + { ... }: + { + imports = [ ./common/user-account.nix ]; + services.xserver.enable = true; + services.xserver.displayManager.lightdm.enable = true; + services.displayManager.defaultSession = "none+icewm"; + services.xserver.windowManager.icewm.enable = true; }; - nodes.machine = - { ... }: - { - imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - services.xserver.displayManager.lightdm.enable = true; - services.displayManager.defaultSession = "none+icewm"; - services.xserver.windowManager.icewm.enable = true; - }; + enableOCR = true; - enableOCR = true; - - testScript = - { nodes, ... 
}: - let - user = nodes.machine.config.users.users.alice; - in - '' - start_all() - machine.wait_for_text("${user.description}") - machine.screenshot("lightdm") - machine.send_chars("${user.password}\n") - machine.wait_for_file("${user.home}/.Xauthority") - machine.succeed("xauth merge ${user.home}/.Xauthority") - machine.wait_for_window("^IceWM ") - ''; - } -) + testScript = + { nodes, ... }: + let + user = nodes.machine.config.users.users.alice; + in + '' + start_all() + machine.wait_for_text("${user.description}") + machine.screenshot("lightdm") + machine.send_chars("${user.password}\n") + machine.wait_for_file("${user.home}/.Xauthority") + machine.succeed("xauth merge ${user.home}/.Xauthority") + machine.wait_for_window("^IceWM ") + ''; +} diff --git a/nixos/tests/limesurvey.nix b/nixos/tests/limesurvey.nix index af6092b40193..9f90b4d16820 100644 --- a/nixos/tests/limesurvey.nix +++ b/nixos/tests/limesurvey.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "limesurvey"; - meta.maintainers = [ lib.maintainers.aanderse ]; +{ lib, pkgs, ... }: +{ + name = "limesurvey"; + meta.maintainers = [ lib.maintainers.aanderse ]; - nodes.machine = - { ... }: - { - services.limesurvey = { - enable = true; - virtualHost = { - hostName = "example.local"; - adminAddr = "root@example.local"; - }; - encryptionKeyFile = pkgs.writeText "key" (lib.strings.replicate 32 "0"); - encryptionNonceFile = pkgs.writeText "nonce" (lib.strings.replicate 24 "0"); + nodes.machine = + { ... }: + { + services.limesurvey = { + enable = true; + virtualHost = { + hostName = "example.local"; + adminAddr = "root@example.local"; }; - - # limesurvey won't work without a dot in the hostname - networking.hosts."127.0.0.1" = [ "example.local" ]; + encryptionKeyFile = pkgs.writeText "key" (lib.strings.replicate 32 "0"); + encryptionNonceFile = pkgs.writeText "nonce" (lib.strings.replicate 24 "0"); }; - testScript = '' - start_all() + # limesurvey won't work without a dot in the hostname + networking.hosts."127.0.0.1" = [ "example.local" ]; + }; - machine.wait_for_unit("phpfpm-limesurvey.service") - assert "The following surveys are available" in machine.succeed( - "curl -f http://example.local/" - ) - ''; - } -) + testScript = '' + start_all() + + machine.wait_for_unit("phpfpm-limesurvey.service") + assert "The following surveys are available" in machine.succeed( + "curl -f http://example.local/" + ) + ''; +} diff --git a/nixos/tests/litestream.nix b/nixos/tests/litestream.nix index 24fe804f3a9e..1ecf08cf5014 100644 --- a/nixos/tests/litestream.nix +++ b/nixos/tests/litestream.nix @@ -1,114 +1,112 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "litestream"; - meta = with pkgs.lib.maintainers; { - maintainers = [ jwygoda ]; - }; +{ pkgs, ... }: +{ + name = "litestream"; + meta = with pkgs.lib.maintainers; { + maintainers = [ jwygoda ]; + }; - nodes.machine = - { pkgs, ... }: - { - services.litestream = { - enable = true; - settings = { - dbs = [ - { - path = "/var/lib/grafana/data/grafana.db"; - replicas = [ - { - url = "sftp://foo:bar@127.0.0.1:22/home/foo/grafana"; - } - ]; - } - ]; - }; - }; - systemd.services.grafana.serviceConfig.ExecStartPost = - "+" - + pkgs.writeShellScript "grant-grafana-permissions" '' - timeout=10 - - while [ ! -f /var/lib/grafana/data/grafana.db ]; - do - if [ "$timeout" == 0 ]; then - echo "ERROR: Timeout while waiting for /var/lib/grafana/data/grafana.db." 
- exit 1 - fi - - sleep 1 - - ((timeout--)) - done - - find /var/lib/grafana -type d -exec chmod -v 775 {} \; - find /var/lib/grafana -type f -exec chmod -v 660 {} \; - ''; - services.openssh = { - enable = true; - allowSFTP = true; - listenAddresses = [ + nodes.machine = + { pkgs, ... }: + { + services.litestream = { + enable = true; + settings = { + dbs = [ { - addr = "127.0.0.1"; - port = 22; + path = "/var/lib/grafana/data/grafana.db"; + replicas = [ + { + url = "sftp://foo:bar@127.0.0.1:22/home/foo/grafana"; + } + ]; } ]; }; - services.grafana = { - enable = true; - settings = { - security = { - admin_user = "admin"; - admin_password = "admin"; - }; + }; + systemd.services.grafana.serviceConfig.ExecStartPost = + "+" + + pkgs.writeShellScript "grant-grafana-permissions" '' + timeout=10 - server = { - http_addr = "localhost"; - http_port = 3000; - }; + while [ ! -f /var/lib/grafana/data/grafana.db ]; + do + if [ "$timeout" == 0 ]; then + echo "ERROR: Timeout while waiting for /var/lib/grafana/data/grafana.db." + exit 1 + fi - database = { - type = "sqlite3"; - path = "/var/lib/grafana/data/grafana.db"; - wal = true; - }; + sleep 1 + + ((timeout--)) + done + + find /var/lib/grafana -type d -exec chmod -v 775 {} \; + find /var/lib/grafana -type f -exec chmod -v 660 {} \; + ''; + services.openssh = { + enable = true; + allowSFTP = true; + listenAddresses = [ + { + addr = "127.0.0.1"; + port = 22; + } + ]; + }; + services.grafana = { + enable = true; + settings = { + security = { + admin_user = "admin"; + admin_password = "admin"; + }; + + server = { + http_addr = "localhost"; + http_port = 3000; + }; + + database = { + type = "sqlite3"; + path = "/var/lib/grafana/data/grafana.db"; + wal = true; }; }; - users.users.foo = { - isNormalUser = true; - password = "bar"; - }; - users.users.litestream.extraGroups = [ "grafana" ]; }; + users.users.foo = { + isNormalUser = true; + password = "bar"; + }; + users.users.litestream.extraGroups = [ "grafana" ]; + }; - testScript = '' - start_all() - machine.wait_until_succeeds("test -d /home/foo/grafana") - machine.wait_for_open_port(3000) - machine.succeed(""" - curl -sSfN -X PUT -H "Content-Type: application/json" -d '{ - "oldPassword": "admin", - "newPassword": "newpass", - "confirmNew": "newpass" - }' http://admin:admin@127.0.0.1:3000/api/user/password - """) - # https://litestream.io/guides/systemd/#simulating-a-disaster - machine.systemctl("stop litestream.service") - machine.succeed( - "rm -f /var/lib/grafana/data/grafana.db " - "/var/lib/grafana/data/grafana.db-shm " - "/var/lib/grafana/data/grafana.db-wal" - ) - machine.succeed( - "litestream restore /var/lib/grafana/data/grafana.db " - "&& chown grafana:grafana /var/lib/grafana/data/grafana.db " - "&& chmod 660 /var/lib/grafana/data/grafana.db" - ) - machine.systemctl("restart grafana.service") - machine.wait_for_open_port(3000) - machine.succeed( - "curl -sSfN -u admin:newpass http://127.0.0.1:3000/api/org/users | grep admin\@localhost" - ) - ''; - } -) + testScript = '' + start_all() + machine.wait_until_succeeds("test -d /home/foo/grafana") + machine.wait_for_open_port(3000) + machine.succeed(""" + curl -sSfN -X PUT -H "Content-Type: application/json" -d '{ + "oldPassword": "admin", + "newPassword": "newpass", + "confirmNew": "newpass" + }' http://admin:admin@127.0.0.1:3000/api/user/password + """) + # https://litestream.io/guides/systemd/#simulating-a-disaster + machine.systemctl("stop litestream.service") + machine.succeed( + "rm -f /var/lib/grafana/data/grafana.db " + 
"/var/lib/grafana/data/grafana.db-shm " + "/var/lib/grafana/data/grafana.db-wal" + ) + machine.succeed( + "litestream restore /var/lib/grafana/data/grafana.db " + "&& chown grafana:grafana /var/lib/grafana/data/grafana.db " + "&& chmod 660 /var/lib/grafana/data/grafana.db" + ) + machine.systemctl("restart grafana.service") + machine.wait_for_open_port(3000) + machine.succeed( + "curl -sSfN -u admin:newpass http://127.0.0.1:3000/api/org/users | grep admin\@localhost" + ) + ''; +} diff --git a/nixos/tests/livebook-service.nix b/nixos/tests/livebook-service.nix index 140b056c48d4..b84fd894692c 100644 --- a/nixos/tests/livebook-service.nix +++ b/nixos/tests/livebook-service.nix @@ -1,48 +1,46 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "livebook-service"; +{ lib, pkgs, ... }: +{ + name = "livebook-service"; - nodes = { - machine = - { config, pkgs, ... }: - { - imports = [ - ./common/user-account.nix - ]; - - services.livebook = { - enableUserService = true; - environment = { - LIVEBOOK_PORT = 20123; - }; - environmentFile = pkgs.writeText "livebook.env" '' - LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" - ''; - }; - }; - }; - - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - sudo = lib.concatStringsSep " " [ - "XDG_RUNTIME_DIR=/run/user/${toString user.uid}" - "sudo" - "--preserve-env=XDG_RUNTIME_DIR" - "-u" - "alice" + nodes = { + machine = + { config, pkgs, ... }: + { + imports = [ + ./common/user-account.nix ]; - in - '' - machine.wait_for_unit("multi-user.target") - machine.succeed("loginctl enable-linger alice") - machine.wait_until_succeeds("${sudo} systemctl --user is-active livebook.service") - machine.wait_for_open_port(20123, timeout=10) + services.livebook = { + enableUserService = true; + environment = { + LIVEBOOK_PORT = 20123; + }; + environmentFile = pkgs.writeText "livebook.env" '' + LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + ''; + }; + }; + }; - machine.succeed("curl -L localhost:20123 | grep 'Type password'") - ''; - } -) + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + sudo = lib.concatStringsSep " " [ + "XDG_RUNTIME_DIR=/run/user/${toString user.uid}" + "sudo" + "--preserve-env=XDG_RUNTIME_DIR" + "-u" + "alice" + ]; + in + '' + machine.wait_for_unit("multi-user.target") + + machine.succeed("loginctl enable-linger alice") + machine.wait_until_succeeds("${sudo} systemctl --user is-active livebook.service") + machine.wait_for_open_port(20123, timeout=10) + + machine.succeed("curl -L localhost:20123 | grep 'Type password'") + ''; +} diff --git a/nixos/tests/lldap.nix b/nixos/tests/lldap.nix index 610bda27b1cc..c2e48525a5f3 100644 --- a/nixos/tests/lldap.nix +++ b/nixos/tests/lldap.nix @@ -1,31 +1,29 @@ -import ./make-test-python.nix ( - { ... }: - { - name = "lldap"; +{ ... }: +{ + name = "lldap"; - nodes.machine = - { pkgs, ... }: - { - services.lldap = { - enable = true; - settings = { - verbose = true; - ldap_base_dn = "dc=example,dc=com"; - }; + nodes.machine = + { pkgs, ... 
}: + { + services.lldap = { + enable = true; + settings = { + verbose = true; + ldap_base_dn = "dc=example,dc=com"; }; - environment.systemPackages = [ pkgs.openldap ]; }; + environment.systemPackages = [ pkgs.openldap ]; + }; - testScript = '' - machine.wait_for_unit("lldap.service") - machine.wait_for_open_port(3890) - machine.wait_for_open_port(17170) + testScript = '' + machine.wait_for_unit("lldap.service") + machine.wait_for_open_port(3890) + machine.wait_for_open_port(17170) - machine.succeed("curl --location --fail http://localhost:17170/") + machine.succeed("curl --location --fail http://localhost:17170/") - print( - machine.succeed('ldapsearch -H ldap://localhost:3890 -D uid=admin,ou=people,dc=example,dc=com -b "ou=people,dc=example,dc=com" -w password') - ) - ''; - } -) + print( + machine.succeed('ldapsearch -H ldap://localhost:3890 -D uid=admin,ou=people,dc=example,dc=com -b "ou=people,dc=example,dc=com" -w password') + ) + ''; +} diff --git a/nixos/tests/localsend.nix b/nixos/tests/localsend.nix index 551b6dd73ce8..047a3d1598c9 100644 --- a/nixos/tests/localsend.nix +++ b/nixos/tests/localsend.nix @@ -1,22 +1,20 @@ -import ./make-test-python.nix ( - { ... }: - { - name = "localsend"; +{ ... }: +{ + name = "localsend"; - nodes.machine = - { ... }: - { - imports = [ ./common/x11.nix ]; - programs.localsend.enable = true; - }; + nodes.machine = + { ... }: + { + imports = [ ./common/x11.nix ]; + programs.localsend.enable = true; + }; - testScript = '' - machine.wait_for_x() - machine.succeed("localsend_app >&2 &") - machine.wait_for_open_port(53317) - machine.wait_for_window("LocalSend", 10) - machine.succeed("netstat --listening --program --tcp | grep -P 'tcp.*53317.*localsend'") - machine.succeed("netstat --listening --program --udp | grep -P 'udp.*53317.*localsend'") - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.succeed("localsend_app >&2 &") + machine.wait_for_open_port(53317) + machine.wait_for_window("LocalSend", 10) + machine.succeed("netstat --listening --program --tcp | grep -P 'tcp.*53317.*localsend'") + machine.succeed("netstat --listening --program --udp | grep -P 'udp.*53317.*localsend'") + ''; +} diff --git a/nixos/tests/locate.nix b/nixos/tests/locate.nix index ce66d7fd3afc..559868128560 100644 --- a/nixos/tests/locate.nix +++ b/nixos/tests/locate.nix @@ -1,66 +1,64 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let - inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey; - in - { - name = "locate"; - meta.maintainers = with pkgs.lib.maintainers; [ chkno ]; +{ lib, pkgs, ... 
}: +let + inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey; +in +{ + name = "locate"; + meta.maintainers = with pkgs.lib.maintainers; [ chkno ]; - nodes = rec { - a = { - environment.systemPackages = with pkgs; [ sshfs ]; - virtualisation.fileSystems = { - "/ssh" = { - device = "alice@b:/"; - fsType = "fuse.sshfs"; - options = [ - "allow_other" - "IdentityFile=/privkey" - "noauto" - "StrictHostKeyChecking=no" - "UserKnownHostsFile=/dev/null" - ]; - }; - }; - services.locate = { - enable = true; - interval = "*:*:0/5"; + nodes = rec { + a = { + environment.systemPackages = with pkgs; [ sshfs ]; + virtualisation.fileSystems = { + "/ssh" = { + device = "alice@b:/"; + fsType = "fuse.sshfs"; + options = [ + "allow_other" + "IdentityFile=/privkey" + "noauto" + "StrictHostKeyChecking=no" + "UserKnownHostsFile=/dev/null" + ]; }; }; - b = { - services.openssh.enable = true; - users.users.alice = { - isNormalUser = true; - openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - }; + services.locate = { + enable = true; + interval = "*:*:0/5"; }; }; + b = { + services.openssh.enable = true; + users.users.alice = { + isNormalUser = true; + openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; + }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - # Set up sshfs mount - a.succeed( - "(umask 077; cat ${snakeOilPrivateKey} > /privkey)" - ) - b.succeed("touch /file-on-b-machine") - b.wait_for_open_port(22) - a.succeed("mkdir /ssh") - a.succeed("mount /ssh") + # Set up sshfs mount + a.succeed( + "(umask 077; cat ${snakeOilPrivateKey} > /privkey)" + ) + b.succeed("touch /file-on-b-machine") + b.wait_for_open_port(22) + a.succeed("mkdir /ssh") + a.succeed("mount /ssh") - # Core locatedb functionality - a.succeed("touch /file-on-a-machine-1") - a.wait_for_file("/var/cache/locatedb") - a.wait_until_succeeds("locate file-on-a-machine-1") + # Core locatedb functionality + a.succeed("touch /file-on-a-machine-1") + a.wait_for_file("/var/cache/locatedb") + a.wait_until_succeeds("locate file-on-a-machine-1") - # Wait for a second update to make sure we're using a locatedb from a run - # that began after the sshfs mount - a.succeed("touch /file-on-a-machine-2") - a.wait_until_succeeds("locate file-on-a-machine-2") + # Wait for a second update to make sure we're using a locatedb from a run + # that began after the sshfs mount + a.succeed("touch /file-on-a-machine-2") + a.wait_until_succeeds("locate file-on-a-machine-2") - # We shouldn't be able to see files on the other machine - a.fail("locate file-on-b-machine") - ''; - } -) + # We shouldn't be able to see files on the other machine + a.fail("locate file-on-b-machine") + ''; +} diff --git a/nixos/tests/login.nix b/nixos/tests/login.nix index acfb270193ab..b5b710215ca2 100644 --- a/nixos/tests/login.nix +++ b/nixos/tests/login.nix @@ -1,74 +1,72 @@ -import ./make-test-python.nix ( - { - pkgs, - latestKernel ? false, - ... - }: +{ + pkgs, + latestKernel ? false, + ... +}: - { - name = "login"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; +{ + name = "login"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; + + nodes.machine = + { pkgs, lib, ... }: + { + boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest; }; - nodes.machine = - { pkgs, lib, ... 
}: - { - boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest; - }; + testScript = '' + machine.start(allow_reboot = True) - testScript = '' - machine.start(allow_reboot = True) + machine.wait_for_unit("multi-user.target") + machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + machine.screenshot("postboot") - machine.wait_for_unit("multi-user.target") - machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") - machine.screenshot("postboot") + with subtest("create user"): + machine.succeed("useradd -m alice") + machine.succeed("(echo foobar; echo foobar) | passwd alice") - with subtest("create user"): - machine.succeed("useradd -m alice") - machine.succeed("(echo foobar; echo foobar) | passwd alice") + with subtest("Check whether switching VTs works"): + machine.fail("pgrep -f 'agetty.*tty2'") + machine.send_key("alt-f2") + machine.wait_until_succeeds("[ $(fgconsole) = 2 ]") + machine.wait_for_unit("getty@tty2.service") + machine.wait_until_succeeds("pgrep -f 'agetty.*tty2'") - with subtest("Check whether switching VTs works"): - machine.fail("pgrep -f 'agetty.*tty2'") - machine.send_key("alt-f2") - machine.wait_until_succeeds("[ $(fgconsole) = 2 ]") - machine.wait_for_unit("getty@tty2.service") - machine.wait_until_succeeds("pgrep -f 'agetty.*tty2'") + with subtest("Log in as alice on a virtual console"): + machine.wait_until_tty_matches("2", "login: ") + machine.send_chars("alice\n") + machine.wait_until_tty_matches("2", "login: alice") + machine.wait_until_succeeds("pgrep login") + machine.wait_until_tty_matches("2", "Password: ") + machine.send_chars("foobar\n") + machine.wait_until_succeeds("pgrep -u alice bash") + machine.send_chars("touch done\n") + machine.wait_for_file("/home/alice/done") - with subtest("Log in as alice on a virtual console"): - machine.wait_until_tty_matches("2", "login: ") - machine.send_chars("alice\n") - machine.wait_until_tty_matches("2", "login: alice") - machine.wait_until_succeeds("pgrep login") - machine.wait_until_tty_matches("2", "Password: ") - machine.send_chars("foobar\n") - machine.wait_until_succeeds("pgrep -u alice bash") - machine.send_chars("touch done\n") - machine.wait_for_file("/home/alice/done") + with subtest("Systemd gives and removes device ownership as needed"): + machine.succeed("getfacl /dev/snd/timer | grep -q alice") + machine.send_key("alt-f1") + machine.wait_until_succeeds("[ $(fgconsole) = 1 ]") + machine.fail("getfacl /dev/snd/timer | grep -q alice") + machine.succeed("chvt 2") + machine.wait_until_succeeds("getfacl /dev/snd/timer | grep -q alice") - with subtest("Systemd gives and removes device ownership as needed"): - machine.succeed("getfacl /dev/snd/timer | grep -q alice") - machine.send_key("alt-f1") - machine.wait_until_succeeds("[ $(fgconsole) = 1 ]") - machine.fail("getfacl /dev/snd/timer | grep -q alice") - machine.succeed("chvt 2") - machine.wait_until_succeeds("getfacl /dev/snd/timer | grep -q alice") + with subtest("Virtual console logout"): + machine.send_chars("exit\n") + machine.wait_until_fails("pgrep -u alice bash") + machine.screenshot("getty") - with subtest("Virtual console logout"): - machine.send_chars("exit\n") - machine.wait_until_fails("pgrep -u alice bash") - machine.screenshot("getty") + with subtest("Check whether ctrl-alt-delete works"): + boot_id1 = machine.succeed("cat /proc/sys/kernel/random/boot_id").strip() + assert boot_id1 != "" - with subtest("Check whether ctrl-alt-delete works"): - boot_id1 = machine.succeed("cat /proc/sys/kernel/random/boot_id").strip() - assert boot_id1 
!= "" + machine.reboot() - machine.reboot() + boot_id2 = machine.succeed("cat /proc/sys/kernel/random/boot_id").strip() + assert boot_id2 != "" - boot_id2 = machine.succeed("cat /proc/sys/kernel/random/boot_id").strip() - assert boot_id2 != "" - - assert boot_id1 != boot_id2 - ''; - } -) + assert boot_id1 != boot_id2 + ''; +} diff --git a/nixos/tests/loki.nix b/nixos/tests/loki.nix index 8f5e23c3ac08..9c08c4312c97 100644 --- a/nixos/tests/loki.nix +++ b/nixos/tests/loki.nix @@ -1,65 +1,63 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - { - name = "loki"; +{ + name = "loki"; - meta = with lib.maintainers; { - maintainers = [ willibutz ]; - }; + meta = with lib.maintainers; { + maintainers = [ willibutz ]; + }; - nodes.machine = - { ... }: - { - services.loki = { - enable = true; + nodes.machine = + { ... }: + { + services.loki = { + enable = true; - # FIXME(globin) revert to original file when upstream fix released - # configFile = "${pkgs.grafana-loki.src}/cmd/loki/loki-local-config.yaml"; - configFile = pkgs.runCommandNoCC "patched-loki-cfg.yml" { } '' - sed '/metric_aggregation/!b;n;/enable/d' "${pkgs.grafana-loki.src}/cmd/loki/loki-local-config.yaml" > $out - ''; - }; - services.promtail = { - enable = true; - configuration = { - server = { - http_listen_port = 9080; - grpc_listen_port = 0; - }; - clients = [ { url = "http://localhost:3100/loki/api/v1/push"; } ]; - scrape_configs = [ - { - job_name = "system"; - static_configs = [ - { - targets = [ "localhost" ]; - labels = { - job = "varlogs"; - __path__ = "/var/log/*log"; - }; - } - ]; - } - ]; + # FIXME(globin) revert to original file when upstream fix released + # configFile = "${pkgs.grafana-loki.src}/cmd/loki/loki-local-config.yaml"; + configFile = pkgs.runCommandNoCC "patched-loki-cfg.yml" { } '' + sed '/metric_aggregation/!b;n;/enable/d' "${pkgs.grafana-loki.src}/cmd/loki/loki-local-config.yaml" > $out + ''; + }; + services.promtail = { + enable = true; + configuration = { + server = { + http_listen_port = 9080; + grpc_listen_port = 0; }; + clients = [ { url = "http://localhost:3100/loki/api/v1/push"; } ]; + scrape_configs = [ + { + job_name = "system"; + static_configs = [ + { + targets = [ "localhost" ]; + labels = { + job = "varlogs"; + __path__ = "/var/log/*log"; + }; + } + ]; + } + ]; }; }; + }; - testScript = '' - machine.start - machine.wait_for_unit("loki.service") - machine.wait_for_unit("promtail.service") - machine.wait_for_open_port(3100) - machine.wait_for_open_port(9080) - machine.succeed("echo 'Loki Ingestion Test' > /var/log/testlog") - # should not have access to journal unless specified - machine.fail( - "systemctl show --property=SupplementaryGroups promtail | grep -q systemd-journal" - ) - machine.wait_until_succeeds( - "${pkgs.grafana-loki}/bin/logcli --addr='http://localhost:3100' query --no-labels '{job=\"varlogs\",filename=\"/var/log/testlog\"}' | grep -q 'Loki Ingestion Test'" - ) - ''; - } -) + testScript = '' + machine.start + machine.wait_for_unit("loki.service") + machine.wait_for_unit("promtail.service") + machine.wait_for_open_port(3100) + machine.wait_for_open_port(9080) + machine.succeed("echo 'Loki Ingestion Test' > /var/log/testlog") + # should not have access to journal unless specified + machine.fail( + "systemctl show --property=SupplementaryGroups promtail | grep -q systemd-journal" + ) + machine.wait_until_succeeds( + "${pkgs.grafana-loki}/bin/logcli --addr='http://localhost:3100' query --no-labels '{job=\"varlogs\",filename=\"/var/log/testlog\"}' | grep -q 
'Loki Ingestion Test'" + ) + ''; +} diff --git a/nixos/tests/luks.nix b/nixos/tests/luks.nix index 15489343b707..685643c4c9dc 100644 --- a/nixos/tests/luks.nix +++ b/nixos/tests/luks.nix @@ -1,81 +1,79 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "luks"; +{ lib, pkgs, ... }: +{ + name = "luks"; - nodes.machine = - { pkgs, ... }: - { - imports = [ ./common/auto-format-root-device.nix ]; + nodes.machine = + { pkgs, ... }: + { + imports = [ ./common/auto-format-root-device.nix ]; - # Use systemd-boot - virtualisation = { - emptyDiskImages = [ - 512 - 512 - ]; - useBootLoader = true; - useEFIBoot = true; - # To boot off the encrypted disk, we need to have a init script which comes from the Nix store - mountHostNixStore = true; - }; - boot.loader.systemd-boot.enable = true; - - boot.kernelParams = lib.mkOverride 5 [ "console=tty1" ]; - - environment.systemPackages = with pkgs; [ cryptsetup ]; - - specialisation = rec { - boot-luks.configuration = { - boot.initrd.luks.devices = lib.mkVMOverride { - # We have two disks and only type one password - key reuse is in place - cryptroot.device = "/dev/vdb"; - cryptroot2.device = "/dev/vdc"; - }; - virtualisation.rootDevice = "/dev/mapper/cryptroot"; - }; - boot-luks-custom-keymap.configuration = lib.mkMerge [ - boot-luks.configuration - { - console.keyMap = "neo"; - } - ]; - }; + # Use systemd-boot + virtualisation = { + emptyDiskImages = [ + 512 + 512 + ]; + useBootLoader = true; + useEFIBoot = true; + # To boot off the encrypted disk, we need to have a init script which comes from the Nix store + mountHostNixStore = true; }; + boot.loader.systemd-boot.enable = true; - enableOCR = true; + boot.kernelParams = lib.mkOverride 5 [ "console=tty1" ]; - testScript = '' - # Create encrypted volume - machine.wait_for_unit("multi-user.target") - machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") - machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -") + environment.systemPackages = with pkgs; [ cryptsetup ]; - # Boot from the encrypted disk - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") - machine.succeed("sync") - machine.crash() + specialisation = rec { + boot-luks.configuration = { + boot.initrd.luks.devices = lib.mkVMOverride { + # We have two disks and only type one password - key reuse is in place + cryptroot.device = "/dev/vdb"; + cryptroot2.device = "/dev/vdc"; + }; + virtualisation.rootDevice = "/dev/mapper/cryptroot"; + }; + boot-luks-custom-keymap.configuration = lib.mkMerge [ + boot-luks.configuration + { + console.keyMap = "neo"; + } + ]; + }; + }; - # Boot and decrypt the disk - machine.start() - machine.wait_for_text("Passphrase for") - machine.send_chars("supersecret\n") - machine.wait_for_unit("multi-user.target") + enableOCR = true; - assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") + testScript = '' + # Create encrypted volume + machine.wait_for_unit("multi-user.target") + machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") + machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -") - # Boot from the encrypted disk with custom keymap - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks-custom-keymap.conf") - machine.succeed("sync") - machine.crash() + # Boot from the encrypted disk + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") + 
machine.succeed("sync") + machine.crash() - # Boot and decrypt the disk - machine.start() - machine.wait_for_text("Passphrase for") - machine.send_chars("havfkhfrkfl\n") - machine.wait_for_unit("multi-user.target") + # Boot and decrypt the disk + machine.start() + machine.wait_for_text("Passphrase for") + machine.send_chars("supersecret\n") + machine.wait_for_unit("multi-user.target") - assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") - ''; - } -) + assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") + + # Boot from the encrypted disk with custom keymap + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks-custom-keymap.conf") + machine.succeed("sync") + machine.crash() + + # Boot and decrypt the disk + machine.start() + machine.wait_for_text("Passphrase for") + machine.send_chars("havfkhfrkfl\n") + machine.wait_for_unit("multi-user.target") + + assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") + ''; +} diff --git a/nixos/tests/lxd-image-server.nix b/nixos/tests/lxd-image-server.nix index 4b0b1259f404..498f17ef56a2 100644 --- a/nixos/tests/lxd-image-server.nix +++ b/nixos/tests/lxd-image-server.nix @@ -1,102 +1,100 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - lxd-image = import ../release.nix { - configuration = { - # Building documentation makes the test unnecessarily take a longer time: - documentation.enable = lib.mkForce false; - }; +let + lxd-image = import ../release.nix { + configuration = { + # Building documentation makes the test unnecessarily take a longer time: + documentation.enable = lib.mkForce false; }; + }; - lxd-image-metadata = lxd-image.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system}; - lxd-image-rootfs = lxd-image.lxdContainerImage.${pkgs.stdenv.hostPlatform.system}; + lxd-image-metadata = lxd-image.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system}; + lxd-image-rootfs = lxd-image.lxdContainerImage.${pkgs.stdenv.hostPlatform.system}; - in - { - name = "lxd-image-server"; +in +{ + name = "lxd-image-server"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - mkg20001 - patryk27 + meta = with pkgs.lib.maintainers; { + maintainers = [ + mkg20001 + patryk27 + ]; + }; + + nodes.machine = + { lib, ... }: + { + virtualisation = { + cores = 2; + + memorySize = 2048; + diskSize = 4096; + + lxc.lxcfs.enable = true; + lxd.enable = true; + }; + + security.pki.certificates = [ + (builtins.readFile ./common/acme/server/ca.cert.pem) ]; - }; - nodes.machine = - { lib, ... 
}: - { - virtualisation = { - cores = 2; + services.nginx = { + enable = true; + }; - memorySize = 2048; - diskSize = 4096; - - lxc.lxcfs.enable = true; - lxd.enable = true; - }; - - security.pki.certificates = [ - (builtins.readFile ./common/acme/server/ca.cert.pem) - ]; - - services.nginx = { + services.lxd-image-server = { + enable = true; + nginx = { enable = true; - }; - - services.lxd-image-server = { - enable = true; - nginx = { - enable = true; - domain = "acme.test"; - }; - }; - - services.nginx.virtualHosts."acme.test" = { - enableACME = false; - sslCertificate = ./common/acme/server/acme.test.cert.pem; - sslCertificateKey = ./common/acme/server/acme.test.key.pem; - }; - - networking.hosts = { - "::1" = [ "acme.test" ]; + domain = "acme.test"; }; }; - testScript = '' - machine.wait_for_unit("sockets.target") - machine.wait_for_unit("lxd.service") - machine.wait_for_file("/var/lib/lxd/unix.socket") + services.nginx.virtualHosts."acme.test" = { + enableACME = false; + sslCertificate = ./common/acme/server/acme.test.cert.pem; + sslCertificateKey = ./common/acme/server/acme.test.key.pem; + }; - # Wait for lxd to settle - machine.succeed("lxd waitready") + networking.hosts = { + "::1" = [ "acme.test" ]; + }; + }; - # lxd expects the pool's directory to already exist - machine.succeed("mkdir /var/lxd-pool") + testScript = '' + machine.wait_for_unit("sockets.target") + machine.wait_for_unit("lxd.service") + machine.wait_for_file("/var/lib/lxd/unix.socket") - machine.succeed( - "lxd init --minimal" - ) + # Wait for lxd to settle + machine.succeed("lxd waitready") - machine.succeed( - "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos" - ) + # lxd expects the pool's directory to already exist + machine.succeed("mkdir /var/lxd-pool") - loc = "/var/www/simplestreams/images/iats/nixos/amd64/default/v1" + machine.succeed( + "lxd init --minimal" + ) - with subtest("push image to server"): - machine.succeed("lxc launch nixos test") - machine.sleep(5) - machine.succeed("lxc stop -f test") - machine.succeed("lxc publish --public test --alias=testimg") - machine.succeed("lxc image export testimg") - machine.succeed("ls >&2") - machine.succeed("mkdir -p " + loc) - machine.succeed("mv *.tar.gz " + loc) + machine.succeed( + "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos" + ) - with subtest("pull image from server"): - machine.succeed("lxc remote add img https://acme.test --protocol=simplestreams") - machine.succeed("lxc image list img: >&2") - ''; - } -) + loc = "/var/www/simplestreams/images/iats/nixos/amd64/default/v1" + + with subtest("push image to server"): + machine.succeed("lxc launch nixos test") + machine.sleep(5) + machine.succeed("lxc stop -f test") + machine.succeed("lxc publish --public test --alias=testimg") + machine.succeed("lxc image export testimg") + machine.succeed("ls >&2") + machine.succeed("mkdir -p " + loc) + machine.succeed("mv *.tar.gz " + loc) + + with subtest("pull image from server"): + machine.succeed("lxc remote add img https://acme.test --protocol=simplestreams") + machine.succeed("lxc image list img: >&2") + ''; +} diff --git a/nixos/tests/lxqt.nix b/nixos/tests/lxqt.nix index f2d5bb513ce8..a685a21536bb 100644 --- a/nixos/tests/lxqt.nix +++ b/nixos/tests/lxqt.nix @@ -1,80 +1,78 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... 
}: - { - name = "lxqt"; +{ + name = "lxqt"; - meta.maintainers = lib.teams.lxqt.members ++ [ lib.maintainers.bobby285271 ]; + meta.maintainers = lib.teams.lxqt.members ++ [ lib.maintainers.bobby285271 ]; - nodes.machine = - { ... }: + nodes.machine = + { ... }: - { - imports = [ ./common/user-account.nix ]; + { + imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - services.xserver.desktopManager.lxqt.enable = true; + services.xserver.enable = true; + services.xserver.desktopManager.lxqt.enable = true; - services.displayManager = { - sddm.enable = true; - defaultSession = "lxqt"; - autoLogin = { - enable = true; - user = "alice"; - }; + services.displayManager = { + sddm.enable = true; + defaultSession = "lxqt"; + autoLogin = { + enable = true; + user = "alice"; }; }; + }; - enableOCR = true; + enableOCR = true; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - in - '' - machine.wait_for_unit("display-manager.service") + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + in + '' + machine.wait_for_unit("display-manager.service") - with subtest("Wait for login"): - machine.wait_for_x() - machine.wait_for_file("/tmp/xauth_*") - machine.succeed("xauth merge /tmp/xauth_*") - machine.succeed("su - ${user.name} -c 'xauth merge /tmp/xauth_*'") + with subtest("Wait for login"): + machine.wait_for_x() + machine.wait_for_file("/tmp/xauth_*") + machine.succeed("xauth merge /tmp/xauth_*") + machine.succeed("su - ${user.name} -c 'xauth merge /tmp/xauth_*'") - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - with subtest("Check if LXQt components actually start"): - for i in ["openbox", "lxqt-session", "pcmanfm-qt", "lxqt-panel", "lxqt-runner"]: - machine.wait_until_succeeds(f"pgrep {i}") - machine.wait_for_window("pcmanfm-desktop0") - machine.wait_for_window("lxqt-panel") - machine.wait_for_text("(Computer|Network|Trash)") + with subtest("Check if LXQt components actually start"): + for i in ["openbox", "lxqt-session", "pcmanfm-qt", "lxqt-panel", "lxqt-runner"]: + machine.wait_until_succeeds(f"pgrep {i}") + machine.wait_for_window("pcmanfm-desktop0") + machine.wait_for_window("lxqt-panel") + machine.wait_for_text("(Computer|Network|Trash)") - with subtest("Open QTerminal"): - machine.succeed("su - ${user.name} -c 'DISPLAY=:0 qterminal >&2 &'") - machine.wait_until_succeeds("pgrep qterminal") - machine.wait_for_window("${user.name}@machine: ~") + with subtest("Open QTerminal"): + machine.succeed("su - ${user.name} -c 'DISPLAY=:0 qterminal >&2 &'") + machine.wait_until_succeeds("pgrep qterminal") + machine.wait_for_window("${user.name}@machine: ~") - with subtest("Open PCManFM-Qt"): - machine.succeed("mkdir -p /tmp/test/test") - machine.succeed("su - ${user.name} -c 'DISPLAY=:0 QT_SCALE_FACTOR=2 pcmanfm-qt /tmp/test >&2 &'") - machine.wait_for_window("test") - machine.wait_for_text("(test|Bookmarks|Reload)") + with subtest("Open PCManFM-Qt"): + machine.succeed("mkdir -p /tmp/test/test") + machine.succeed("su - ${user.name} -c 'DISPLAY=:0 QT_SCALE_FACTOR=2 pcmanfm-qt /tmp/test >&2 &'") + machine.wait_for_window("test") + machine.wait_for_text("(test|Bookmarks|Reload)") - with subtest("Check if various environment variables are set"): - cmd = "xargs --null --max-args=1 
echo < /proc/$(pgrep -xf /run/current-system/sw/bin/lxqt-panel)/environ" - machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP=LXQt'") - machine.succeed(f"{cmd} | grep 'QT_PLATFORM_PLUGIN=lxqt'") - # From login shell. - machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE=1'") - # See the nixos/lxqt module. - machine.succeed(f"{cmd} | grep 'XDG_CONFIG_DIRS' | grep '${nodes.machine.system.path}'") + with subtest("Check if various environment variables are set"): + cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf /run/current-system/sw/bin/lxqt-panel)/environ" + machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP=LXQt'") + machine.succeed(f"{cmd} | grep 'QT_PLATFORM_PLUGIN=lxqt'") + # From login shell. + machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE=1'") + # See the nixos/lxqt module. + machine.succeed(f"{cmd} | grep 'XDG_CONFIG_DIRS' | grep '${nodes.machine.system.path}'") - with subtest("Check if any coredumps are found"): - machine.succeed("(coredumpctl --json=short 2>&1 || true) | grep 'No coredumps found'") - machine.sleep(10) - machine.screenshot("screen") - ''; - } -) + with subtest("Check if any coredumps are found"): + machine.succeed("(coredumpctl --json=short 2>&1 || true) | grep 'No coredumps found'") + machine.sleep(10) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/ly.nix b/nixos/tests/ly.nix index 04c6ed9c7774..4791bf8056f2 100644 --- a/nixos/tests/ly.nix +++ b/nixos/tests/ly.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { ... }: +{ ... }: - { - name = "ly"; +{ + name = "ly"; - nodes.machine = - { ... }: - { - imports = [ ./common/user-account.nix ]; - services.displayManager.ly = { - enable = true; - settings = { - load = false; - save = false; - }; + nodes.machine = + { ... }: + { + imports = [ ./common/user-account.nix ]; + services.displayManager.ly = { + enable = true; + settings = { + load = false; + save = false; }; - services.xserver.enable = true; - services.displayManager.defaultSession = "none+icewm"; - services.xserver.windowManager.icewm.enable = true; }; + services.xserver.enable = true; + services.displayManager.defaultSession = "none+icewm"; + services.xserver.windowManager.icewm.enable = true; + }; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - in - '' - start_all() - machine.wait_until_tty_matches("2", "password:") - machine.send_key("ctrl-alt-f2") - machine.sleep(1) - machine.screenshot("ly") - machine.send_chars("alice") - machine.send_key("tab") - machine.send_chars("${user.password}") - machine.send_key("ret") - machine.wait_for_file("/run/user/${toString user.uid}/lyxauth") - machine.succeed("xauth merge /run/user/${toString user.uid}/lyxauth") - machine.wait_for_window("^IceWM ") - machine.screenshot("icewm") - ''; - } -) + testScript = + { nodes, ... 
}: + let + user = nodes.machine.users.users.alice; + in + '' + start_all() + machine.wait_until_tty_matches("2", "password:") + machine.send_key("ctrl-alt-f2") + machine.sleep(1) + machine.screenshot("ly") + machine.send_chars("alice") + machine.send_key("tab") + machine.send_chars("${user.password}") + machine.send_key("ret") + machine.wait_for_file("/run/user/${toString user.uid}/lyxauth") + machine.succeed("xauth merge /run/user/${toString user.uid}/lyxauth") + machine.wait_for_window("^IceWM ") + machine.screenshot("icewm") + ''; +} diff --git a/nixos/tests/maestral.nix b/nixos/tests/maestral.nix index aa510897d0d7..9b42b2d59e28 100644 --- a/nixos/tests/maestral.nix +++ b/nixos/tests/maestral.nix @@ -1,83 +1,81 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "maestral"; - meta = with pkgs.lib.maintainers; { - maintainers = [ peterhoeg ]; +{ pkgs, ... }: +{ + name = "maestral"; + meta = with pkgs.lib.maintainers; { + maintainers = [ peterhoeg ]; + }; + + nodes = + let + common = + attrs: + pkgs.lib.recursiveUpdate { + imports = [ ./common/user-account.nix ]; + systemd.user.services.maestral = { + description = "Maestral Dropbox Client"; + serviceConfig.Type = "exec"; + }; + } attrs; + + in + { + cli = + { ... }: + common { + systemd.user.services.maestral = { + wantedBy = [ "default.target" ]; + serviceConfig.ExecStart = "${pkgs.maestral}/bin/maestral start --foreground"; + }; + }; + + gui = + { ... }: + common { + services.xserver = { + enable = true; + desktopManager.plasma5.enable = true; + desktopManager.plasma5.runUsingSystemd = true; + }; + + services.displayManager = { + sddm.enable = true; + defaultSession = "plasma"; + autoLogin = { + enable = true; + user = "alice"; + }; + }; + + systemd.user.services = { + maestral = { + wantedBy = [ "graphical-session.target" ]; + serviceConfig.ExecStart = "${pkgs.maestral-gui}/bin/maestral_qt"; + }; + # PowerDevil doesn't like our VM + plasma-powerdevil.enable = false; + }; + }; }; - nodes = - let - common = - attrs: - pkgs.lib.recursiveUpdate { - imports = [ ./common/user-account.nix ]; - systemd.user.services.maestral = { - description = "Maestral Dropbox Client"; - serviceConfig.Type = "exec"; - }; - } attrs; + testScript = + { nodes, ... }: + let + user = nodes.cli.users.users.alice; + in + '' + start_all() - in - { - cli = - { ... }: - common { - systemd.user.services.maestral = { - wantedBy = [ "default.target" ]; - serviceConfig.ExecStart = "${pkgs.maestral}/bin/maestral start --foreground"; - }; - }; + with subtest("CLI"): + # we need SOME way to give the user an active login session + cli.execute("loginctl enable-linger ${user.name}") + cli.systemctl("start user@${toString user.uid}") + cli.wait_for_unit("maestral.service", "${user.name}") - gui = - { ... }: - common { - services.xserver = { - enable = true; - desktopManager.plasma5.enable = true; - desktopManager.plasma5.runUsingSystemd = true; - }; - - services.displayManager = { - sddm.enable = true; - defaultSession = "plasma"; - autoLogin = { - enable = true; - user = "alice"; - }; - }; - - systemd.user.services = { - maestral = { - wantedBy = [ "graphical-session.target" ]; - serviceConfig.ExecStart = "${pkgs.maestral-gui}/bin/maestral_qt"; - }; - # PowerDevil doesn't like our VM - plasma-powerdevil.enable = false; - }; - }; - }; - - testScript = - { nodes, ... 
}: - let - user = nodes.cli.users.users.alice; - in - '' - start_all() - - with subtest("CLI"): - # we need SOME way to give the user an active login session - cli.execute("loginctl enable-linger ${user.name}") - cli.systemctl("start user@${toString user.uid}") - cli.wait_for_unit("maestral.service", "${user.name}") - - with subtest("GUI"): - gui.wait_for_x() - gui.wait_for_file("/tmp/xauth_*") - gui.succeed("xauth merge /tmp/xauth_*") - gui.wait_for_window("^Desktop ") - gui.wait_for_unit("maestral.service", "${user.name}") - ''; - } -) + with subtest("GUI"): + gui.wait_for_x() + gui.wait_for_file("/tmp/xauth_*") + gui.succeed("xauth merge /tmp/xauth_*") + gui.wait_for_window("^Desktop ") + gui.wait_for_unit("maestral.service", "${user.name}") + ''; +} diff --git a/nixos/tests/magnetico.nix b/nixos/tests/magnetico.nix index 279d19d87778..294762fe146b 100644 --- a/nixos/tests/magnetico.nix +++ b/nixos/tests/magnetico.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - port = 8081; - in - { - name = "magnetico"; - meta = with pkgs.lib.maintainers; { - maintainers = [ rnhmjoj ]; +let + port = 8081; +in +{ + name = "magnetico"; + meta = with pkgs.lib.maintainers; { + maintainers = [ rnhmjoj ]; + }; + + nodes.machine = + { ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; + + networking.firewall.allowedTCPPorts = [ 9000 ]; + + services.magnetico = { + enable = true; + crawler.port = 9000; + web.port = port; + web.credentials.user = "$2y$12$P88ZF6soFthiiAeXnz64aOWDsY3Dw7Yw8fZ6GtiqFNjknD70zDmNe"; + }; }; - nodes.machine = - { ... }: - { - imports = [ ../modules/profiles/minimal.nix ]; - - networking.firewall.allowedTCPPorts = [ 9000 ]; - - services.magnetico = { - enable = true; - crawler.port = 9000; - web.port = port; - web.credentials.user = "$2y$12$P88ZF6soFthiiAeXnz64aOWDsY3Dw7Yw8fZ6GtiqFNjknD70zDmNe"; - }; - }; - - testScript = '' - start_all() - machine.wait_for_unit("magneticod") - machine.wait_for_unit("magneticow") - machine.wait_for_open_port(${toString port}) - machine.succeed( - "${pkgs.curl}/bin/curl --fail " - + "-u user:password http://localhost:${toString port}" - ) - machine.fail( - "${pkgs.curl}/bin/curl --fail " - + "-u user:wrongpwd http://localhost:${toString port}" - ) - machine.shutdown() - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("magneticod") + machine.wait_for_unit("magneticow") + machine.wait_for_open_port(${toString port}) + machine.succeed( + "${pkgs.curl}/bin/curl --fail " + + "-u user:password http://localhost:${toString port}" + ) + machine.fail( + "${pkgs.curl}/bin/curl --fail " + + "-u user:wrongpwd http://localhost:${toString port}" + ) + machine.shutdown() + ''; +} diff --git a/nixos/tests/marytts.nix b/nixos/tests/marytts.nix index 4c80cf5cc4e1..101f20662ea5 100644 --- a/nixos/tests/marytts.nix +++ b/nixos/tests/marytts.nix @@ -1,87 +1,85 @@ -import ./make-test-python.nix ( - { lib, ... }: - let - port = 59126; - in - { - name = "marytts"; - meta.maintainers = with lib.maintainers; [ pluiedev ]; +{ lib, ... }: +let + port = 59126; +in +{ + name = "marytts"; + meta.maintainers = with lib.maintainers; [ pluiedev ]; - nodes.machine = - { pkgs, ... }: - { - networking.firewall.enable = false; - networking.useDHCP = false; + nodes.machine = + { pkgs, ... 
}: + { + networking.firewall.enable = false; + networking.useDHCP = false; - services.marytts = { - enable = true; - inherit port; + services.marytts = { + enable = true; + inherit port; - voices = [ - (pkgs.fetchzip { - url = "https://github.com/marytts/voice-bits1-hsmm/releases/download/v5.2/voice-bits1-hsmm-5.2.zip"; - hash = "sha256-1nK+qZxjumMev7z5lgKr660NCKH5FDwvZ9sw/YYYeaA="; - }) - ]; + voices = [ + (pkgs.fetchzip { + url = "https://github.com/marytts/voice-bits1-hsmm/releases/download/v5.2/voice-bits1-hsmm-5.2.zip"; + hash = "sha256-1nK+qZxjumMev7z5lgKr660NCKH5FDwvZ9sw/YYYeaA="; + }) + ]; - userDictionaries = [ - (pkgs.writeTextFile { - name = "userdict-en_US.txt"; - destination = "/userdict-en_US.txt"; - text = '' - amogus | @ - ' m @U - g @ s - Nixpkgs | n I k s - ' p { - k @ - dZ @ s - ''; - }) - ]; - }; + userDictionaries = [ + (pkgs.writeTextFile { + name = "userdict-en_US.txt"; + destination = "/userdict-en_US.txt"; + text = '' + amogus | @ - ' m @U - g @ s + Nixpkgs | n I k s - ' p { - k @ - dZ @ s + ''; + }) + ]; }; + }; - testScript = '' - from xml.etree import ElementTree - from urllib.parse import urlencode + testScript = '' + from xml.etree import ElementTree + from urllib.parse import urlencode - machine.wait_for_unit("marytts.service") + machine.wait_for_unit("marytts.service") - with subtest("Checking health of MaryTTS server"): - machine.wait_for_open_port(${toString port}) - assert 'Mary TTS server' in machine.succeed("curl 'localhost:${toString port}/version'") + with subtest("Checking health of MaryTTS server"): + machine.wait_for_open_port(${toString port}) + assert 'Mary TTS server' in machine.succeed("curl 'localhost:${toString port}/version'") - with subtest("Generating example MaryXML"): - query = urlencode({ - 'datatype': 'RAWMARYXML', - 'locale': 'en_US', - }) - xml = machine.succeed(f"curl 'localhost:${toString port}/exampletext?{query}'") - root = ElementTree.fromstring(xml) - text = " ".join(root.itertext()).strip() - assert text == "Welcome to the world of speech synthesis!" + with subtest("Generating example MaryXML"): + query = urlencode({ + 'datatype': 'RAWMARYXML', + 'locale': 'en_US', + }) + xml = machine.succeed(f"curl 'localhost:${toString port}/exampletext?{query}'") + root = ElementTree.fromstring(xml) + text = " ".join(root.itertext()).strip() + assert text == "Welcome to the world of speech synthesis!" 
- with subtest("Detecting custom voice"): - assert "bits1-hsmm" in machine.succeed("curl 'localhost:${toString port}/voices'") + with subtest("Detecting custom voice"): + assert "bits1-hsmm" in machine.succeed("curl 'localhost:${toString port}/voices'") - with subtest("Finding user dictionary"): - query = urlencode({ - 'INPUT_TEXT': 'amogus', - 'INPUT_TYPE': 'TEXT', - 'OUTPUT_TYPE': 'PHONEMES', - 'LOCALE': 'en_US', - }) - phonemes = machine.succeed(f"curl 'localhost:${toString port}/process?{query}'") - phonemes_tree = ElementTree.fromstring(phonemes) - print([i.get('ph') for i in phonemes_tree.iter('{http://mary.dfki.de/2002/MaryXML}t')]) - assert ["@ - ' m @U - g @ s"] == [i.get('ph') for i in phonemes_tree.iter('{http://mary.dfki.de/2002/MaryXML}t')] + with subtest("Finding user dictionary"): + query = urlencode({ + 'INPUT_TEXT': 'amogus', + 'INPUT_TYPE': 'TEXT', + 'OUTPUT_TYPE': 'PHONEMES', + 'LOCALE': 'en_US', + }) + phonemes = machine.succeed(f"curl 'localhost:${toString port}/process?{query}'") + phonemes_tree = ElementTree.fromstring(phonemes) + print([i.get('ph') for i in phonemes_tree.iter('{http://mary.dfki.de/2002/MaryXML}t')]) + assert ["@ - ' m @U - g @ s"] == [i.get('ph') for i in phonemes_tree.iter('{http://mary.dfki.de/2002/MaryXML}t')] - with subtest("Synthesizing"): - query = urlencode({ - 'INPUT_TEXT': 'Nixpkgs is a collection of over 100,000 software packages that can be installed with the Nix package manager.', - 'INPUT_TYPE': 'TEXT', - 'OUTPUT_TYPE': 'AUDIO', - 'AUDIO': 'WAVE_FILE', - 'LOCALE': 'en_US', - }) - machine.succeed(f"curl 'localhost:${toString port}/process?{query}' -o ./audio.wav") - machine.copy_from_vm("./audio.wav") - ''; - } -) + with subtest("Synthesizing"): + query = urlencode({ + 'INPUT_TEXT': 'Nixpkgs is a collection of over 100,000 software packages that can be installed with the Nix package manager.', + 'INPUT_TYPE': 'TEXT', + 'OUTPUT_TYPE': 'AUDIO', + 'AUDIO': 'WAVE_FILE', + 'LOCALE': 'en_US', + }) + machine.succeed(f"curl 'localhost:${toString port}/process?{query}' -o ./audio.wav") + machine.copy_from_vm("./audio.wav") + ''; +} diff --git a/nixos/tests/mate-wayland.nix b/nixos/tests/mate-wayland.nix index bf0989b2cf1d..42b88439357a 100644 --- a/nixos/tests/mate-wayland.nix +++ b/nixos/tests/mate-wayland.nix @@ -1,67 +1,65 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "mate-wayland"; +{ pkgs, lib, ... }: +{ + name = "mate-wayland"; - meta.maintainers = lib.teams.mate.members; + meta.maintainers = lib.teams.mate.members; - nodes.machine = - { ... }: - { - imports = [ - ./common/user-account.nix - ]; + nodes.machine = + { ... 
}: + { + imports = [ + ./common/user-account.nix + ]; - services.xserver.enable = true; - services.displayManager = { - sddm.enable = true; # https://github.com/canonical/lightdm/issues/63 - sddm.wayland.enable = true; - defaultSession = "MATE"; - autoLogin = { - enable = true; - user = "alice"; - }; + services.xserver.enable = true; + services.displayManager = { + sddm.enable = true; # https://github.com/canonical/lightdm/issues/63 + sddm.wayland.enable = true; + defaultSession = "MATE"; + autoLogin = { + enable = true; + user = "alice"; }; - services.xserver.desktopManager.mate.enableWaylandSession = true; - - # Need to switch to a different GPU driver than the default one (-vga std) so that wayfire can launch: - virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; }; + services.xserver.desktopManager.mate.enableWaylandSession = true; - enableOCR = true; + # Need to switch to a different GPU driver than the default one (-vga std) so that wayfire can launch: + virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; + }; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - in - '' - machine.wait_for_unit("display-manager.service") + enableOCR = true; - with subtest("Wait for Wayland server"): - machine.wait_for_file("/run/user/${toString user.uid}/wayland-1") + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + in + '' + machine.wait_for_unit("display-manager.service") - with subtest("Check if MATE session components actually start"): - for i in ["wayfire", "mate-panel", "mate-wayland.sh"]: - machine.wait_until_succeeds(f"pgrep {i}") - machine.wait_until_succeeds("pgrep -f mate-wayland-components.sh") - # It is expected that WorkspaceSwitcherApplet doesn't work in Wayland - machine.wait_for_text('(panel|Factory|Workspace|Switcher|Applet|configuration)') + with subtest("Wait for Wayland server"): + machine.wait_for_file("/run/user/${toString user.uid}/wayland-1") - with subtest("Check if various environment variables are set"): - cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf mate-panel)/environ" - machine.succeed(f"{cmd} | grep 'XDG_SESSION_TYPE' | grep 'wayland'") - machine.succeed(f"{cmd} | grep 'XDG_SESSION_DESKTOP' | grep 'MATE'") - machine.succeed(f"{cmd} | grep 'MATE_PANEL_APPLETS_DIR' | grep '${pkgs.mate.mate-panel-with-applets.pname}'") + with subtest("Check if MATE session components actually start"): + for i in ["wayfire", "mate-panel", "mate-wayland.sh"]: + machine.wait_until_succeeds(f"pgrep {i}") + machine.wait_until_succeeds("pgrep -f mate-wayland-components.sh") + # It is expected that WorkspaceSwitcherApplet doesn't work in Wayland + machine.wait_for_text('(panel|Factory|Workspace|Switcher|Applet|configuration)') - with subtest("Check if Wayfire config is properly configured"): - for i in ["button_style = mate", "firedecor", "mate-wayland-components.sh"]: - machine.wait_until_succeeds(f"cat /home/${user.name}/.config/mate/wayfire.ini | grep '{i}'") + with subtest("Check if various environment variables are set"): + cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf mate-panel)/environ" + machine.succeed(f"{cmd} | grep 'XDG_SESSION_TYPE' | grep 'wayland'") + machine.succeed(f"{cmd} | grep 'XDG_SESSION_DESKTOP' | grep 'MATE'") + machine.succeed(f"{cmd} | grep 'MATE_PANEL_APPLETS_DIR' | grep '${pkgs.mate.mate-panel-with-applets.pname}'") - with subtest("Check if Wayfire has ever coredumped"): - machine.fail("coredumpctl --json=short | grep wayfire") - machine.sleep(10) 
- machine.screenshot("screen") - ''; - } -) + with subtest("Check if Wayfire config is properly configured"): + for i in ["button_style = mate", "firedecor", "mate-wayland-components.sh"]: + machine.wait_until_succeeds(f"cat /home/${user.name}/.config/mate/wayfire.ini | grep '{i}'") + + with subtest("Check if Wayfire has ever coredumped"): + machine.fail("coredumpctl --json=short | grep wayfire") + machine.sleep(10) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/mate.nix b/nixos/tests/mate.nix index d0847be5ffa9..6112044837db 100644 --- a/nixos/tests/mate.nix +++ b/nixos/tests/mate.nix @@ -1,92 +1,90 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "mate"; +{ pkgs, lib, ... }: +{ + name = "mate"; - meta = { - maintainers = lib.teams.mate.members; - }; + meta = { + maintainers = lib.teams.mate.members; + }; - nodes.machine = - { ... }: - { - imports = [ - ./common/user-account.nix - ]; + nodes.machine = + { ... }: + { + imports = [ + ./common/user-account.nix + ]; - services.xserver.enable = true; + services.xserver.enable = true; - services.xserver.displayManager = { - lightdm.enable = true; - autoLogin = { - enable = true; - user = "alice"; - }; + services.xserver.displayManager = { + lightdm.enable = true; + autoLogin = { + enable = true; + user = "alice"; }; - - services.xserver.desktopManager.mate.enable = true; }; - enableOCR = true; + services.xserver.desktopManager.mate.enable = true; + }; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - env = "DISPLAY=:0.0 DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus"; - in - '' - with subtest("Wait for login"): - machine.wait_for_x() - machine.wait_for_file("${user.home}/.Xauthority") - machine.succeed("xauth merge ${user.home}/.Xauthority") + enableOCR = true; - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + testScript = + { nodes, ... 
}: + let + user = nodes.machine.users.users.alice; + env = "DISPLAY=:0.0 DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus"; + in + '' + with subtest("Wait for login"): + machine.wait_for_x() + machine.wait_for_file("${user.home}/.Xauthority") + machine.succeed("xauth merge ${user.home}/.Xauthority") - with subtest("Check if MATE session components actually start"): - machine.wait_until_succeeds("pgrep marco") - machine.wait_for_window("marco") - machine.wait_until_succeeds("pgrep mate-panel") - machine.wait_for_window("Top Panel") - machine.wait_for_window("Bottom Panel") - machine.wait_until_succeeds("pgrep caja") - machine.wait_for_window("Caja") - machine.wait_for_text('(Applications|Places|System)') - machine.wait_for_text('(Computer|Home|Trash)') + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - with subtest("Check if various environment variables are set"): - machine.succeed("xargs --null --max-args=1 echo < /proc/$(pgrep -xf marco)/environ | grep 'XDG_CURRENT_DESKTOP' | grep 'MATE'") - # From mate-panel-with-applets packaging - machine.succeed("xargs --null --max-args=1 echo < /proc/$(pgrep -xf mate-panel)/environ | grep 'MATE_PANEL_APPLETS_DIR' | grep '${pkgs.mate.mate-panel-with-applets.pname}'") + with subtest("Check if MATE session components actually start"): + machine.wait_until_succeeds("pgrep marco") + machine.wait_for_window("marco") + machine.wait_until_succeeds("pgrep mate-panel") + machine.wait_for_window("Top Panel") + machine.wait_for_window("Bottom Panel") + machine.wait_until_succeeds("pgrep caja") + machine.wait_for_window("Caja") + machine.wait_for_text('(Applications|Places|System)') + machine.wait_for_text('(Computer|Home|Trash)') - with subtest("Check if applets are built with in-process support"): - # This is needed for Wayland support - machine.fail("pgrep -fa clock-applet") + with subtest("Check if various environment variables are set"): + machine.succeed("xargs --null --max-args=1 echo < /proc/$(pgrep -xf marco)/environ | grep 'XDG_CURRENT_DESKTOP' | grep 'MATE'") + # From mate-panel-with-applets packaging + machine.succeed("xargs --null --max-args=1 echo < /proc/$(pgrep -xf mate-panel)/environ | grep 'MATE_PANEL_APPLETS_DIR' | grep '${pkgs.mate.mate-panel-with-applets.pname}'") - with subtest("Lock the screen"): - machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is inactive'") - machine.succeed("su - ${user.name} -c '${env} mate-screensaver-command -l >&2 &'") - machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is active'") - machine.sleep(2) - machine.send_chars("${user.password}", delay=0.2) - machine.wait_for_text("${user.description}") - machine.screenshot("screensaver") - machine.send_chars("\n") - machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is inactive'") + with subtest("Check if applets are built with in-process support"): + # This is needed for Wayland support + machine.fail("pgrep -fa clock-applet") - with subtest("Open MATE control center"): - machine.succeed("su - ${user.name} -c '${env} mate-control-center >&2 &'") - machine.wait_for_window("Control Center") - machine.wait_for_text('(Groups|Administration|Hardware)') + with subtest("Lock the screen"): + machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command 
-q' | grep 'The screensaver is inactive'") + machine.succeed("su - ${user.name} -c '${env} mate-screensaver-command -l >&2 &'") + machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is active'") + machine.sleep(2) + machine.send_chars("${user.password}", delay=0.2) + machine.wait_for_text("${user.description}") + machine.screenshot("screensaver") + machine.send_chars("\n") + machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is inactive'") - with subtest("Open MATE terminal"): - machine.succeed("su - ${user.name} -c '${env} mate-terminal >&2 &'") - machine.wait_for_window("Terminal") + with subtest("Open MATE control center"): + machine.succeed("su - ${user.name} -c '${env} mate-control-center >&2 &'") + machine.wait_for_window("Control Center") + machine.wait_for_text('(Groups|Administration|Hardware)') - with subtest("Check if MATE has ever coredumped"): - machine.fail("coredumpctl --json=short | grep -E 'mate|marco|caja'") - machine.screenshot("screen") - ''; - } -) + with subtest("Open MATE terminal"): + machine.succeed("su - ${user.name} -c '${env} mate-terminal >&2 &'") + machine.wait_for_window("Terminal") + + with subtest("Check if MATE has ever coredumped"): + machine.fail("coredumpctl --json=short | grep -E 'mate|marco|caja'") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/matrix/conduit.nix b/nixos/tests/matrix/conduit.nix index b38549b23ab4..874e7672812b 100644 --- a/nixos/tests/matrix/conduit.nix +++ b/nixos/tests/matrix/conduit.nix @@ -1,98 +1,96 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - let - name = "conduit"; - in - { - name = "matrix-conduit"; +{ pkgs, ... }: +let + name = "conduit"; +in +{ + name = "matrix-conduit"; - nodes = { - conduit = args: { - services.matrix-conduit = { - enable = true; - settings.global.server_name = name; - settings.global.allow_registration = true; - extraEnvironment.RUST_BACKTRACE = "yes"; - }; - services.nginx = { - enable = true; - virtualHosts.${name} = { - enableACME = false; - forceSSL = false; - enableSSL = false; + nodes = { + conduit = args: { + services.matrix-conduit = { + enable = true; + settings.global.server_name = name; + settings.global.allow_registration = true; + extraEnvironment.RUST_BACKTRACE = "yes"; + }; + services.nginx = { + enable = true; + virtualHosts.${name} = { + enableACME = false; + forceSSL = false; + enableSSL = false; - locations."/_matrix" = { - proxyPass = "http://[::1]:6167"; - }; + locations."/_matrix" = { + proxyPass = "http://[::1]:6167"; }; }; - networking.firewall.allowedTCPPorts = [ 80 ]; }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ - (pkgs.writers.writePython3Bin "do_test" { libraries = [ pkgs.python3Packages.matrix-nio ]; } '' - import asyncio - - from nio import AsyncClient - - - async def main() -> None: - # Connect to conduit - client = AsyncClient("http://conduit:80", "alice") - - # Register as user alice - response = await client.register("alice", "my-secret-password") - - # Log in as user alice - response = await client.login("my-secret-password") - - # Create a new room - response = await client.room_create(federate=False) - room_id = response.room_id - - # Join the room - response = await client.join(room_id) - - # Send a message to the room - response = await client.room_send( - room_id=room_id, - message_type="m.room.message", - content={ - "msgtype": "m.text", - "body": "Hello conduit!" 
- } - ) - - # Sync responses - response = await client.sync(timeout=30000) - - # Check the message was received by conduit - last_message = response.rooms.join[room_id].timeline.events[-1].body - assert last_message == "Hello conduit!" - - # Leave the room - response = await client.room_leave(room_id) - - # Close the client - await client.close() - - asyncio.get_event_loop().run_until_complete(main()) - '') - ]; - }; + networking.firewall.allowedTCPPorts = [ 80 ]; }; + client = + { pkgs, ... }: + { + environment.systemPackages = [ + (pkgs.writers.writePython3Bin "do_test" { libraries = [ pkgs.python3Packages.matrix-nio ]; } '' + import asyncio - testScript = '' - start_all() + from nio import AsyncClient - with subtest("start conduit"): - conduit.wait_for_unit("conduit.service") - conduit.wait_for_open_port(80) - with subtest("ensure messages can be exchanged"): - client.succeed("do_test") - ''; - } -) + async def main() -> None: + # Connect to conduit + client = AsyncClient("http://conduit:80", "alice") + + # Register as user alice + response = await client.register("alice", "my-secret-password") + + # Log in as user alice + response = await client.login("my-secret-password") + + # Create a new room + response = await client.room_create(federate=False) + room_id = response.room_id + + # Join the room + response = await client.join(room_id) + + # Send a message to the room + response = await client.room_send( + room_id=room_id, + message_type="m.room.message", + content={ + "msgtype": "m.text", + "body": "Hello conduit!" + } + ) + + # Sync responses + response = await client.sync(timeout=30000) + + # Check the message was received by conduit + last_message = response.rooms.join[room_id].timeline.events[-1].body + assert last_message == "Hello conduit!" + + # Leave the room + response = await client.room_leave(room_id) + + # Close the client + await client.close() + + asyncio.get_event_loop().run_until_complete(main()) + '') + ]; + }; + }; + + testScript = '' + start_all() + + with subtest("start conduit"): + conduit.wait_for_unit("conduit.service") + conduit.wait_for_open_port(80) + + with subtest("ensure messages can be exchanged"): + client.succeed("do_test") + ''; +} diff --git a/nixos/tests/matrix/dendrite.nix b/nixos/tests/matrix/dendrite.nix index 9796423a5bb7..4477f522a37b 100644 --- a/nixos/tests/matrix/dendrite.nix +++ b/nixos/tests/matrix/dendrite.nix @@ -1,102 +1,100 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - let - homeserverUrl = "http://homeserver:8008"; +{ pkgs, ... }: +let + homeserverUrl = "http://homeserver:8008"; - private_key = pkgs.runCommand "matrix_key.pem" { - buildInputs = [ pkgs.dendrite ]; - } "generate-keys --private-key $out"; - in - { - name = "dendrite"; - meta = with pkgs.lib; { - maintainers = teams.matrix.members; - }; + private_key = pkgs.runCommand "matrix_key.pem" { + buildInputs = [ pkgs.dendrite ]; + } "generate-keys --private-key $out"; +in +{ + name = "dendrite"; + meta = with pkgs.lib; { + maintainers = teams.matrix.members; + }; - nodes = { - homeserver = - { pkgs, ... }: - { - services.dendrite = { - enable = true; - loadCredential = [ "test_private_key:${private_key}" ]; - openRegistration = true; - settings = { - global.server_name = "test-dendrite-server.com"; - global.private_key = "$CREDENTIALS_DIRECTORY/test_private_key"; - client_api.registration_disabled = false; - }; + nodes = { + homeserver = + { pkgs, ... 
}: + { + services.dendrite = { + enable = true; + loadCredential = [ "test_private_key:${private_key}" ]; + openRegistration = true; + settings = { + global.server_name = "test-dendrite-server.com"; + global.private_key = "$CREDENTIALS_DIRECTORY/test_private_key"; + client_api.registration_disabled = false; }; - - networking.firewall.allowedTCPPorts = [ 8008 ]; }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ - (pkgs.writers.writePython3Bin "do_test" { libraries = [ pkgs.python3Packages.matrix-nio ]; } '' - import asyncio + networking.firewall.allowedTCPPorts = [ 8008 ]; + }; - from nio import AsyncClient + client = + { pkgs, ... }: + { + environment.systemPackages = [ + (pkgs.writers.writePython3Bin "do_test" { libraries = [ pkgs.python3Packages.matrix-nio ]; } '' + import asyncio + + from nio import AsyncClient - async def main() -> None: - # Connect to dendrite - client = AsyncClient("http://homeserver:8008", "alice") + async def main() -> None: + # Connect to dendrite + client = AsyncClient("http://homeserver:8008", "alice") - # Register as user alice - response = await client.register("alice", "my-secret-password") + # Register as user alice + response = await client.register("alice", "my-secret-password") - # Log in as user alice - response = await client.login("my-secret-password") + # Log in as user alice + response = await client.login("my-secret-password") - # Create a new room - response = await client.room_create(federate=False) - room_id = response.room_id + # Create a new room + response = await client.room_create(federate=False) + room_id = response.room_id - # Join the room - response = await client.join(room_id) + # Join the room + response = await client.join(room_id) - # Send a message to the room - response = await client.room_send( - room_id=room_id, - message_type="m.room.message", - content={ - "msgtype": "m.text", - "body": "Hello world!" - } - ) + # Send a message to the room + response = await client.room_send( + room_id=room_id, + message_type="m.room.message", + content={ + "msgtype": "m.text", + "body": "Hello world!" + } + ) - # Sync responses - response = await client.sync(timeout=30000) + # Sync responses + response = await client.sync(timeout=30000) - # Check the message was received by dendrite - last_message = response.rooms.join[room_id].timeline.events[-1].body - assert last_message == "Hello world!" + # Check the message was received by dendrite + last_message = response.rooms.join[room_id].timeline.events[-1].body + assert last_message == "Hello world!" 
- # Leave the room - response = await client.room_leave(room_id) + # Leave the room + response = await client.room_leave(room_id) - # Close the client - await client.close() + # Close the client + await client.close() - asyncio.get_event_loop().run_until_complete(main()) - '') - ]; - }; - }; + asyncio.get_event_loop().run_until_complete(main()) + '') + ]; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - with subtest("start the homeserver"): - homeserver.wait_for_unit("dendrite.service") - homeserver.wait_for_open_port(8008) + with subtest("start the homeserver"): + homeserver.wait_for_unit("dendrite.service") + homeserver.wait_for_open_port(8008) - with subtest("ensure messages can be exchanged"): - client.succeed("do_test") - ''; + with subtest("ensure messages can be exchanged"): + client.succeed("do_test") + ''; - } -) +} diff --git a/nixos/tests/matrix/mautrix-meta-postgres.nix b/nixos/tests/matrix/mautrix-meta-postgres.nix index 99ed56477df3..72f12aa8db6a 100644 --- a/nixos/tests/matrix/mautrix-meta-postgres.nix +++ b/nixos/tests/matrix/mautrix-meta-postgres.nix @@ -1,236 +1,234 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - let - homeserverDomain = "server"; - homeserverUrl = "http://server:8008"; - userName = "alice"; - botUserName = "instagrambot"; +{ pkgs, ... }: +let + homeserverDomain = "server"; + homeserverUrl = "http://server:8008"; + userName = "alice"; + botUserName = "instagrambot"; - asToken = "this-is-my-totally-randomly-generated-as-token"; - hsToken = "this-is-my-totally-randomly-generated-hs-token"; - in - { - name = "mautrix-meta-postgres"; - meta.maintainers = pkgs.mautrix-meta.meta.maintainers; + asToken = "this-is-my-totally-randomly-generated-as-token"; + hsToken = "this-is-my-totally-randomly-generated-hs-token"; +in +{ + name = "mautrix-meta-postgres"; + meta.maintainers = pkgs.mautrix-meta.meta.maintainers; - nodes = { - server = - { config, pkgs, ... }: - { - services.postgresql = { - enable = true; + nodes = { + server = + { config, pkgs, ... 
}: + { + services.postgresql = { + enable = true; - ensureUsers = [ - { - name = "mautrix-meta-instagram"; - ensureDBOwnership = true; - } - ]; + ensureUsers = [ + { + name = "mautrix-meta-instagram"; + ensureDBOwnership = true; + } + ]; - ensureDatabases = [ - "mautrix-meta-instagram" - ]; - }; - - systemd.services.mautrix-meta-instagram = { - wants = [ "postgres.service" ]; - after = [ "postgres.service" ]; - }; - - services.matrix-synapse = { - enable = true; - settings = { - database.name = "sqlite3"; - - enable_registration = true; - - # don't use this in production, always use some form of verification - enable_registration_without_verification = true; - - listeners = [ - { - # The default but tls=false - bind_addresses = [ - "0.0.0.0" - ]; - port = 8008; - resources = [ - { - "compress" = true; - "names" = [ "client" ]; - } - { - "compress" = false; - "names" = [ "federation" ]; - } - ]; - tls = false; - type = "http"; - } - ]; - }; - }; - - services.mautrix-meta.instances.instagram = { - enable = true; - - environmentFile = pkgs.writeText ''my-secrets'' '' - AS_TOKEN=${asToken} - HS_TOKEN=${hsToken} - ''; - - settings = { - homeserver = { - address = homeserverUrl; - domain = homeserverDomain; - }; - - appservice = { - port = 8009; - - as_token = "$AS_TOKEN"; - hs_token = "$HS_TOKEN"; - - database = { - type = "postgres"; - uri = "postgres:///mautrix-meta-instagram?host=/var/run/postgresql"; - }; - - bot.username = botUserName; - }; - - bridge.permissions."@${userName}:server" = "user"; - }; - }; - - networking.firewall.allowedTCPPorts = [ - 8008 - 8009 + ensureDatabases = [ + "mautrix-meta-instagram" ]; }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ - (pkgs.writers.writePython3Bin "do_test" + systemd.services.mautrix-meta-instagram = { + wants = [ "postgres.service" ]; + after = [ "postgres.service" ]; + }; + + services.matrix-synapse = { + enable = true; + settings = { + database.name = "sqlite3"; + + enable_registration = true; + + # don't use this in production, always use some form of verification + enable_registration_without_verification = true; + + listeners = [ { - libraries = [ pkgs.python3Packages.matrix-nio ]; - flakeIgnore = [ - # We don't live in the dark ages anymore. - # Languages like Python that are whitespace heavy will overrun - # 79 characters.. - "E501" + # The default but tls=false + bind_addresses = [ + "0.0.0.0" ]; + port = 8008; + resources = [ + { + "compress" = true; + "names" = [ "client" ]; + } + { + "compress" = false; + "names" = [ "federation" ]; + } + ]; + tls = false; + type = "http"; } - '' - import sys - import functools - import asyncio - - from nio import AsyncClient, RoomMessageNotice, RoomCreateResponse, RoomInviteResponse - - - async def message_callback(matrix: AsyncClient, msg: str, _r, e): - print("Received matrix text message: ", e) - assert msg in e.body - exit(0) # Success! - - - async def run(homeserver: str): - matrix = AsyncClient(homeserver) - response = await matrix.register("${userName}", "foobar") - print("Matrix register response: ", response) - - # Open a DM with the bridge bot - response = await matrix.room_create() - print("Matrix create room response:", response) - assert isinstance(response, RoomCreateResponse) - room_id = response.room_id - - response = await matrix.room_invite(room_id, "@${botUserName}:${homeserverDomain}") - assert isinstance(response, RoomInviteResponse) - - callback = functools.partial( - message_callback, matrix, "Hello, I'm an Instagram bridge bot." 
- ) - matrix.add_event_callback(callback, RoomMessageNotice) - - print("Waiting for matrix message...") - await matrix.sync_forever(timeout=30000) - - - if __name__ == "__main__": - asyncio.run(run(sys.argv[1])) - '' - ) - ]; + ]; + }; }; - }; - testScript = '' - def extract_token(data): - stdout = data[1] - stdout = stdout.strip() - line = stdout.split('\n')[-1] - return line.split(':')[-1].strip("\" '\n") + services.mautrix-meta.instances.instagram = { + enable = true; - def get_token_from(token, file): - data = server.execute(f"cat {file} | grep {token}") - return extract_token(data) + environmentFile = pkgs.writeText ''my-secrets'' '' + AS_TOKEN=${asToken} + HS_TOKEN=${hsToken} + ''; - def get_as_token_from(file): - return get_token_from("as_token", file) + settings = { + homeserver = { + address = homeserverUrl; + domain = homeserverDomain; + }; - def get_hs_token_from(file): - return get_token_from("hs_token", file) + appservice = { + port = 8009; - config_yaml = "/var/lib/mautrix-meta-instagram/config.yaml" - registration_yaml = "/var/lib/mautrix-meta-instagram/meta-registration.yaml" + as_token = "$AS_TOKEN"; + hs_token = "$HS_TOKEN"; - expected_as_token = "${asToken}" - expected_hs_token = "${hsToken}" + database = { + type = "postgres"; + uri = "postgres:///mautrix-meta-instagram?host=/var/run/postgresql"; + }; - start_all() + bot.username = botUserName; + }; - with subtest("start the server"): - # bridge - server.wait_for_unit("mautrix-meta-instagram.service") + bridge.permissions."@${userName}:server" = "user"; + }; + }; - # homeserver - server.wait_for_unit("matrix-synapse.service") + networking.firewall.allowedTCPPorts = [ + 8008 + 8009 + ]; + }; - server.wait_for_open_port(8008) - # Bridge only opens the port after it contacts the homeserver - server.wait_for_open_port(8009) + client = + { pkgs, ... }: + { + environment.systemPackages = [ + (pkgs.writers.writePython3Bin "do_test" + { + libraries = [ pkgs.python3Packages.matrix-nio ]; + flakeIgnore = [ + # We don't live in the dark ages anymore. + # Languages like Python that are whitespace heavy will overrun + # 79 characters.. + "E501" + ]; + } + '' + import sys + import functools + import asyncio - with subtest("ensure messages can be exchanged"): - client.succeed("do_test ${homeserverUrl} >&2") + from nio import AsyncClient, RoomMessageNotice, RoomCreateResponse, RoomInviteResponse - with subtest("ensure as_token, hs_token match from environment file"): - as_token = get_as_token_from(config_yaml) - hs_token = get_hs_token_from(config_yaml) - as_token_registration = get_as_token_from(registration_yaml) - hs_token_registration = get_hs_token_from(registration_yaml) - assert as_token == expected_as_token, f"as_token in config should match the one specified (is: {as_token}, expected: {expected_as_token})" - assert hs_token == expected_hs_token, f"hs_token in config should match the one specified (is: {hs_token}, expected: {expected_hs_token})" - assert as_token_registration == expected_as_token, f"as_token in registration should match the one specified (is: {as_token_registration}, expected: {expected_as_token})" - assert hs_token_registration == expected_hs_token, f"hs_token in registration should match the one specified (is: {hs_token_registration}, expected: {expected_hs_token})" + async def message_callback(matrix: AsyncClient, msg: str, _r, e): + print("Received matrix text message: ", e) + assert msg in e.body + exit(0) # Success! 
- with subtest("ensure as_token and hs_token stays same after restart"): - server.systemctl("restart mautrix-meta-instagram") - server.wait_for_open_port(8009) - as_token = get_as_token_from(config_yaml) - hs_token = get_hs_token_from(config_yaml) - as_token_registration = get_as_token_from(registration_yaml) - hs_token_registration = get_hs_token_from(registration_yaml) + async def run(homeserver: str): + matrix = AsyncClient(homeserver) + response = await matrix.register("${userName}", "foobar") + print("Matrix register response: ", response) - assert as_token == expected_as_token, f"as_token in config should match the one specified (is: {as_token}, expected: {expected_as_token})" - assert hs_token == expected_hs_token, f"hs_token in config should match the one specified (is: {hs_token}, expected: {expected_hs_token})" - assert as_token_registration == expected_as_token, f"as_token in registration should match the one specified (is: {as_token_registration}, expected: {expected_as_token})" - assert hs_token_registration == expected_hs_token, f"hs_token in registration should match the one specified (is: {hs_token_registration}, expected: {expected_hs_token})" - ''; - } -) + # Open a DM with the bridge bot + response = await matrix.room_create() + print("Matrix create room response:", response) + assert isinstance(response, RoomCreateResponse) + room_id = response.room_id + + response = await matrix.room_invite(room_id, "@${botUserName}:${homeserverDomain}") + assert isinstance(response, RoomInviteResponse) + + callback = functools.partial( + message_callback, matrix, "Hello, I'm an Instagram bridge bot." + ) + matrix.add_event_callback(callback, RoomMessageNotice) + + print("Waiting for matrix message...") + await matrix.sync_forever(timeout=30000) + + + if __name__ == "__main__": + asyncio.run(run(sys.argv[1])) + '' + ) + ]; + }; + }; + + testScript = '' + def extract_token(data): + stdout = data[1] + stdout = stdout.strip() + line = stdout.split('\n')[-1] + return line.split(':')[-1].strip("\" '\n") + + def get_token_from(token, file): + data = server.execute(f"cat {file} | grep {token}") + return extract_token(data) + + def get_as_token_from(file): + return get_token_from("as_token", file) + + def get_hs_token_from(file): + return get_token_from("hs_token", file) + + config_yaml = "/var/lib/mautrix-meta-instagram/config.yaml" + registration_yaml = "/var/lib/mautrix-meta-instagram/meta-registration.yaml" + + expected_as_token = "${asToken}" + expected_hs_token = "${hsToken}" + + start_all() + + with subtest("start the server"): + # bridge + server.wait_for_unit("mautrix-meta-instagram.service") + + # homeserver + server.wait_for_unit("matrix-synapse.service") + + server.wait_for_open_port(8008) + # Bridge only opens the port after it contacts the homeserver + server.wait_for_open_port(8009) + + with subtest("ensure messages can be exchanged"): + client.succeed("do_test ${homeserverUrl} >&2") + + with subtest("ensure as_token, hs_token match from environment file"): + as_token = get_as_token_from(config_yaml) + hs_token = get_hs_token_from(config_yaml) + as_token_registration = get_as_token_from(registration_yaml) + hs_token_registration = get_hs_token_from(registration_yaml) + + assert as_token == expected_as_token, f"as_token in config should match the one specified (is: {as_token}, expected: {expected_as_token})" + assert hs_token == expected_hs_token, f"hs_token in config should match the one specified (is: {hs_token}, expected: {expected_hs_token})" + assert as_token_registration == 
expected_as_token, f"as_token in registration should match the one specified (is: {as_token_registration}, expected: {expected_as_token})" + assert hs_token_registration == expected_hs_token, f"hs_token in registration should match the one specified (is: {hs_token_registration}, expected: {expected_hs_token})" + + with subtest("ensure as_token and hs_token stays same after restart"): + server.systemctl("restart mautrix-meta-instagram") + server.wait_for_open_port(8009) + + as_token = get_as_token_from(config_yaml) + hs_token = get_hs_token_from(config_yaml) + as_token_registration = get_as_token_from(registration_yaml) + hs_token_registration = get_hs_token_from(registration_yaml) + + assert as_token == expected_as_token, f"as_token in config should match the one specified (is: {as_token}, expected: {expected_as_token})" + assert hs_token == expected_hs_token, f"hs_token in config should match the one specified (is: {hs_token}, expected: {expected_hs_token})" + assert as_token_registration == expected_as_token, f"as_token in registration should match the one specified (is: {as_token_registration}, expected: {expected_as_token})" + assert hs_token_registration == expected_hs_token, f"hs_token in registration should match the one specified (is: {hs_token_registration}, expected: {expected_hs_token})" + ''; +} diff --git a/nixos/tests/matrix/mautrix-meta-sqlite.nix b/nixos/tests/matrix/mautrix-meta-sqlite.nix index ddd38e8f0b00..d4692dec718e 100644 --- a/nixos/tests/matrix/mautrix-meta-sqlite.nix +++ b/nixos/tests/matrix/mautrix-meta-sqlite.nix @@ -1,260 +1,258 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - let - homeserverDomain = "server"; - homeserverUrl = "http://server:8008"; - username = "alice"; - instagramBotUsername = "instagrambot"; - facebookBotUsername = "facebookbot"; - in - { - name = "mautrix-meta-sqlite"; - meta.maintainers = pkgs.mautrix-meta.meta.maintainers; +{ pkgs, ... }: +let + homeserverDomain = "server"; + homeserverUrl = "http://server:8008"; + username = "alice"; + instagramBotUsername = "instagrambot"; + facebookBotUsername = "facebookbot"; +in +{ + name = "mautrix-meta-sqlite"; + meta.maintainers = pkgs.mautrix-meta.meta.maintainers; - nodes = { - server = - { config, pkgs, ... }: - { - services.matrix-synapse = { - enable = true; - settings = { - database.name = "sqlite3"; + nodes = { + server = + { config, pkgs, ... 
}: + { + services.matrix-synapse = { + enable = true; + settings = { + database.name = "sqlite3"; - enable_registration = true; + enable_registration = true; - # don't use this in production, always use some form of verification - enable_registration_without_verification = true; + # don't use this in production, always use some form of verification + enable_registration_without_verification = true; - listeners = [ - { - # The default but tls=false - bind_addresses = [ - "0.0.0.0" - ]; - port = 8008; - resources = [ - { - "compress" = true; - "names" = [ "client" ]; - } - { - "compress" = false; - "names" = [ "federation" ]; - } - ]; - tls = false; - type = "http"; - } + listeners = [ + { + # The default but tls=false + bind_addresses = [ + "0.0.0.0" + ]; + port = 8008; + resources = [ + { + "compress" = true; + "names" = [ "client" ]; + } + { + "compress" = false; + "names" = [ "federation" ]; + } + ]; + tls = false; + type = "http"; + } + ]; + }; + }; + + services.mautrix-meta.instances.facebook = { + enable = true; + + settings = { + homeserver = { + address = homeserverUrl; + domain = homeserverDomain; + }; + + appservice = { + port = 8009; + + bot.username = facebookBotUsername; + }; + + bridge.permissions."@${username}:server" = "user"; + }; + }; + + services.mautrix-meta.instances.instagram = { + enable = true; + + settings = { + homeserver = { + address = homeserverUrl; + domain = homeserverDomain; + }; + + appservice = { + port = 8010; + + bot.username = instagramBotUsername; + }; + + bridge.permissions."@${username}:server" = "user"; + }; + }; + + networking.firewall.allowedTCPPorts = [ 8008 ]; + }; + + client = + { pkgs, ... }: + { + environment.systemPackages = [ + (pkgs.writers.writePython3Bin "register_user" + { + libraries = [ pkgs.python3Packages.matrix-nio ]; + flakeIgnore = [ + # We don't live in the dark ages anymore. + # Languages like Python that are whitespace heavy will overrun + # 79 characters.. + "E501" ]; - }; - }; + } + '' + import sys + import asyncio - services.mautrix-meta.instances.facebook = { - enable = true; - - settings = { - homeserver = { - address = homeserverUrl; - domain = homeserverDomain; - }; - - appservice = { - port = 8009; - - bot.username = facebookBotUsername; - }; - - bridge.permissions."@${username}:server" = "user"; - }; - }; - - services.mautrix-meta.instances.instagram = { - enable = true; - - settings = { - homeserver = { - address = homeserverUrl; - domain = homeserverDomain; - }; - - appservice = { - port = 8010; - - bot.username = instagramBotUsername; - }; - - bridge.permissions."@${username}:server" = "user"; - }; - }; - - networking.firewall.allowedTCPPorts = [ 8008 ]; - }; - - client = - { pkgs, ... }: - { - environment.systemPackages = [ - (pkgs.writers.writePython3Bin "register_user" - { - libraries = [ pkgs.python3Packages.matrix-nio ]; - flakeIgnore = [ - # We don't live in the dark ages anymore. - # Languages like Python that are whitespace heavy will overrun - # 79 characters.. 
- "E501" - ]; - } - '' - import sys - import asyncio - - from nio import AsyncClient + from nio import AsyncClient - async def run(username: str, homeserver: str): - matrix = AsyncClient(homeserver) + async def run(username: str, homeserver: str): + matrix = AsyncClient(homeserver) - response = await matrix.register(username, "foobar") - print("Matrix register response: ", response) + response = await matrix.register(username, "foobar") + print("Matrix register response: ", response) - if __name__ == "__main__": - asyncio.run(run(sys.argv[1], sys.argv[2])) - '' - ) - (pkgs.writers.writePython3Bin "do_test" - { - libraries = [ pkgs.python3Packages.matrix-nio ]; - flakeIgnore = [ - # We don't live in the dark ages anymore. - # Languages like Python that are whitespace heavy will overrun - # 79 characters.. - "E501" - ]; - } - '' - import sys - import functools - import asyncio + if __name__ == "__main__": + asyncio.run(run(sys.argv[1], sys.argv[2])) + '' + ) + (pkgs.writers.writePython3Bin "do_test" + { + libraries = [ pkgs.python3Packages.matrix-nio ]; + flakeIgnore = [ + # We don't live in the dark ages anymore. + # Languages like Python that are whitespace heavy will overrun + # 79 characters.. + "E501" + ]; + } + '' + import sys + import functools + import asyncio - from nio import AsyncClient, RoomMessageNotice, RoomCreateResponse, RoomInviteResponse + from nio import AsyncClient, RoomMessageNotice, RoomCreateResponse, RoomInviteResponse - async def message_callback(matrix: AsyncClient, msg: str, _r, e): - print("Received matrix text message: ", e) - assert msg in e.body - exit(0) # Success! + async def message_callback(matrix: AsyncClient, msg: str, _r, e): + print("Received matrix text message: ", e) + assert msg in e.body + exit(0) # Success! - async def run(username: str, bot_username: str, homeserver: str): - matrix = AsyncClient(homeserver, f"@{username}:${homeserverDomain}") + async def run(username: str, bot_username: str, homeserver: str): + matrix = AsyncClient(homeserver, f"@{username}:${homeserverDomain}") - response = await matrix.login("foobar") - print("Matrix login response: ", response) + response = await matrix.login("foobar") + print("Matrix login response: ", response) - # Open a DM with the bridge bot - response = await matrix.room_create() - print("Matrix create room response:", response) - assert isinstance(response, RoomCreateResponse) - room_id = response.room_id + # Open a DM with the bridge bot + response = await matrix.room_create() + print("Matrix create room response:", response) + assert isinstance(response, RoomCreateResponse) + room_id = response.room_id - response = await matrix.room_invite(room_id, f"@{bot_username}:${homeserverDomain}") - assert isinstance(response, RoomInviteResponse) + response = await matrix.room_invite(room_id, f"@{bot_username}:${homeserverDomain}") + assert isinstance(response, RoomInviteResponse) - callback = functools.partial( - message_callback, matrix, "Hello, I'm an Instagram bridge bot." - ) - matrix.add_event_callback(callback, RoomMessageNotice) + callback = functools.partial( + message_callback, matrix, "Hello, I'm an Instagram bridge bot." 
+ ) + matrix.add_event_callback(callback, RoomMessageNotice) - print("Waiting for matrix message...") - await matrix.sync_forever(timeout=30000) + print("Waiting for matrix message...") + await matrix.sync_forever(timeout=30000) - if __name__ == "__main__": - asyncio.run(run(sys.argv[1], sys.argv[2], sys.argv[3])) - '' - ) - ]; - }; - }; + if __name__ == "__main__": + asyncio.run(run(sys.argv[1], sys.argv[2], sys.argv[3])) + '' + ) + ]; + }; + }; - testScript = '' - def extract_token(data): - stdout = data[1] - stdout = stdout.strip() - line = stdout.split('\n')[-1] - return line.split(':')[-1].strip("\" '\n") + testScript = '' + def extract_token(data): + stdout = data[1] + stdout = stdout.strip() + line = stdout.split('\n')[-1] + return line.split(':')[-1].strip("\" '\n") - def get_token_from(token, file): - data = server.execute(f"cat {file} | grep {token}") - return extract_token(data) + def get_token_from(token, file): + data = server.execute(f"cat {file} | grep {token}") + return extract_token(data) - def get_as_token_from(file): - return get_token_from("as_token", file) + def get_as_token_from(file): + return get_token_from("as_token", file) - def get_hs_token_from(file): - return get_token_from("hs_token", file) + def get_hs_token_from(file): + return get_token_from("hs_token", file) - config_yaml = "/var/lib/mautrix-meta-facebook/config.yaml" - registration_yaml = "/var/lib/mautrix-meta-facebook/meta-registration.yaml" + config_yaml = "/var/lib/mautrix-meta-facebook/config.yaml" + registration_yaml = "/var/lib/mautrix-meta-facebook/meta-registration.yaml" - start_all() + start_all() - with subtest("wait for bridges and homeserver"): - # bridge - server.wait_for_unit("mautrix-meta-facebook.service") - server.wait_for_unit("mautrix-meta-instagram.service") + with subtest("wait for bridges and homeserver"): + # bridge + server.wait_for_unit("mautrix-meta-facebook.service") + server.wait_for_unit("mautrix-meta-instagram.service") - # homeserver - server.wait_for_unit("matrix-synapse.service") + # homeserver + server.wait_for_unit("matrix-synapse.service") - server.wait_for_open_port(8008) - # Bridges only open the port after they contact the homeserver - server.wait_for_open_port(8009) - server.wait_for_open_port(8010) + server.wait_for_open_port(8008) + # Bridges only open the port after they contact the homeserver + server.wait_for_open_port(8009) + server.wait_for_open_port(8010) - with subtest("register user"): - client.succeed("register_user ${username} ${homeserverUrl} >&2") + with subtest("register user"): + client.succeed("register_user ${username} ${homeserverUrl} >&2") - with subtest("ensure messages can be exchanged"): - client.succeed("do_test ${username} ${facebookBotUsername} ${homeserverUrl} >&2") - client.succeed("do_test ${username} ${instagramBotUsername} ${homeserverUrl} >&2") + with subtest("ensure messages can be exchanged"): + client.succeed("do_test ${username} ${facebookBotUsername} ${homeserverUrl} >&2") + client.succeed("do_test ${username} ${instagramBotUsername} ${homeserverUrl} >&2") - with subtest("ensure as_token and hs_token stays same after restart"): - generated_as_token_facebook = get_as_token_from(config_yaml) - generated_hs_token_facebook = get_hs_token_from(config_yaml) + with subtest("ensure as_token and hs_token stays same after restart"): + generated_as_token_facebook = get_as_token_from(config_yaml) + generated_hs_token_facebook = get_hs_token_from(config_yaml) - generated_as_token_facebook_registration = get_as_token_from(registration_yaml) - 
generated_hs_token_facebook_registration = get_hs_token_from(registration_yaml) + generated_as_token_facebook_registration = get_as_token_from(registration_yaml) + generated_hs_token_facebook_registration = get_hs_token_from(registration_yaml) - # Indirectly checks the as token is not set to something like empty string or "null" - assert len(generated_as_token_facebook) > 20, f"as_token ({generated_as_token_facebook}) is too short, something went wrong" - assert len(generated_hs_token_facebook) > 20, f"hs_token ({generated_hs_token_facebook}) is too short, something went wrong" + # Indirectly checks the as token is not set to something like empty string or "null" + assert len(generated_as_token_facebook) > 20, f"as_token ({generated_as_token_facebook}) is too short, something went wrong" + assert len(generated_hs_token_facebook) > 20, f"hs_token ({generated_hs_token_facebook}) is too short, something went wrong" - assert generated_as_token_facebook == generated_as_token_facebook_registration, f"as_token should be the same in registration ({generated_as_token_facebook_registration}) and configuration ({generated_as_token_facebook}) files" - assert generated_hs_token_facebook == generated_hs_token_facebook_registration, f"hs_token should be the same in registration ({generated_hs_token_facebook_registration}) and configuration ({generated_hs_token_facebook}) files" + assert generated_as_token_facebook == generated_as_token_facebook_registration, f"as_token should be the same in registration ({generated_as_token_facebook_registration}) and configuration ({generated_as_token_facebook}) files" + assert generated_hs_token_facebook == generated_hs_token_facebook_registration, f"hs_token should be the same in registration ({generated_hs_token_facebook_registration}) and configuration ({generated_hs_token_facebook}) files" - server.systemctl("restart mautrix-meta-facebook") - server.systemctl("restart mautrix-meta-instagram") + server.systemctl("restart mautrix-meta-facebook") + server.systemctl("restart mautrix-meta-instagram") - server.wait_for_open_port(8009) - server.wait_for_open_port(8010) + server.wait_for_open_port(8009) + server.wait_for_open_port(8010) - new_as_token_facebook = get_as_token_from(config_yaml) - new_hs_token_facebook = get_hs_token_from(config_yaml) + new_as_token_facebook = get_as_token_from(config_yaml) + new_hs_token_facebook = get_hs_token_from(config_yaml) - assert generated_as_token_facebook == new_as_token_facebook, f"as_token should stay the same after restart inside the configuration file (is: {new_as_token_facebook}, was: {generated_as_token_facebook})" - assert generated_hs_token_facebook == new_hs_token_facebook, f"hs_token should stay the same after restart inside the configuration file (is: {new_hs_token_facebook}, was: {generated_hs_token_facebook})" + assert generated_as_token_facebook == new_as_token_facebook, f"as_token should stay the same after restart inside the configuration file (is: {new_as_token_facebook}, was: {generated_as_token_facebook})" + assert generated_hs_token_facebook == new_hs_token_facebook, f"hs_token should stay the same after restart inside the configuration file (is: {new_hs_token_facebook}, was: {generated_hs_token_facebook})" - new_as_token_facebook = get_as_token_from(registration_yaml) - new_hs_token_facebook = get_hs_token_from(registration_yaml) + new_as_token_facebook = get_as_token_from(registration_yaml) + new_hs_token_facebook = get_hs_token_from(registration_yaml) - assert generated_as_token_facebook == 
new_as_token_facebook, f"as_token should stay the same after restart inside the registration file (is: {new_as_token_facebook}, was: {generated_as_token_facebook})" - assert generated_hs_token_facebook == new_hs_token_facebook, f"hs_token should stay the same after restart inside the registration file (is: {new_hs_token_facebook}, was: {generated_hs_token_facebook})" + assert generated_as_token_facebook == new_as_token_facebook, f"as_token should stay the same after restart inside the registration file (is: {new_as_token_facebook}, was: {generated_as_token_facebook})" + assert generated_hs_token_facebook == new_hs_token_facebook, f"hs_token should stay the same after restart inside the registration file (is: {new_hs_token_facebook}, was: {generated_hs_token_facebook})" - with subtest("ensure messages can be exchanged after restart"): - client.succeed("do_test ${username} ${instagramBotUsername} ${homeserverUrl} >&2") - client.succeed("do_test ${username} ${facebookBotUsername} ${homeserverUrl} >&2") - ''; - } -) + with subtest("ensure messages can be exchanged after restart"): + client.succeed("do_test ${username} ${instagramBotUsername} ${homeserverUrl} >&2") + client.succeed("do_test ${username} ${facebookBotUsername} ${homeserverUrl} >&2") + ''; +} diff --git a/nixos/tests/matrix/mjolnir.nix b/nixos/tests/matrix/mjolnir.nix index a4a4426758d3..bc74902c7f20 100644 --- a/nixos/tests/matrix/mjolnir.nix +++ b/nixos/tests/matrix/mjolnir.nix @@ -1,187 +1,185 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - let - # Set up SSL certs for Synapse to be happy. - runWithOpenSSL = - file: cmd: - pkgs.runCommand file { - buildInputs = [ pkgs.openssl ]; - } cmd; +{ pkgs, ... }: +let + # Set up SSL certs for Synapse to be happy. + runWithOpenSSL = + file: cmd: + pkgs.runCommand file { + buildInputs = [ pkgs.openssl ]; + } cmd; - ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048"; - ca_pem = runWithOpenSSL "ca.pem" '' - openssl req \ - -x509 -new -nodes -key ${ca_key} \ - -days 10000 -out $out -subj "/CN=snakeoil-ca" - ''; - key = runWithOpenSSL "matrix_key.pem" "openssl genrsa -out $out 2048"; - csr = runWithOpenSSL "matrix.csr" '' - openssl req \ - -new -key ${key} \ - -out $out -subj "/CN=localhost" \ - ''; - cert = runWithOpenSSL "matrix_cert.pem" '' - openssl x509 \ - -req -in ${csr} \ - -CA ${ca_pem} -CAkey ${ca_key} \ - -CAcreateserial -out $out \ - -days 365 - ''; - in - { - name = "mjolnir"; - meta = { - inherit (pkgs.mjolnir.meta) maintainers; - }; + ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048"; + ca_pem = runWithOpenSSL "ca.pem" '' + openssl req \ + -x509 -new -nodes -key ${ca_key} \ + -days 10000 -out $out -subj "/CN=snakeoil-ca" + ''; + key = runWithOpenSSL "matrix_key.pem" "openssl genrsa -out $out 2048"; + csr = runWithOpenSSL "matrix.csr" '' + openssl req \ + -new -key ${key} \ + -out $out -subj "/CN=localhost" \ + ''; + cert = runWithOpenSSL "matrix_cert.pem" '' + openssl x509 \ + -req -in ${csr} \ + -CA ${ca_pem} -CAkey ${ca_key} \ + -CAcreateserial -out $out \ + -days 365 + ''; +in +{ + name = "mjolnir"; + meta = { + inherit (pkgs.mjolnir.meta) maintainers; + }; - nodes = { - homeserver = - { pkgs, ... 
}: - { - services.matrix-synapse = { - enable = true; - settings = { - database.name = "sqlite3"; - tls_certificate_path = "${cert}"; - tls_private_key_path = "${key}"; - enable_registration = true; - enable_registration_without_verification = true; - registration_shared_secret = "supersecret-registration"; + nodes = { + homeserver = + { pkgs, ... }: + { + services.matrix-synapse = { + enable = true; + settings = { + database.name = "sqlite3"; + tls_certificate_path = "${cert}"; + tls_private_key_path = "${key}"; + enable_registration = true; + enable_registration_without_verification = true; + registration_shared_secret = "supersecret-registration"; - listeners = [ - { - # The default but tls=false - bind_addresses = [ - "0.0.0.0" - ]; - port = 8448; - resources = [ - { - compress = true; - names = [ "client" ]; - } - { - compress = false; - names = [ "federation" ]; - } - ]; - tls = false; - type = "http"; - x_forwarded = false; - } - ]; - }; - }; - - networking.firewall.allowedTCPPorts = [ 8448 ]; - - environment.systemPackages = [ - (pkgs.writeShellScriptBin "register_mjolnir_user" '' - exec ${pkgs.matrix-synapse}/bin/register_new_matrix_user \ - -u mjolnir \ - -p mjolnir-password \ - --admin \ - --shared-secret supersecret-registration \ - http://localhost:8448 - '') - (pkgs.writeShellScriptBin "register_moderator_user" '' - exec ${pkgs.matrix-synapse}/bin/register_new_matrix_user \ - -u moderator \ - -p moderator-password \ - --no-admin \ - --shared-secret supersecret-registration \ - http://localhost:8448 - '') - ]; - }; - - mjolnir = - { pkgs, ... }: - { - services.mjolnir = { - enable = true; - homeserverUrl = "http://homeserver:8448"; - pantalaimon = { - enable = true; - username = "mjolnir"; - passwordFile = pkgs.writeText "password.txt" "mjolnir-password"; - # otherwise mjolnir tries to connect to ::1, which is not listened by pantalaimon - options.listenAddress = "127.0.0.1"; - }; - managementRoom = "#moderators:homeserver"; - }; - }; - - client = - { pkgs, ... 
}: - { - environment.systemPackages = [ - (pkgs.writers.writePython3Bin "create_management_room_and_invite_mjolnir" + listeners = [ { - libraries = with pkgs.python3Packages; [ - (matrix-nio.override { withOlm = true; }) + # The default but tls=false + bind_addresses = [ + "0.0.0.0" ]; + port = 8448; + resources = [ + { + compress = true; + names = [ "client" ]; + } + { + compress = false; + names = [ "federation" ]; + } + ]; + tls = false; + type = "http"; + x_forwarded = false; } - '' - import asyncio - - from nio import ( - AsyncClient, - EnableEncryptionBuilder - ) - - - async def main() -> None: - client = AsyncClient("http://homeserver:8448", "moderator") - - await client.login("moderator-password") - - room = await client.room_create( - name="Moderators", - alias="moderators", - initial_state=[EnableEncryptionBuilder().as_dict()], - ) - - await client.join(room.room_id) - await client.room_invite(room.room_id, "@mjolnir:homeserver") - - asyncio.run(main()) - '' - ) - ]; + ]; + }; }; - }; - testScript = '' - with subtest("start homeserver"): - homeserver.start() + networking.firewall.allowedTCPPorts = [ 8448 ]; - homeserver.wait_for_unit("matrix-synapse.service") - homeserver.wait_until_succeeds("curl --fail -L http://localhost:8448/") + environment.systemPackages = [ + (pkgs.writeShellScriptBin "register_mjolnir_user" '' + exec ${pkgs.matrix-synapse}/bin/register_new_matrix_user \ + -u mjolnir \ + -p mjolnir-password \ + --admin \ + --shared-secret supersecret-registration \ + http://localhost:8448 + '') + (pkgs.writeShellScriptBin "register_moderator_user" '' + exec ${pkgs.matrix-synapse}/bin/register_new_matrix_user \ + -u moderator \ + -p moderator-password \ + --no-admin \ + --shared-secret supersecret-registration \ + http://localhost:8448 + '') + ]; + }; - with subtest("register users"): - # register mjolnir user - homeserver.succeed("register_mjolnir_user") - # register moderator user - homeserver.succeed("register_moderator_user") + mjolnir = + { pkgs, ... }: + { + services.mjolnir = { + enable = true; + homeserverUrl = "http://homeserver:8448"; + pantalaimon = { + enable = true; + username = "mjolnir"; + passwordFile = pkgs.writeText "password.txt" "mjolnir-password"; + # otherwise mjolnir tries to connect to ::1, which is not listened by pantalaimon + options.listenAddress = "127.0.0.1"; + }; + managementRoom = "#moderators:homeserver"; + }; + }; - with subtest("start mjolnir"): - mjolnir.start() + client = + { pkgs, ... }: + { + environment.systemPackages = [ + (pkgs.writers.writePython3Bin "create_management_room_and_invite_mjolnir" + { + libraries = with pkgs.python3Packages; [ + (matrix-nio.override { withOlm = true; }) + ]; + } + '' + import asyncio - # wait for pantalaimon to be ready - mjolnir.wait_for_unit("pantalaimon-mjolnir.service") - mjolnir.wait_for_unit("mjolnir.service") + from nio import ( + AsyncClient, + EnableEncryptionBuilder + ) - mjolnir.wait_until_succeeds("curl --fail -L http://localhost:8009/") - with subtest("ensure mjolnir can be invited to the management room"): - client.start() + async def main() -> None: + client = AsyncClient("http://homeserver:8448", "moderator") - client.wait_until_succeeds("curl --fail -L http://homeserver:8448/") + await client.login("moderator-password") - client.succeed("create_management_room_and_invite_mjolnir") + room = await client.room_create( + name="Moderators", + alias="moderators", + initial_state=[EnableEncryptionBuilder().as_dict()], + ) - mjolnir.wait_for_console_text("Startup complete. 
Now monitoring rooms") - ''; - } -) + await client.join(room.room_id) + await client.room_invite(room.room_id, "@mjolnir:homeserver") + + asyncio.run(main()) + '' + ) + ]; + }; + }; + + testScript = '' + with subtest("start homeserver"): + homeserver.start() + + homeserver.wait_for_unit("matrix-synapse.service") + homeserver.wait_until_succeeds("curl --fail -L http://localhost:8448/") + + with subtest("register users"): + # register mjolnir user + homeserver.succeed("register_mjolnir_user") + # register moderator user + homeserver.succeed("register_moderator_user") + + with subtest("start mjolnir"): + mjolnir.start() + + # wait for pantalaimon to be ready + mjolnir.wait_for_unit("pantalaimon-mjolnir.service") + mjolnir.wait_for_unit("mjolnir.service") + + mjolnir.wait_until_succeeds("curl --fail -L http://localhost:8009/") + + with subtest("ensure mjolnir can be invited to the management room"): + client.start() + + client.wait_until_succeeds("curl --fail -L http://homeserver:8448/") + + client.succeed("create_management_room_and_invite_mjolnir") + + mjolnir.wait_for_console_text("Startup complete. Now monitoring rooms") + ''; +} diff --git a/nixos/tests/matrix/pantalaimon.nix b/nixos/tests/matrix/pantalaimon.nix index 6ff4cf8cda06..eb2123ee3ca6 100644 --- a/nixos/tests/matrix/pantalaimon.nix +++ b/nixos/tests/matrix/pantalaimon.nix @@ -1,95 +1,93 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - let - pantalaimonInstanceName = "testing"; +{ pkgs, ... }: +let + pantalaimonInstanceName = "testing"; - # Set up SSL certs for Synapse to be happy. - runWithOpenSSL = - file: cmd: - pkgs.runCommand file { - buildInputs = [ pkgs.openssl ]; - } cmd; + # Set up SSL certs for Synapse to be happy. + runWithOpenSSL = + file: cmd: + pkgs.runCommand file { + buildInputs = [ pkgs.openssl ]; + } cmd; - ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048"; - ca_pem = runWithOpenSSL "ca.pem" '' - openssl req \ - -x509 -new -nodes -key ${ca_key} \ - -days 10000 -out $out -subj "/CN=snakeoil-ca" - ''; - key = runWithOpenSSL "matrix_key.pem" "openssl genrsa -out $out 2048"; - csr = runWithOpenSSL "matrix.csr" '' - openssl req \ - -new -key ${key} \ - -out $out -subj "/CN=localhost" \ - ''; - cert = runWithOpenSSL "matrix_cert.pem" '' - openssl x509 \ - -req -in ${csr} \ - -CA ${ca_pem} -CAkey ${ca_key} \ - -CAcreateserial -out $out \ - -days 365 - ''; - in - { - name = "pantalaimon"; - meta = with pkgs.lib; { - maintainers = teams.matrix.members; - }; + ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048"; + ca_pem = runWithOpenSSL "ca.pem" '' + openssl req \ + -x509 -new -nodes -key ${ca_key} \ + -days 10000 -out $out -subj "/CN=snakeoil-ca" + ''; + key = runWithOpenSSL "matrix_key.pem" "openssl genrsa -out $out 2048"; + csr = runWithOpenSSL "matrix.csr" '' + openssl req \ + -new -key ${key} \ + -out $out -subj "/CN=localhost" \ + ''; + cert = runWithOpenSSL "matrix_cert.pem" '' + openssl x509 \ + -req -in ${csr} \ + -CA ${ca_pem} -CAkey ${ca_key} \ + -CAcreateserial -out $out \ + -days 365 + ''; +in +{ + name = "pantalaimon"; + meta = with pkgs.lib; { + maintainers = teams.matrix.members; + }; - nodes.machine = - { pkgs, ... 
}: - { - services.pantalaimon-headless.instances.${pantalaimonInstanceName} = { - homeserver = "https://localhost:8448"; - listenAddress = "0.0.0.0"; - listenPort = 8888; - logLevel = "debug"; - ssl = false; - }; - - services.matrix-synapse = { - enable = true; - settings = { - listeners = [ - { - port = 8448; - bind_addresses = [ - "127.0.0.1" - "::1" - ]; - type = "http"; - tls = true; - x_forwarded = false; - resources = [ - { - names = [ - "client" - ]; - compress = true; - } - { - names = [ - "federation" - ]; - compress = false; - } - ]; - } - ]; - database.name = "sqlite3"; - tls_certificate_path = "${cert}"; - tls_private_key_path = "${key}"; - }; - }; + nodes.machine = + { pkgs, ... }: + { + services.pantalaimon-headless.instances.${pantalaimonInstanceName} = { + homeserver = "https://localhost:8448"; + listenAddress = "0.0.0.0"; + listenPort = 8888; + logLevel = "debug"; + ssl = false; }; - testScript = '' - start_all() - machine.wait_for_unit("pantalaimon-${pantalaimonInstanceName}.service") - machine.wait_for_unit("matrix-synapse.service") - machine.wait_until_succeeds( - "curl --fail -L http://localhost:8888/" - ) - ''; - } -) + services.matrix-synapse = { + enable = true; + settings = { + listeners = [ + { + port = 8448; + bind_addresses = [ + "127.0.0.1" + "::1" + ]; + type = "http"; + tls = true; + x_forwarded = false; + resources = [ + { + names = [ + "client" + ]; + compress = true; + } + { + names = [ + "federation" + ]; + compress = false; + } + ]; + } + ]; + database.name = "sqlite3"; + tls_certificate_path = "${cert}"; + tls_private_key_path = "${key}"; + }; + }; + }; + + testScript = '' + start_all() + machine.wait_for_unit("pantalaimon-${pantalaimonInstanceName}.service") + machine.wait_for_unit("matrix-synapse.service") + machine.wait_until_succeeds( + "curl --fail -L http://localhost:8888/" + ) + ''; +} diff --git a/nixos/tests/matrix/synapse-workers.nix b/nixos/tests/matrix/synapse-workers.nix index f75a0528769a..949658caac47 100644 --- a/nixos/tests/matrix/synapse-workers.nix +++ b/nixos/tests/matrix/synapse-workers.nix @@ -1,55 +1,53 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - { - name = "matrix-synapse-workers"; - meta = { - inherit (pkgs.matrix-synapse.meta) maintainers; - }; +{ pkgs, ... }: +{ + name = "matrix-synapse-workers"; + meta = { + inherit (pkgs.matrix-synapse.meta) maintainers; + }; - nodes = { - homeserver = - { - pkgs, - nodes, - ... - }: - { - services.postgresql = { - enable = true; - initialScript = pkgs.writeText "synapse-init.sql" '' - CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse'; - CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse" - TEMPLATE template0 - LC_COLLATE = "C" - LC_CTYPE = "C"; - ''; + nodes = { + homeserver = + { + pkgs, + nodes, + ... 
+ }: + { + services.postgresql = { + enable = true; + initialScript = pkgs.writeText "synapse-init.sql" '' + CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse'; + CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse" + TEMPLATE template0 + LC_COLLATE = "C" + LC_CTYPE = "C"; + ''; + }; + + services.matrix-synapse = { + enable = true; + settings = { + database = { + name = "psycopg2"; + args.password = "synapse"; + }; + enable_registration = true; + enable_registration_without_verification = true; + + federation_sender_instances = [ "federation_sender" ]; }; - - services.matrix-synapse = { - enable = true; - settings = { - database = { - name = "psycopg2"; - args.password = "synapse"; - }; - enable_registration = true; - enable_registration_without_verification = true; - - federation_sender_instances = [ "federation_sender" ]; - }; - configureRedisLocally = true; - workers = { - "federation_sender" = { }; - }; + configureRedisLocally = true; + workers = { + "federation_sender" = { }; }; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - homeserver.wait_for_unit("matrix-synapse.service"); - homeserver.wait_for_unit("matrix-synapse-worker-federation_sender.service"); - ''; - } -) + homeserver.wait_for_unit("matrix-synapse.service"); + homeserver.wait_for_unit("matrix-synapse-worker-federation_sender.service"); + ''; +} diff --git a/nixos/tests/matrix/synapse.nix b/nixos/tests/matrix/synapse.nix index c16182a46cd2..323fa25ccb6c 100644 --- a/nixos/tests/matrix/synapse.nix +++ b/nixos/tests/matrix/synapse.nix @@ -1,248 +1,246 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - let +{ pkgs, ... }: +let - ca_key = mailerCerts.ca.key; - ca_pem = mailerCerts.ca.cert; + ca_key = mailerCerts.ca.key; + ca_pem = mailerCerts.ca.cert; - bundle = - pkgs.runCommand "bundle" - { - nativeBuildInputs = [ pkgs.minica ]; - } - '' - minica -ca-cert ${ca_pem} -ca-key ${ca_key} \ - -domains localhost - install -Dm444 -t $out localhost/{key,cert}.pem - ''; - - mailerCerts = import ../common/acme/server/snakeoil-certs.nix; - mailerDomain = mailerCerts.domain; - registrationSharedSecret = "unsecure123"; - testUser = "alice"; - testPassword = "alicealice"; - testEmail = "alice@example.com"; - - listeners = [ + bundle = + pkgs.runCommand "bundle" { - port = 8448; - bind_addresses = [ - "127.0.0.1" - "::1" - ]; - type = "http"; - tls = true; - x_forwarded = false; - resources = [ - { - names = [ - "client" - ]; - compress = true; - } - { - names = [ - "federation" - ]; - compress = false; - } - ]; + nativeBuildInputs = [ pkgs.minica ]; } - ]; + '' + minica -ca-cert ${ca_pem} -ca-key ${ca_key} \ + -domains localhost + install -Dm444 -t $out localhost/{key,cert}.pem + ''; - in - { + mailerCerts = import ../common/acme/server/snakeoil-certs.nix; + mailerDomain = mailerCerts.domain; + registrationSharedSecret = "unsecure123"; + testUser = "alice"; + testPassword = "alicealice"; + testEmail = "alice@example.com"; - name = "matrix-synapse"; - meta = { - inherit (pkgs.matrix-synapse.meta) maintainers; - }; - - nodes = { - # Since 0.33.0, matrix-synapse doesn't allow underscores in server names - serverpostgres = + listeners = [ + { + port = 8448; + bind_addresses = [ + "127.0.0.1" + "::1" + ]; + type = "http"; + tls = true; + x_forwarded = false; + resources = [ { - pkgs, - nodes, - config, - ... 
- }: - let - mailserverIP = nodes.mailserver.config.networking.primaryIPAddress; - in - { - services.matrix-synapse = { - enable = true; - settings = { - inherit listeners; - database = { - name = "psycopg2"; - args.password = "synapse"; - }; - redis = { - enabled = true; - host = "localhost"; - port = config.services.redis.servers.matrix-synapse.port; - }; - tls_certificate_path = "${bundle}/cert.pem"; - tls_private_key_path = "${bundle}/key.pem"; - registration_shared_secret = registrationSharedSecret; - public_baseurl = "https://example.com"; - email = { - smtp_host = mailerDomain; - smtp_port = 25; - require_transport_security = true; - notif_from = "matrix "; - app_name = "Matrix"; - }; - }; - }; - services.postgresql = { - enable = true; - - # The database name and user are configured by the following options: - # - services.matrix-synapse.database_name - # - services.matrix-synapse.database_user - # - # The values used here represent the default values of the module. - initialScript = pkgs.writeText "synapse-init.sql" '' - CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse'; - CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse" - TEMPLATE template0 - LC_COLLATE = "C" - LC_CTYPE = "C"; - ''; - }; - - services.redis.servers.matrix-synapse = { - enable = true; - port = 6380; - }; - - networking.extraHosts = '' - ${mailserverIP} ${mailerDomain} - ''; - - security.pki.certificateFiles = [ - mailerCerts.ca.cert - ca_pem + names = [ + "client" ]; - - environment.systemPackages = - let - sendTestMailStarttls = pkgs.writeScriptBin "send-testmail-starttls" '' - #!${pkgs.python3.interpreter} - import smtplib - import ssl - - ctx = ssl.create_default_context() - - with smtplib.SMTP('${mailerDomain}') as smtp: - smtp.ehlo() - smtp.starttls(context=ctx) - smtp.ehlo() - smtp.sendmail('matrix@${mailerDomain}', '${testEmail}', 'Subject: Test STARTTLS\n\nTest data.') - smtp.quit() - ''; - - obtainTokenAndRegisterEmail = - let - # adding the email through the API is quite complicated as it involves more than one step and some - # client-side calculation - insertEmailForAlice = pkgs.writeText "alice-email.sql" '' - INSERT INTO user_threepids (user_id, medium, address, validated_at, added_at) VALUES ('${testUser}@serverpostgres', 'email', '${testEmail}', '1629149927271', '1629149927270'); - ''; - in - pkgs.writeScriptBin "obtain-token-and-register-email" '' - #!${pkgs.runtimeShell} - set -o errexit - set -o pipefail - set -o nounset - su postgres -c "psql -d matrix-synapse -f ${insertEmailForAlice}" - curl --fail -XPOST 'https://localhost:8448/_matrix/client/r0/account/password/email/requestToken' -d '{"email":"${testEmail}","client_secret":"foobar","send_attempt":1}' -v - ''; - in - [ - sendTestMailStarttls - pkgs.matrix-synapse - obtainTokenAndRegisterEmail - ]; - }; - - # test mail delivery - mailserver = - args: - let - in + compress = true; + } { - security.pki.certificateFiles = [ - mailerCerts.ca.cert + names = [ + "federation" ]; + compress = false; + } + ]; + } + ]; - networking.firewall.enable = false; +in +{ - services.postfix = { - enable = true; - hostname = "${mailerDomain}"; - # open relay for subnet - networksStyle = "subnet"; - enableSubmission = true; - tlsTrustedAuthorities = "${mailerCerts.ca.cert}"; - sslCert = "${mailerCerts.${mailerDomain}.cert}"; - sslKey = "${mailerCerts.${mailerDomain}.key}"; + name = "matrix-synapse"; + meta = { + inherit (pkgs.matrix-synapse.meta) maintainers; + }; - # blackhole transport - transport = "example.com discard:silently"; - - 
config = { - debug_peer_level = "10"; - smtpd_relay_restrictions = [ - "permit_mynetworks" - "reject_unauth_destination" - ]; - - # disable obsolete protocols, something old versions of twisted are still using - smtpd_tls_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3"; - smtp_tls_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3"; - smtpd_tls_mandatory_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3"; - smtp_tls_mandatory_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3"; - }; - }; - }; - - serversqlite = args: { + nodes = { + # Since 0.33.0, matrix-synapse doesn't allow underscores in server names + serverpostgres = + { + pkgs, + nodes, + config, + ... + }: + let + mailserverIP = nodes.mailserver.config.networking.primaryIPAddress; + in + { services.matrix-synapse = { enable = true; settings = { inherit listeners; - database.name = "sqlite3"; + database = { + name = "psycopg2"; + args.password = "synapse"; + }; + redis = { + enabled = true; + host = "localhost"; + port = config.services.redis.servers.matrix-synapse.port; + }; tls_certificate_path = "${bundle}/cert.pem"; tls_private_key_path = "${bundle}/key.pem"; + registration_shared_secret = registrationSharedSecret; + public_baseurl = "https://example.com"; + email = { + smtp_host = mailerDomain; + smtp_port = 25; + require_transport_security = true; + notif_from = "matrix "; + app_name = "Matrix"; + }; + }; + }; + services.postgresql = { + enable = true; + + # The database name and user are configured by the following options: + # - services.matrix-synapse.database_name + # - services.matrix-synapse.database_user + # + # The values used here represent the default values of the module. + initialScript = pkgs.writeText "synapse-init.sql" '' + CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse'; + CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse" + TEMPLATE template0 + LC_COLLATE = "C" + LC_CTYPE = "C"; + ''; + }; + + services.redis.servers.matrix-synapse = { + enable = true; + port = 6380; + }; + + networking.extraHosts = '' + ${mailserverIP} ${mailerDomain} + ''; + + security.pki.certificateFiles = [ + mailerCerts.ca.cert + ca_pem + ]; + + environment.systemPackages = + let + sendTestMailStarttls = pkgs.writeScriptBin "send-testmail-starttls" '' + #!${pkgs.python3.interpreter} + import smtplib + import ssl + + ctx = ssl.create_default_context() + + with smtplib.SMTP('${mailerDomain}') as smtp: + smtp.ehlo() + smtp.starttls(context=ctx) + smtp.ehlo() + smtp.sendmail('matrix@${mailerDomain}', '${testEmail}', 'Subject: Test STARTTLS\n\nTest data.') + smtp.quit() + ''; + + obtainTokenAndRegisterEmail = + let + # adding the email through the API is quite complicated as it involves more than one step and some + # client-side calculation + insertEmailForAlice = pkgs.writeText "alice-email.sql" '' + INSERT INTO user_threepids (user_id, medium, address, validated_at, added_at) VALUES ('${testUser}@serverpostgres', 'email', '${testEmail}', '1629149927271', '1629149927270'); + ''; + in + pkgs.writeScriptBin "obtain-token-and-register-email" '' + #!${pkgs.runtimeShell} + set -o errexit + set -o pipefail + set -o nounset + su postgres -c "psql -d matrix-synapse -f ${insertEmailForAlice}" + curl --fail -XPOST 'https://localhost:8448/_matrix/client/r0/account/password/email/requestToken' -d '{"email":"${testEmail}","client_secret":"foobar","send_attempt":1}' -v + ''; + in + [ + sendTestMailStarttls + pkgs.matrix-synapse + obtainTokenAndRegisterEmail + ]; + }; + + # test 
mail delivery + mailserver = + args: + let + in + { + security.pki.certificateFiles = [ + mailerCerts.ca.cert + ]; + + networking.firewall.enable = false; + + services.postfix = { + enable = true; + hostname = "${mailerDomain}"; + # open relay for subnet + networksStyle = "subnet"; + enableSubmission = true; + tlsTrustedAuthorities = "${mailerCerts.ca.cert}"; + sslCert = "${mailerCerts.${mailerDomain}.cert}"; + sslKey = "${mailerCerts.${mailerDomain}.key}"; + + # blackhole transport + transport = "example.com discard:silently"; + + config = { + debug_peer_level = "10"; + smtpd_relay_restrictions = [ + "permit_mynetworks" + "reject_unauth_destination" + ]; + + # disable obsolete protocols, something old versions of twisted are still using + smtpd_tls_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3"; + smtp_tls_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3"; + smtpd_tls_mandatory_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3"; + smtp_tls_mandatory_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3"; }; }; }; + + serversqlite = args: { + services.matrix-synapse = { + enable = true; + settings = { + inherit listeners; + database.name = "sqlite3"; + tls_certificate_path = "${bundle}/cert.pem"; + tls_private_key_path = "${bundle}/key.pem"; + }; + }; }; + }; - testScript = '' - start_all() - mailserver.wait_for_unit("postfix.service") - serverpostgres.succeed("send-testmail-starttls") - serverpostgres.wait_for_unit("matrix-synapse.service") - serverpostgres.wait_until_succeeds( - "curl --fail -L --cacert ${ca_pem} https://localhost:8448/" - ) - serverpostgres.wait_until_succeeds( - "journalctl -u matrix-synapse.service | grep -q 'Connected to redis'" - ) - serverpostgres.require_unit_state("postgresql.service") - serverpostgres.succeed("REQUESTS_CA_BUNDLE=${ca_pem} register_new_matrix_user -u ${testUser} -p ${testPassword} -a -k ${registrationSharedSecret} https://localhost:8448/") - serverpostgres.succeed("obtain-token-and-register-email") - serversqlite.wait_for_unit("matrix-synapse.service") - serversqlite.wait_until_succeeds( - "curl --fail -L --cacert ${ca_pem} https://localhost:8448/" - ) - serversqlite.succeed("[ -e /var/lib/matrix-synapse/homeserver.db ]") - ''; + testScript = '' + start_all() + mailserver.wait_for_unit("postfix.service") + serverpostgres.succeed("send-testmail-starttls") + serverpostgres.wait_for_unit("matrix-synapse.service") + serverpostgres.wait_until_succeeds( + "curl --fail -L --cacert ${ca_pem} https://localhost:8448/" + ) + serverpostgres.wait_until_succeeds( + "journalctl -u matrix-synapse.service | grep -q 'Connected to redis'" + ) + serverpostgres.require_unit_state("postgresql.service") + serverpostgres.succeed("REQUESTS_CA_BUNDLE=${ca_pem} register_new_matrix_user -u ${testUser} -p ${testPassword} -a -k ${registrationSharedSecret} https://localhost:8448/") + serverpostgres.succeed("obtain-token-and-register-email") + serversqlite.wait_for_unit("matrix-synapse.service") + serversqlite.wait_until_succeeds( + "curl --fail -L --cacert ${ca_pem} https://localhost:8448/" + ) + serversqlite.succeed("[ -e /var/lib/matrix-synapse/homeserver.db ]") + ''; - } -) +} diff --git a/nixos/tests/matter-server.nix b/nixos/tests/matter-server.nix index e302a8e91427..167cee21e07b 100644 --- a/nixos/tests/matter-server.nix +++ b/nixos/tests/matter-server.nix @@ -1,52 +1,50 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... 
}: - let - chipVersion = pkgs.python311Packages.home-assistant-chip-core.version; - in +let + chipVersion = pkgs.python311Packages.home-assistant-chip-core.version; +in - { - name = "matter-server"; - meta.maintainers = with lib.maintainers; [ leonm1 ]; - meta.timeout = 120; # Timeout after two minutes +{ + name = "matter-server"; + meta.maintainers = with lib.maintainers; [ leonm1 ]; + meta.timeout = 120; # Timeout after two minutes - nodes = { - machine = - { config, ... }: - { - services.matter-server = { - enable = true; - port = 1234; - }; + nodes = { + machine = + { config, ... }: + { + services.matter-server = { + enable = true; + port = 1234; }; - }; + }; + }; - testScript = # python - '' - @polling_condition - def matter_server_running(): - machine.succeed("systemctl status matter-server") + testScript = # python + '' + @polling_condition + def matter_server_running(): + machine.succeed("systemctl status matter-server") - start_all() + start_all() - machine.wait_for_unit("matter-server.service", timeout=20) - machine.wait_for_open_port(1234, timeout=20) + machine.wait_for_unit("matter-server.service", timeout=20) + machine.wait_for_open_port(1234, timeout=20) - with matter_server_running: # type: ignore[union-attr] - with subtest("Check websocket server initialized"): - output = machine.succeed("echo \"\" | ${pkgs.websocat}/bin/websocat ws://localhost:1234/ws") - machine.log(output) + with matter_server_running: # type: ignore[union-attr] + with subtest("Check websocket server initialized"): + output = machine.succeed("echo \"\" | ${pkgs.websocat}/bin/websocat ws://localhost:1234/ws") + machine.log(output) - assert '"fabric_id": 1' in output, ( - "fabric_id not propagated to server" - ) + assert '"fabric_id": 1' in output, ( + "fabric_id not propagated to server" + ) - with subtest("Check storage directory is created"): - machine.succeed("ls /var/lib/matter-server/chip.json") + with subtest("Check storage directory is created"): + machine.succeed("ls /var/lib/matter-server/chip.json") - with subtest("Check systemd hardening"): - _, output = machine.execute("systemd-analyze security matter-server.service | grep -v '✓'") - machine.log(output) - ''; - } -) + with subtest("Check systemd hardening"): + _, output = machine.execute("systemd-analyze security matter-server.service | grep -v '✓'") + machine.log(output) + ''; +} diff --git a/nixos/tests/mealie.nix b/nixos/tests/mealie.nix index e296d5180ecd..97dd4d048f48 100644 --- a/nixos/tests/mealie.nix +++ b/nixos/tests/mealie.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - { - name = "mealie"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - litchipi - anoa - ]; +{ + name = "mealie"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + litchipi + anoa + ]; + }; + + nodes = + let + sqlite = { + services.mealie = { + enable = true; + port = 9001; + }; + }; + postgres = { + imports = [ sqlite ]; + services.mealie.database.createLocally = true; + }; + in + { + inherit sqlite postgres; }; - nodes = - let - sqlite = { - services.mealie = { - enable = true; - port = 9001; - }; - }; - postgres = { - imports = [ sqlite ]; - services.mealie.database.createLocally = true; - }; - in - { - inherit sqlite postgres; - }; + testScript = '' + start_all() - testScript = '' - start_all() + def test_mealie(node): + node.wait_for_unit("mealie.service") + node.wait_for_open_port(9001) + node.succeed("curl --fail http://localhost:9001") - def test_mealie(node): - node.wait_for_unit("mealie.service") - node.wait_for_open_port(9001) - node.succeed("curl --fail http://localhost:9001") - - test_mealie(sqlite) - sqlite.send_monitor_command("quit") - sqlite.wait_for_shutdown() - test_mealie(postgres) - ''; - } -) + test_mealie(sqlite) + sqlite.send_monitor_command("quit") + sqlite.wait_for_shutdown() + test_mealie(postgres) + ''; +} diff --git a/nixos/tests/mediamtx.nix b/nixos/tests/mediamtx.nix index f40c4a8cb583..7f7d40d3e12a 100644 --- a/nixos/tests/mediamtx.nix +++ b/nixos/tests/mediamtx.nix @@ -1,60 +1,58 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - { - name = "mediamtx"; - meta.maintainers = with lib.maintainers; [ fpletz ]; +{ + name = "mediamtx"; + meta.maintainers = with lib.maintainers; [ fpletz ]; - nodes = { - machine = { - services.mediamtx = { - enable = true; - settings = { - metrics = true; - paths.all.source = "publisher"; - }; + nodes = { + machine = { + services.mediamtx = { + enable = true; + settings = { + metrics = true; + paths.all.source = "publisher"; }; + }; - systemd.services.rtmp-publish = { - description = "Publish an RTMP stream to mediamtx"; - after = [ "mediamtx.service" ]; - bindsTo = [ "mediamtx.service" ]; - wantedBy = [ "multi-user.target" ]; - serviceConfig = { - DynamicUser = true; - Restart = "on-failure"; - RestartSec = "1s"; - TimeoutStartSec = "10s"; - ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -re -f lavfi -i smptebars=size=800x600:rate=10 -c libx264 -f flv rtmp://localhost:1935/test"; - }; + systemd.services.rtmp-publish = { + description = "Publish an RTMP stream to mediamtx"; + after = [ "mediamtx.service" ]; + bindsTo = [ "mediamtx.service" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + DynamicUser = true; + Restart = "on-failure"; + RestartSec = "1s"; + TimeoutStartSec = "10s"; + ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -re -f lavfi -i smptebars=size=800x600:rate=10 -c libx264 -f flv rtmp://localhost:1935/test"; }; + }; - systemd.services.rtmp-receive = { - description = "Receive an RTMP stream from mediamtx"; - after = [ "rtmp-publish.service" ]; - bindsTo = [ "rtmp-publish.service" ]; - wantedBy = [ "multi-user.target" ]; - serviceConfig = { - DynamicUser = true; - Restart = "on-failure"; - RestartSec = "1s"; - TimeoutStartSec = "10s"; - ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -y -re -i rtmp://localhost:1935/test -f flv /dev/null"; - }; + systemd.services.rtmp-receive = { + description = "Receive an RTMP stream from mediamtx"; + after = [ "rtmp-publish.service" ]; + bindsTo = [ "rtmp-publish.service" ]; + wantedBy 
= [ "multi-user.target" ]; + serviceConfig = { + DynamicUser = true; + Restart = "on-failure"; + RestartSec = "1s"; + TimeoutStartSec = "10s"; + ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -y -re -i rtmp://localhost:1935/test -f flv /dev/null"; }; }; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("mediamtx.service") - machine.wait_for_unit("rtmp-publish.service") - machine.sleep(10) - machine.wait_for_unit("rtmp-receive.service") - machine.wait_for_open_port(9998) - machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"publish\".*1$'") - machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"read\".*1$'") - ''; - } -) + machine.wait_for_unit("mediamtx.service") + machine.wait_for_unit("rtmp-publish.service") + machine.sleep(10) + machine.wait_for_unit("rtmp-receive.service") + machine.wait_for_open_port(9998) + machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"publish\".*1$'") + machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"read\".*1$'") + ''; +} diff --git a/nixos/tests/meilisearch.nix b/nixos/tests/meilisearch.nix index a73abbba3a48..ae88b8e07157 100644 --- a/nixos/tests/meilisearch.nix +++ b/nixos/tests/meilisearch.nix @@ -1,69 +1,67 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - listenAddress = "127.0.0.1"; - listenPort = 7700; - apiUrl = "http://${listenAddress}:${toString listenPort}"; - uid = "movies"; - indexJSON = pkgs.writeText "index.json" (builtins.toJSON { inherit uid; }); - moviesJSON = pkgs.fetchurl { - url = "https://github.com/meilisearch/meilisearch/raw/v0.23.1/datasets/movies/movies.json"; - sha256 = "1r3srld63dpmg9yrmysm6xl175661j5cspi93mk5q2wf8xwn50c5"; - }; - in - { - name = "meilisearch"; - meta.maintainers = with lib.maintainers; [ Br1ght0ne ]; +{ pkgs, lib, ... }: +let + listenAddress = "127.0.0.1"; + listenPort = 7700; + apiUrl = "http://${listenAddress}:${toString listenPort}"; + uid = "movies"; + indexJSON = pkgs.writeText "index.json" (builtins.toJSON { inherit uid; }); + moviesJSON = pkgs.fetchurl { + url = "https://github.com/meilisearch/meilisearch/raw/v0.23.1/datasets/movies/movies.json"; + sha256 = "1r3srld63dpmg9yrmysm6xl175661j5cspi93mk5q2wf8xwn50c5"; + }; +in +{ + name = "meilisearch"; + meta.maintainers = with lib.maintainers; [ Br1ght0ne ]; - nodes.machine = - { ... }: - { - environment.systemPackages = with pkgs; [ - curl - jq - ]; - services.meilisearch = { - enable = true; - inherit listenAddress listenPort; - }; + nodes.machine = + { ... 
}: + { + environment.systemPackages = with pkgs; [ + curl + jq + ]; + services.meilisearch = { + enable = true; + inherit listenAddress listenPort; }; + }; - testScript = '' - import json + testScript = '' + import json - start_all() + start_all() - machine.wait_for_unit("meilisearch") - machine.wait_for_open_port(7700) + machine.wait_for_unit("meilisearch") + machine.wait_for_open_port(7700) - with subtest("check version"): - version = json.loads(machine.succeed("curl ${apiUrl}/version")) - assert version["pkgVersion"] == "${pkgs.meilisearch.version}" + with subtest("check version"): + version = json.loads(machine.succeed("curl ${apiUrl}/version")) + assert version["pkgVersion"] == "${pkgs.meilisearch.version}" - with subtest("create index"): - machine.succeed( - "curl -X POST -H 'Content-Type: application/json' ${apiUrl}/indexes --data @${indexJSON}" - ) - indexes = json.loads(machine.succeed("curl ${apiUrl}/indexes")) - assert indexes["total"] == 1, "index wasn't created" + with subtest("create index"): + machine.succeed( + "curl -X POST -H 'Content-Type: application/json' ${apiUrl}/indexes --data @${indexJSON}" + ) + indexes = json.loads(machine.succeed("curl ${apiUrl}/indexes")) + assert indexes["total"] == 1, "index wasn't created" - with subtest("add documents"): - response = json.loads( - machine.succeed( - "curl -X POST -H 'Content-Type: application/json' ${apiUrl}/indexes/${uid}/documents --data-binary @${moviesJSON}" - ) - ) - task_uid = response["taskUid"] - machine.wait_until_succeeds( - f"curl ${apiUrl}/tasks/{task_uid} | jq -e '.status == \"succeeded\"'" - ) + with subtest("add documents"): + response = json.loads( + machine.succeed( + "curl -X POST -H 'Content-Type: application/json' ${apiUrl}/indexes/${uid}/documents --data-binary @${moviesJSON}" + ) + ) + task_uid = response["taskUid"] + machine.wait_until_succeeds( + f"curl ${apiUrl}/tasks/{task_uid} | jq -e '.status == \"succeeded\"'" + ) - with subtest("search"): - response = json.loads( - machine.succeed("curl ${apiUrl}/indexes/movies/search?q=hero") - ) - print(response) - assert len(response["hits"]) >= 1, "no results found" - ''; - } -) + with subtest("search"): + response = json.loads( + machine.succeed("curl ${apiUrl}/indexes/movies/search?q=hero") + ) + print(response) + assert len(response["hits"]) >= 1, "no results found" + ''; +} diff --git a/nixos/tests/merecat.nix b/nixos/tests/merecat.nix index 072d8dd933de..db790dff41e8 100644 --- a/nixos/tests/merecat.nix +++ b/nixos/tests/merecat.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "merecat"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; - }; +{ pkgs, ... }: +{ + name = "merecat"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; - nodes.machine = - { config, pkgs, ... }: - { - services.merecat = { - enable = true; - settings = { - hostname = "localhost"; - virtual-host = true; - directory = toString ( - pkgs.runCommand "merecat-webdir" { } '' - mkdir -p $out/foo.localhost $out/bar.localhost - echo '

<h1>Hello foo</h1>' > $out/foo.localhost/index.html
-                echo '<h1>Hello bar</h1>
' > $out/bar.localhost/index.html - '' - ); - }; + nodes.machine = + { config, pkgs, ... }: + { + services.merecat = { + enable = true; + settings = { + hostname = "localhost"; + virtual-host = true; + directory = toString ( + pkgs.runCommand "merecat-webdir" { } '' + mkdir -p $out/foo.localhost $out/bar.localhost + echo '

<h1>Hello foo</h1>' > $out/foo.localhost/index.html
+              echo '<h1>Hello bar</h1>
' > $out/bar.localhost/index.html + '' + ); }; }; + }; - testScript = '' - machine.wait_for_unit("merecat") - machine.wait_for_open_port(80) - machine.succeed("curl --fail foo.localhost | grep 'Hello foo'") - machine.succeed("curl --fail bar.localhost | grep 'Hello bar'") - ''; - } -) + testScript = '' + machine.wait_for_unit("merecat") + machine.wait_for_open_port(80) + machine.succeed("curl --fail foo.localhost | grep 'Hello foo'") + machine.succeed("curl --fail bar.localhost | grep 'Hello bar'") + ''; +} diff --git a/nixos/tests/metabase.nix b/nixos/tests/metabase.nix index f86c51b108fc..bf606d5d32cf 100644 --- a/nixos/tests/metabase.nix +++ b/nixos/tests/metabase.nix @@ -1,24 +1,22 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "metabase"; - meta = with pkgs.lib.maintainers; { - maintainers = [ mmahut ]; - }; +{ pkgs, ... }: +{ + name = "metabase"; + meta = with pkgs.lib.maintainers; { + maintainers = [ mmahut ]; + }; - nodes = { - machine = - { ... }: - { - services.metabase.enable = true; - }; - }; + nodes = { + machine = + { ... }: + { + services.metabase.enable = true; + }; + }; - testScript = '' - start_all() - machine.wait_for_unit("metabase.service") - machine.wait_for_open_port(3000) - machine.wait_until_succeeds("curl -fL http://localhost:3000/setup | grep Metabase") - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("metabase.service") + machine.wait_for_open_port(3000) + machine.wait_until_succeeds("curl -fL http://localhost:3000/setup | grep Metabase") + ''; +} diff --git a/nixos/tests/mihomo.nix b/nixos/tests/mihomo.nix index 4e16750a63b5..aa6bde4810fd 100644 --- a/nixos/tests/mihomo.nix +++ b/nixos/tests/mihomo.nix @@ -1,49 +1,47 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "mihomo"; - meta.maintainers = with pkgs.lib.maintainers; [ Guanran928 ]; +{ pkgs, ... 
}: +{ + name = "mihomo"; + meta.maintainers = with pkgs.lib.maintainers; [ Guanran928 ]; - nodes.machine = { - environment.systemPackages = [ pkgs.curl ]; + nodes.machine = { + environment.systemPackages = [ pkgs.curl ]; - services.nginx = { - enable = true; - statusPage = true; - }; - - services.mihomo = { - enable = true; - configFile = pkgs.writeTextFile { - name = "config.yaml"; - text = '' - mixed-port: 7890 - external-controller: 127.0.0.1:9090 - authentication: - - "user:supersecret" - ''; - }; - }; + services.nginx = { + enable = true; + statusPage = true; }; - testScript = '' - # Wait until it starts - machine.wait_for_unit("nginx.service") - machine.wait_for_unit("mihomo.service") - machine.wait_for_open_port(80) - machine.wait_for_open_port(7890) - machine.wait_for_open_port(9090) + services.mihomo = { + enable = true; + configFile = pkgs.writeTextFile { + name = "config.yaml"; + text = '' + mixed-port: 7890 + external-controller: 127.0.0.1:9090 + authentication: + - "user:supersecret" + ''; + }; + }; + }; - # Proxy - machine.succeed("curl --fail --max-time 10 --proxy http://user:supersecret@localhost:7890 http://localhost") - machine.succeed("curl --fail --max-time 10 --proxy socks5://user:supersecret@localhost:7890 http://localhost") - machine.fail("curl --fail --max-time 10 --proxy http://user:supervillain@localhost:7890 http://localhost") - machine.fail("curl --fail --max-time 10 --proxy socks5://user:supervillain@localhost:7890 http://localhost") + testScript = '' + # Wait until it starts + machine.wait_for_unit("nginx.service") + machine.wait_for_unit("mihomo.service") + machine.wait_for_open_port(80) + machine.wait_for_open_port(7890) + machine.wait_for_open_port(9090) - # Web UI - result = machine.succeed("curl --fail http://localhost:9090") - target = '{"hello":"mihomo"}\n' - assert result == target, f"{result!r} != {target!r}" - ''; - } -) + # Proxy + machine.succeed("curl --fail --max-time 10 --proxy http://user:supersecret@localhost:7890 http://localhost") + machine.succeed("curl --fail --max-time 10 --proxy socks5://user:supersecret@localhost:7890 http://localhost") + machine.fail("curl --fail --max-time 10 --proxy http://user:supervillain@localhost:7890 http://localhost") + machine.fail("curl --fail --max-time 10 --proxy socks5://user:supervillain@localhost:7890 http://localhost") + + # Web UI + result = machine.succeed("curl --fail http://localhost:9090") + target = '{"hello":"mihomo"}\n' + assert result == target, f"{result!r} != {target!r}" + ''; +} diff --git a/nixos/tests/mimir.nix b/nixos/tests/mimir.nix index 981990b93a1d..9922487751df 100644 --- a/nixos/tests/mimir.nix +++ b/nixos/tests/mimir.nix @@ -1,55 +1,53 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "mimir"; - nodes = { - server = - { ... }: - { - environment.systemPackages = [ pkgs.jq ]; - services.mimir.enable = true; - services.mimir.configuration = { - ingester.ring.replication_factor = 1; +{ pkgs, ... }: +{ + name = "mimir"; + nodes = { + server = + { ... 
}: + { + environment.systemPackages = [ pkgs.jq ]; + services.mimir.enable = true; + services.mimir.configuration = { + ingester.ring.replication_factor = 1; + }; + + services.telegraf.enable = true; + services.telegraf.extraConfig = { + agent.interval = "1s"; + agent.flush_interval = "1s"; + inputs.exec = { + commands = [ + "${pkgs.coreutils}/bin/echo 'foo i=42i'" + ]; + data_format = "influx"; }; + outputs = { + http = { + # test remote write + url = "http://localhost:8080/api/v1/push"; - services.telegraf.enable = true; - services.telegraf.extraConfig = { - agent.interval = "1s"; - agent.flush_interval = "1s"; - inputs.exec = { - commands = [ - "${pkgs.coreutils}/bin/echo 'foo i=42i'" - ]; - data_format = "influx"; - }; - outputs = { - http = { - # test remote write - url = "http://localhost:8080/api/v1/push"; + # Data format to output. + data_format = "prometheusremotewrite"; - # Data format to output. - data_format = "prometheusremotewrite"; - - headers = { - Content-Type = "application/x-protobuf"; - Content-Encoding = "snappy"; - X-Scope-OrgID = "nixos"; - X-Prometheus-Remote-Write-Version = "0.1.0"; - }; + headers = { + Content-Type = "application/x-protobuf"; + Content-Encoding = "snappy"; + X-Scope-OrgID = "nixos"; + X-Prometheus-Remote-Write-Version = "0.1.0"; }; }; }; }; - }; + }; + }; - testScript = '' - start_all() - server.wait_for_unit("mimir.service") - server.wait_for_unit("telegraf.service") - server.wait_for_open_port(8080) - server.wait_until_succeeds( - "curl -H 'X-Scope-OrgID: nixos' http://127.0.0.1:8080/prometheus/api/v1/label/host/values | jq -r '.data[0]' | grep server" - ) - ''; - } -) + testScript = '' + start_all() + server.wait_for_unit("mimir.service") + server.wait_for_unit("telegraf.service") + server.wait_for_open_port(8080) + server.wait_until_succeeds( + "curl -H 'X-Scope-OrgID: nixos' http://127.0.0.1:8080/prometheus/api/v1/label/host/values | jq -r '.data[0]' | grep server" + ) + ''; +} diff --git a/nixos/tests/mindustry.nix b/nixos/tests/mindustry.nix index 3ec5fb32a0d3..d140520b1d19 100644 --- a/nixos/tests/mindustry.nix +++ b/nixos/tests/mindustry.nix @@ -1,32 +1,30 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "mindustry"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "mindustry"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = [ pkgs.mindustry ]; }; - nodes.machine = - { config, pkgs, ... }: - { - imports = [ - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - environment.systemPackages = [ pkgs.mindustry ]; - }; - - enableOCR = true; - - testScript = '' - machine.wait_for_x() - machine.execute("mindustry >&2 &") - machine.wait_for_window("Mindustry") - # Loading can take a while. Avoid wasting cycles on OCR during that time - machine.sleep(60) - machine.wait_for_text(r"(Play|Database|Editor|Mods|Settings|Quit)") - machine.screenshot("screen") - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.execute("mindustry >&2 &") + machine.wait_for_window("Mindustry") + # Loading can take a while. 
Avoid wasting cycles on OCR during that time + machine.sleep(60) + machine.wait_for_text(r"(Play|Database|Editor|Mods|Settings|Quit)") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/minecraft-server.nix b/nixos/tests/minecraft-server.nix index f8edf0aefb7a..3630025ec224 100644 --- a/nixos/tests/minecraft-server.nix +++ b/nixos/tests/minecraft-server.nix @@ -3,46 +3,44 @@ let rcon-pass = "foobar"; rcon-port = 43000; in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "minecraft-server"; - meta = with pkgs.lib.maintainers; { - maintainers = [ nequissimus ]; - }; +{ pkgs, ... }: +{ + name = "minecraft-server"; + meta = with pkgs.lib.maintainers; { + maintainers = [ nequissimus ]; + }; - nodes.server = - { ... }: - { - environment.systemPackages = [ pkgs.mcrcon ]; + nodes.server = + { ... }: + { + environment.systemPackages = [ pkgs.mcrcon ]; - nixpkgs.config.allowUnfree = true; + nixpkgs.config.allowUnfree = true; - services.minecraft-server = { - declarative = true; - enable = true; - eula = true; - serverProperties = { - enable-rcon = true; - level-seed = seed; - level-type = "flat"; - generate-structures = false; - online-mode = false; - "rcon.password" = rcon-pass; - "rcon.port" = rcon-port; - }; + services.minecraft-server = { + declarative = true; + enable = true; + eula = true; + serverProperties = { + enable-rcon = true; + level-seed = seed; + level-type = "flat"; + generate-structures = false; + online-mode = false; + "rcon.password" = rcon-pass; + "rcon.port" = rcon-port; }; - - virtualisation.memorySize = 2047; }; - testScript = '' - server.wait_for_unit("minecraft-server") - server.wait_for_open_port(${toString rcon-port}) - assert "${seed}" in server.succeed( - "mcrcon -H localhost -P ${toString rcon-port} -p '${rcon-pass}' -c 'seed'" - ) - server.succeed("systemctl stop minecraft-server") - ''; - } -) + virtualisation.memorySize = 2047; + }; + + testScript = '' + server.wait_for_unit("minecraft-server") + server.wait_for_open_port(${toString rcon-port}) + assert "${seed}" in server.succeed( + "mcrcon -H localhost -P ${toString rcon-port} -p '${rcon-pass}' -c 'seed'" + ) + server.succeed("systemctl stop minecraft-server") + ''; +} diff --git a/nixos/tests/minecraft.nix b/nixos/tests/minecraft.nix index 5f75669900a2..db577a573b23 100644 --- a/nixos/tests/minecraft.nix +++ b/nixos/tests/minecraft.nix @@ -1,42 +1,40 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "minecraft"; - meta = with lib.maintainers; { - maintainers = [ nequissimus ]; +{ pkgs, lib, ... }: +{ + name = "minecraft"; + meta = with lib.maintainers; { + maintainers = [ nequissimus ]; + }; + + nodes.client = + { nodes, ... }: + let + user = nodes.client.config.users.users.alice; + in + { + imports = [ + ./common/user-account.nix + ./common/x11.nix + ]; + + environment.systemPackages = [ pkgs.minecraft ]; + + nixpkgs.config.allowUnfree = true; + + test-support.displayManager.auto.user = user.name; }; - nodes.client = - { nodes, ... }: - let - user = nodes.client.config.users.users.alice; - in - { - imports = [ - ./common/user-account.nix - ./common/x11.nix - ]; + enableOCR = true; - environment.systemPackages = [ pkgs.minecraft ]; - - nixpkgs.config.allowUnfree = true; - - test-support.displayManager.auto.user = user.name; - }; - - enableOCR = true; - - testScript = - { nodes, ... 
}: - let - user = nodes.client.config.users.users.alice; - in - '' - client.wait_for_x() - client.execute("su - alice -c minecraft-launcher >&2 &") - client.wait_for_text("Create a new Microsoft account") - client.sleep(10) - client.screenshot("launcher") - ''; - } -) + testScript = + { nodes, ... }: + let + user = nodes.client.config.users.users.alice; + in + '' + client.wait_for_x() + client.execute("su - alice -c minecraft-launcher >&2 &") + client.wait_for_text("Create a new Microsoft account") + client.sleep(10) + client.screenshot("launcher") + ''; +} diff --git a/nixos/tests/minidlna.nix b/nixos/tests/minidlna.nix index a1add4ba4836..5e32190a9623 100644 --- a/nixos/tests/minidlna.nix +++ b/nixos/tests/minidlna.nix @@ -1,42 +1,40 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "minidlna"; +{ pkgs, ... }: +{ + name = "minidlna"; - nodes = { - server = - { ... }: - { - imports = [ ../modules/profiles/minimal.nix ]; - services.minidlna.enable = true; - services.minidlna.openFirewall = true; - services.minidlna.settings = { - log_level = "error"; - media_dir = [ - "PV,/tmp/stuff" - ]; - friendly_name = "rpi3"; - root_container = "B"; - notify_interval = 60; - album_art_names = [ - "Cover.jpg/cover.jpg/AlbumArtSmall.jpg/albumartsmall.jpg" - "AlbumArt.jpg/albumart.jpg/Album.jpg/album.jpg" - "Folder.jpg/folder.jpg/Thumb.jpg/thumb.jpg" - ]; - }; + nodes = { + server = + { ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; + services.minidlna.enable = true; + services.minidlna.openFirewall = true; + services.minidlna.settings = { + log_level = "error"; + media_dir = [ + "PV,/tmp/stuff" + ]; + friendly_name = "rpi3"; + root_container = "B"; + notify_interval = 60; + album_art_names = [ + "Cover.jpg/cover.jpg/AlbumArtSmall.jpg/albumartsmall.jpg" + "AlbumArt.jpg/albumart.jpg/Album.jpg/album.jpg" + "Folder.jpg/folder.jpg/Thumb.jpg/thumb.jpg" + ]; }; - client = { ... }: { }; - }; + }; + client = { ... }: { }; + }; - testScript = '' - start_all() - server.succeed("mkdir -p /tmp/stuff && chown minidlna: /tmp/stuff") - server.wait_for_unit("minidlna") - server.wait_for_open_port(8200) - # requests must be made *by IP* to avoid triggering minidlna's - # DNS-rebinding protection - server.succeed("curl --fail http://$(getent ahostsv4 localhost | head -n1 | cut -f 1 -d ' '):8200/") - client.succeed("curl --fail http://$(getent ahostsv4 server | head -n1 | cut -f 1 -d ' '):8200/") - ''; - } -) + testScript = '' + start_all() + server.succeed("mkdir -p /tmp/stuff && chown minidlna: /tmp/stuff") + server.wait_for_unit("minidlna") + server.wait_for_open_port(8200) + # requests must be made *by IP* to avoid triggering minidlna's + # DNS-rebinding protection + server.succeed("curl --fail http://$(getent ahostsv4 localhost | head -n1 | cut -f 1 -d ' '):8200/") + client.succeed("curl --fail http://$(getent ahostsv4 server | head -n1 | cut -f 1 -d ' '):8200/") + ''; +} diff --git a/nixos/tests/miniflux.nix b/nixos/tests/miniflux.nix index f349a7a2c45c..e13214a148a8 100644 --- a/nixos/tests/miniflux.nix +++ b/nixos/tests/miniflux.nix @@ -1,132 +1,130 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... 
}: - let - port = 3142; - username = "alice"; - password = "correcthorsebatterystaple"; - defaultPort = 8080; - defaultUsername = "admin"; - defaultPassword = "password"; - adminCredentialsFile = pkgs.writeText "admin-credentials" '' - ADMIN_USERNAME=${defaultUsername} - ADMIN_PASSWORD=${defaultPassword} - ''; - customAdminCredentialsFile = pkgs.writeText "admin-credentials" '' - ADMIN_USERNAME=${username} - ADMIN_PASSWORD=${password} - ''; - postgresPassword = "correcthorsebatterystaple"; - postgresPasswordFile = pkgs.writeText "pgpass" '' - *:*:*:*:${postgresPassword} - ''; +let + port = 3142; + username = "alice"; + password = "correcthorsebatterystaple"; + defaultPort = 8080; + defaultUsername = "admin"; + defaultPassword = "password"; + adminCredentialsFile = pkgs.writeText "admin-credentials" '' + ADMIN_USERNAME=${defaultUsername} + ADMIN_PASSWORD=${defaultPassword} + ''; + customAdminCredentialsFile = pkgs.writeText "admin-credentials" '' + ADMIN_USERNAME=${username} + ADMIN_PASSWORD=${password} + ''; + postgresPassword = "correcthorsebatterystaple"; + postgresPasswordFile = pkgs.writeText "pgpass" '' + *:*:*:*:${postgresPassword} + ''; - in - { - name = "miniflux"; - meta.maintainers = [ ]; +in +{ + name = "miniflux"; + meta.maintainers = [ ]; - nodes = { - default = - { ... }: - { - security.apparmor.enable = true; - services.miniflux = { - enable = true; - inherit adminCredentialsFile; - }; + nodes = { + default = + { ... }: + { + security.apparmor.enable = true; + services.miniflux = { + enable = true; + inherit adminCredentialsFile; }; + }; - withoutSudo = - { ... }: - { - security.apparmor.enable = true; - services.miniflux = { - enable = true; - inherit adminCredentialsFile; - }; - security.sudo.enable = false; + withoutSudo = + { ... }: + { + security.apparmor.enable = true; + services.miniflux = { + enable = true; + inherit adminCredentialsFile; }; + security.sudo.enable = false; + }; - customized = - { ... }: - { - security.apparmor.enable = true; - services.miniflux = { - enable = true; - config = { - CLEANUP_FREQUENCY = "48"; - LISTEN_ADDR = "localhost:${toString port}"; - }; - adminCredentialsFile = customAdminCredentialsFile; + customized = + { ... }: + { + security.apparmor.enable = true; + services.miniflux = { + enable = true; + config = { + CLEANUP_FREQUENCY = "48"; + LISTEN_ADDR = "localhost:${toString port}"; }; + adminCredentialsFile = customAdminCredentialsFile; }; + }; - postgresTcp = - { - config, - pkgs, - lib, - ... - }: - { - services.postgresql = { - enable = true; - initialScript = pkgs.writeText "init-postgres" '' - CREATE USER miniflux WITH PASSWORD '${postgresPassword}'; - CREATE DATABASE miniflux WITH OWNER miniflux; - ''; - enableTCPIP = true; - authentication = '' - host sameuser miniflux samenet scram-sha-256 - ''; - }; - systemd.services.postgresql.postStart = lib.mkAfter '' - $PSQL -tAd miniflux -c 'CREATE EXTENSION hstore;' + postgresTcp = + { + config, + pkgs, + lib, + ... + }: + { + services.postgresql = { + enable = true; + initialScript = pkgs.writeText "init-postgres" '' + CREATE USER miniflux WITH PASSWORD '${postgresPassword}'; + CREATE DATABASE miniflux WITH OWNER miniflux; ''; - networking.firewall.allowedTCPPorts = [ config.services.postgresql.settings.port ]; - }; - externalDb = - { ... 
}: - { - security.apparmor.enable = true; - services.miniflux = { - enable = true; - createDatabaseLocally = false; - inherit adminCredentialsFile; - config = { - DATABASE_URL = "user=miniflux host=postgresTcp dbname=miniflux sslmode=disable"; - PGPASSFILE = "/run/miniflux/pgpass"; - }; - }; - systemd.services.miniflux.preStart = '' - cp ${postgresPasswordFile} /run/miniflux/pgpass - chmod 600 /run/miniflux/pgpass + enableTCPIP = true; + authentication = '' + host sameuser miniflux samenet scram-sha-256 ''; }; - }; - testScript = '' - def runTest(machine, port, user): - machine.wait_for_unit("miniflux.service") - machine.wait_for_open_port(port) - machine.succeed(f"curl --fail 'http://localhost:{port}/healthcheck' | grep OK") - machine.succeed( - f"curl 'http://localhost:{port}/v1/me' -u '{user}' -H Content-Type:application/json | grep '\"is_admin\":true'" - ) - machine.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""') + systemd.services.postgresql.postStart = lib.mkAfter '' + $PSQL -tAd miniflux -c 'CREATE EXTENSION hstore;' + ''; + networking.firewall.allowedTCPPorts = [ config.services.postgresql.settings.port ]; + }; + externalDb = + { ... }: + { + security.apparmor.enable = true; + services.miniflux = { + enable = true; + createDatabaseLocally = false; + inherit adminCredentialsFile; + config = { + DATABASE_URL = "user=miniflux host=postgresTcp dbname=miniflux sslmode=disable"; + PGPASSFILE = "/run/miniflux/pgpass"; + }; + }; + systemd.services.miniflux.preStart = '' + cp ${postgresPasswordFile} /run/miniflux/pgpass + chmod 600 /run/miniflux/pgpass + ''; + }; + }; + testScript = '' + def runTest(machine, port, user): + machine.wait_for_unit("miniflux.service") + machine.wait_for_open_port(port) + machine.succeed(f"curl --fail 'http://localhost:{port}/healthcheck' | grep OK") + machine.succeed( + f"curl 'http://localhost:{port}/v1/me' -u '{user}' -H Content-Type:application/json | grep '\"is_admin\":true'" + ) + machine.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""') - default.start() - withoutSudo.start() - customized.start() - postgresTcp.start() + default.start() + withoutSudo.start() + customized.start() + postgresTcp.start() - runTest(default, ${toString defaultPort}, "${defaultUsername}:${defaultPassword}") - runTest(withoutSudo, ${toString defaultPort}, "${defaultUsername}:${defaultPassword}") - runTest(customized, ${toString port}, "${username}:${password}") + runTest(default, ${toString defaultPort}, "${defaultUsername}:${defaultPassword}") + runTest(withoutSudo, ${toString defaultPort}, "${defaultUsername}:${defaultPassword}") + runTest(customized, ${toString port}, "${username}:${password}") - postgresTcp.wait_for_unit("postgresql.service") - externalDb.start() - runTest(externalDb, ${toString defaultPort}, "${defaultUsername}:${defaultPassword}") - ''; - } -) + postgresTcp.wait_for_unit("postgresql.service") + externalDb.start() + runTest(externalDb, ${toString defaultPort}, "${defaultUsername}:${defaultPassword}") + ''; +} diff --git a/nixos/tests/minio.nix b/nixos/tests/minio.nix index d4d34ea660f0..dfe7f169def1 100644 --- a/nixos/tests/minio.nix +++ b/nixos/tests/minio.nix @@ -1,116 +1,114 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } '' - openssl req \ - -x509 -newkey rsa:4096 -sha256 -days 365 \ - -nodes -out cert.pem -keyout key.pem \ - -subj '/CN=minio' -addext "subjectAltName=DNS:localhost" +{ pkgs, ... 
}: +let + tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } '' + openssl req \ + -x509 -newkey rsa:4096 -sha256 -days 365 \ + -nodes -out cert.pem -keyout key.pem \ + -subj '/CN=minio' -addext "subjectAltName=DNS:localhost" - mkdir -p $out - cp key.pem cert.pem $out - ''; + mkdir -p $out + cp key.pem cert.pem $out + ''; - accessKey = "BKIKJAA5BMMU2RHO6IBB"; - secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; - minioPythonScript = pkgs.writeScript "minio-test.py" '' - #! ${pkgs.python3.withPackages (ps: [ ps.minio ])}/bin/python - import io - import os - import sys - from minio import Minio + accessKey = "BKIKJAA5BMMU2RHO6IBB"; + secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; + minioPythonScript = pkgs.writeScript "minio-test.py" '' + #! ${pkgs.python3.withPackages (ps: [ ps.minio ])}/bin/python + import io + import os + import sys + from minio import Minio - if len(sys.argv) > 1 and sys.argv[1] == 'tls': - tls = True - else: - tls = False + if len(sys.argv) > 1 and sys.argv[1] == 'tls': + tls = True + else: + tls = False - minioClient = Minio('localhost:9000', - access_key='${accessKey}', - secret_key='${secretKey}', - secure=tls, - cert_check=False) - sio = io.BytesIO() - sio.write(b'Test from Python') - sio.seek(0, os.SEEK_END) - sio_len = sio.tell() - sio.seek(0) - minioClient.put_object('test-bucket', 'test.txt', sio, sio_len, content_type='text/plain') - ''; - rootCredentialsFile = "/etc/nixos/minio-root-credentials"; - credsPartial = pkgs.writeText "minio-credentials-partial" '' - MINIO_ROOT_USER=${accessKey} - ''; - credsFull = pkgs.writeText "minio-credentials-full" '' - MINIO_ROOT_USER=${accessKey} - MINIO_ROOT_PASSWORD=${secretKey} - ''; - in - { - name = "minio"; - meta = with pkgs.lib.maintainers; { - maintainers = [ bachp ]; - }; + minioClient = Minio('localhost:9000', + access_key='${accessKey}', + secret_key='${secretKey}', + secure=tls, + cert_check=False) + sio = io.BytesIO() + sio.write(b'Test from Python') + sio.seek(0, os.SEEK_END) + sio_len = sio.tell() + sio.seek(0) + minioClient.put_object('test-bucket', 'test.txt', sio, sio_len, content_type='text/plain') + ''; + rootCredentialsFile = "/etc/nixos/minio-root-credentials"; + credsPartial = pkgs.writeText "minio-credentials-partial" '' + MINIO_ROOT_USER=${accessKey} + ''; + credsFull = pkgs.writeText "minio-credentials-full" '' + MINIO_ROOT_USER=${accessKey} + MINIO_ROOT_PASSWORD=${secretKey} + ''; +in +{ + name = "minio"; + meta = with pkgs.lib.maintainers; { + maintainers = [ bachp ]; + }; - nodes = { - machine = - { pkgs, ... }: - { - services.minio = { - enable = true; - inherit rootCredentialsFile; - }; - environment.systemPackages = [ pkgs.minio-client ]; - - # Minio requires at least 1GiB of free disk space to run. - virtualisation.diskSize = 4 * 1024; - - # Minio pre allocates 2GiB or memory, reserve some more - virtualisation.memorySize = 4096; + nodes = { + machine = + { pkgs, ... }: + { + services.minio = { + enable = true; + inherit rootCredentialsFile; }; - }; + environment.systemPackages = [ pkgs.minio-client ]; - testScript = '' + # Minio requires at least 1GiB of free disk space to run. 
+ virtualisation.diskSize = 4 * 1024; - start_all() - # simulate manually editing root credentials file - machine.wait_for_unit("multi-user.target") - machine.copy_from_host("${credsFull}", "${rootCredentialsFile}") + # Minio pre allocates 2GiB or memory, reserve some more + virtualisation.memorySize = 4096; + }; + }; - # Test non-TLS server - machine.wait_for_unit("minio.service") - machine.wait_for_open_port(9000) + testScript = '' - # Create a test bucket on the server - machine.succeed( - "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4" - ) - machine.succeed("mc mb minio/test-bucket") - machine.succeed("${minioPythonScript}") - assert "test-bucket" in machine.succeed("mc ls minio") - assert "Test from Python" in machine.succeed("mc cat minio/test-bucket/test.txt") - machine.succeed("mc rb --force minio/test-bucket") - machine.systemctl("stop minio.service") + start_all() + # simulate manually editing root credentials file + machine.wait_for_unit("multi-user.target") + machine.copy_from_host("${credsFull}", "${rootCredentialsFile}") - # Test TLS server - machine.copy_from_host("${tls-cert}/cert.pem", "/var/lib/minio/certs/public.crt") - machine.copy_from_host("${tls-cert}/key.pem", "/var/lib/minio/certs/private.key") + # Test non-TLS server + machine.wait_for_unit("minio.service") + machine.wait_for_open_port(9000) - machine.systemctl("start minio.service") - machine.wait_for_unit("minio.service") - machine.wait_for_open_port(9000) + # Create a test bucket on the server + machine.succeed( + "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4" + ) + machine.succeed("mc mb minio/test-bucket") + machine.succeed("${minioPythonScript}") + assert "test-bucket" in machine.succeed("mc ls minio") + assert "Test from Python" in machine.succeed("mc cat minio/test-bucket/test.txt") + machine.succeed("mc rb --force minio/test-bucket") + machine.systemctl("stop minio.service") - # Create a test bucket on the server - machine.succeed( - "mc config host add minio https://localhost:9000 ${accessKey} ${secretKey} --api s3v4" - ) - machine.succeed("mc --insecure mb minio/test-bucket") - machine.succeed("${minioPythonScript} tls") - assert "test-bucket" in machine.succeed("mc --insecure ls minio") - assert "Test from Python" in machine.succeed("mc --insecure cat minio/test-bucket/test.txt") - machine.succeed("mc --insecure rb --force minio/test-bucket") + # Test TLS server + machine.copy_from_host("${tls-cert}/cert.pem", "/var/lib/minio/certs/public.crt") + machine.copy_from_host("${tls-cert}/key.pem", "/var/lib/minio/certs/private.key") - machine.shutdown() - ''; - } -) + machine.systemctl("start minio.service") + machine.wait_for_unit("minio.service") + machine.wait_for_open_port(9000) + + # Create a test bucket on the server + machine.succeed( + "mc config host add minio https://localhost:9000 ${accessKey} ${secretKey} --api s3v4" + ) + machine.succeed("mc --insecure mb minio/test-bucket") + machine.succeed("${minioPythonScript} tls") + assert "test-bucket" in machine.succeed("mc --insecure ls minio") + assert "Test from Python" in machine.succeed("mc --insecure cat minio/test-bucket/test.txt") + machine.succeed("mc --insecure rb --force minio/test-bucket") + + machine.shutdown() + ''; +} diff --git a/nixos/tests/misc.nix b/nixos/tests/misc.nix index 55d719aeafbb..50efcbd28999 100644 --- a/nixos/tests/misc.nix +++ b/nixos/tests/misc.nix @@ -1,170 +1,168 @@ # Miscellaneous small tests that don't warrant their own VM run. 
-import ./make-test-python.nix ( - { lib, pkgs, ... }: - let - foo = pkgs.writeText "foo" "Hello World"; - in - { - name = "misc"; - meta.maintainers = with lib.maintainers; [ eelco ]; +{ lib, pkgs, ... }: +let + foo = pkgs.writeText "foo" "Hello World"; +in +{ + name = "misc"; + meta.maintainers = with lib.maintainers; [ eelco ]; - nodes.machine = - { lib, ... }: - { - swapDevices = lib.mkOverride 0 [ - { - device = "/root/swapfile"; - size = 128; - } - ]; - environment.variables.EDITOR = lib.mkOverride 0 "emacs"; - systemd.tmpfiles.rules = [ "d /tmp 1777 root root 10d" ]; - systemd.tmpfiles.settings."10-test"."/tmp/somefile".d = { }; - virtualisation.fileSystems = { - "/tmp2" = { - fsType = "tmpfs"; - options = [ - "mode=1777" - "noauto" - ]; - }; - # Tests https://discourse.nixos.org/t/how-to-make-a-derivations-executables-have-the-s-permission/8555 - "/user-mount/point" = { - device = "/user-mount/source"; - fsType = "none"; - options = [ - "bind" - "rw" - "user" - "noauto" - ]; - }; - "/user-mount/denied-point" = { - device = "/user-mount/denied-source"; - fsType = "none"; - options = [ - "bind" - "rw" - "noauto" - ]; - }; + nodes.machine = + { lib, ... }: + { + swapDevices = lib.mkOverride 0 [ + { + device = "/root/swapfile"; + size = 128; + } + ]; + environment.variables.EDITOR = lib.mkOverride 0 "emacs"; + systemd.tmpfiles.rules = [ "d /tmp 1777 root root 10d" ]; + systemd.tmpfiles.settings."10-test"."/tmp/somefile".d = { }; + virtualisation.fileSystems = { + "/tmp2" = { + fsType = "tmpfs"; + options = [ + "mode=1777" + "noauto" + ]; }; - systemd.automounts = lib.singleton { - wantedBy = [ "multi-user.target" ]; - where = "/tmp2"; + # Tests https://discourse.nixos.org/t/how-to-make-a-derivations-executables-have-the-s-permission/8555 + "/user-mount/point" = { + device = "/user-mount/source"; + fsType = "none"; + options = [ + "bind" + "rw" + "user" + "noauto" + ]; }; - users.users.sybil = { - isNormalUser = true; - group = "wheel"; + "/user-mount/denied-point" = { + device = "/user-mount/denied-source"; + fsType = "none"; + options = [ + "bind" + "rw" + "noauto" + ]; }; - users.users.alice = { - isNormalUser = true; - }; - security.sudo = { - enable = true; - wheelNeedsPassword = false; - }; - boot.kernel.sysctl."vm.swappiness" = 1; - boot.kernelParams = [ "vsyscall=emulate" ]; - system.extraDependencies = [ foo ]; }; + systemd.automounts = lib.singleton { + wantedBy = [ "multi-user.target" ]; + where = "/tmp2"; + }; + users.users.sybil = { + isNormalUser = true; + group = "wheel"; + }; + users.users.alice = { + isNormalUser = true; + }; + security.sudo = { + enable = true; + wheelNeedsPassword = false; + }; + boot.kernel.sysctl."vm.swappiness" = 1; + boot.kernelParams = [ "vsyscall=emulate" ]; + system.extraDependencies = [ foo ]; + }; - testScript = '' - with subtest("nixos-version"): - machine.succeed("[ `nixos-version | wc -w` = 2 ]") + testScript = '' + with subtest("nixos-version"): + machine.succeed("[ `nixos-version | wc -w` = 2 ]") - with subtest("nixos-rebuild"): - assert "NixOS module" in machine.succeed("nixos-rebuild --help") + with subtest("nixos-rebuild"): + assert "NixOS module" in machine.succeed("nixos-rebuild --help") - with subtest("Sanity check for uid/gid assignment"): - assert "4" == machine.succeed("id -u messagebus").strip() - assert "4" == machine.succeed("id -g messagebus").strip() - assert "users:x:100:" == machine.succeed("getent group users").strip() + with subtest("Sanity check for uid/gid assignment"): + assert "4" == machine.succeed("id -u 
messagebus").strip() + assert "4" == machine.succeed("id -g messagebus").strip() + assert "users:x:100:" == machine.succeed("getent group users").strip() - with subtest("Regression test for GMP aborts on QEMU."): - machine.succeed("expr 1 + 2") + with subtest("Regression test for GMP aborts on QEMU."): + machine.succeed("expr 1 + 2") - with subtest("the swap file got created"): - machine.wait_for_unit("root-swapfile.swap") - machine.succeed("ls -l /root/swapfile | grep 134217728") + with subtest("the swap file got created"): + machine.wait_for_unit("root-swapfile.swap") + machine.succeed("ls -l /root/swapfile | grep 134217728") - with subtest("whether kernel.poweroff_cmd is set"): - machine.succeed('[ -x "$(cat /proc/sys/kernel/poweroff_cmd)" ]') + with subtest("whether kernel.poweroff_cmd is set"): + machine.succeed('[ -x "$(cat /proc/sys/kernel/poweroff_cmd)" ]') - with subtest("whether the io cgroupv2 controller is properly enabled"): - machine.succeed("grep -q '\\bio\\b' /sys/fs/cgroup/cgroup.controllers") + with subtest("whether the io cgroupv2 controller is properly enabled"): + machine.succeed("grep -q '\\bio\\b' /sys/fs/cgroup/cgroup.controllers") - with subtest("whether we have a reboot record in wtmp"): - machine.shutdown - machine.wait_for_unit("multi-user.target") - machine.succeed("last | grep reboot >&2") + with subtest("whether we have a reboot record in wtmp"): + machine.shutdown + machine.wait_for_unit("multi-user.target") + machine.succeed("last | grep reboot >&2") - with subtest("whether we can override environment variables"): - machine.succeed('[ "$EDITOR" = emacs ]') + with subtest("whether we can override environment variables"): + machine.succeed('[ "$EDITOR" = emacs ]') - with subtest("whether hostname (and by extension nss_myhostname) works"): - assert "machine" == machine.succeed("hostname").strip() - assert "machine" == machine.succeed("hostname -s").strip() + with subtest("whether hostname (and by extension nss_myhostname) works"): + assert "machine" == machine.succeed("hostname").strip() + assert "machine" == machine.succeed("hostname -s").strip() - with subtest("whether systemd-udevd automatically loads modules for our hardware"): - machine.succeed("systemctl start systemd-udev-settle.service") - machine.wait_for_unit("systemd-udev-settle.service") - assert "mousedev" in machine.succeed("lsmod") + with subtest("whether systemd-udevd automatically loads modules for our hardware"): + machine.succeed("systemctl start systemd-udev-settle.service") + machine.wait_for_unit("systemd-udev-settle.service") + assert "mousedev" in machine.succeed("lsmod") - with subtest("whether systemd-tmpfiles-clean works"): - machine.succeed( - "touch /tmp/foo", "systemctl start systemd-tmpfiles-clean", "[ -e /tmp/foo ]" - ) - # move into the future - machine.succeed( - 'date -s "@$(($(date +%s) + 1000000))"', - "systemctl start systemd-tmpfiles-clean", - ) - machine.fail("[ -e /tmp/foo ]") + with subtest("whether systemd-tmpfiles-clean works"): + machine.succeed( + "touch /tmp/foo", "systemctl start systemd-tmpfiles-clean", "[ -e /tmp/foo ]" + ) + # move into the future + machine.succeed( + 'date -s "@$(($(date +%s) + 1000000))"', + "systemctl start systemd-tmpfiles-clean", + ) + machine.fail("[ -e /tmp/foo ]") - with subtest("whether systemd-tmpfiles settings works"): - machine.succeed("[ -e /tmp/somefile ]") + with subtest("whether systemd-tmpfiles settings works"): + machine.succeed("[ -e /tmp/somefile ]") - with subtest("/etc/mtab"): - assert "/proc/mounts" == 
machine.succeed("readlink --no-newline /etc/mtab") + with subtest("/etc/mtab"): + assert "/proc/mounts" == machine.succeed("readlink --no-newline /etc/mtab") - with subtest("whether automounting works"): - machine.fail("grep '/tmp2 tmpfs' /proc/mounts") - machine.succeed("touch /tmp2/x") - machine.succeed("grep '/tmp2 tmpfs' /proc/mounts") + with subtest("whether automounting works"): + machine.fail("grep '/tmp2 tmpfs' /proc/mounts") + machine.succeed("touch /tmp2/x") + machine.succeed("grep '/tmp2 tmpfs' /proc/mounts") - with subtest( - "Whether mounting by a user is possible with the `user` option in fstab (#95444)" - ): - machine.succeed("mkdir -p /user-mount/source") - machine.succeed("touch /user-mount/source/file") - machine.succeed("chmod -R a+Xr /user-mount/source") - machine.succeed("mkdir /user-mount/point") - machine.succeed("chown alice:users /user-mount/point") - machine.succeed("su - alice -c 'mount /user-mount/point'") - machine.succeed("su - alice -c 'ls /user-mount/point/file'") - with subtest( - "Whether mounting by a user is denied without the `user` option in fstab" - ): - machine.succeed("mkdir -p /user-mount/denied-source") - machine.succeed("touch /user-mount/denied-source/file") - machine.succeed("chmod -R a+Xr /user-mount/denied-source") - machine.succeed("mkdir /user-mount/denied-point") - machine.succeed("chown alice:users /user-mount/denied-point") - machine.fail("su - alice -c 'mount /user-mount/denied-point'") + with subtest( + "Whether mounting by a user is possible with the `user` option in fstab (#95444)" + ): + machine.succeed("mkdir -p /user-mount/source") + machine.succeed("touch /user-mount/source/file") + machine.succeed("chmod -R a+Xr /user-mount/source") + machine.succeed("mkdir /user-mount/point") + machine.succeed("chown alice:users /user-mount/point") + machine.succeed("su - alice -c 'mount /user-mount/point'") + machine.succeed("su - alice -c 'ls /user-mount/point/file'") + with subtest( + "Whether mounting by a user is denied without the `user` option in fstab" + ): + machine.succeed("mkdir -p /user-mount/denied-source") + machine.succeed("touch /user-mount/denied-source/file") + machine.succeed("chmod -R a+Xr /user-mount/denied-source") + machine.succeed("mkdir /user-mount/denied-point") + machine.succeed("chown alice:users /user-mount/denied-point") + machine.fail("su - alice -c 'mount /user-mount/denied-point'") - with subtest("shell-vars"): - machine.succeed('[ -n "$NIX_PATH" ]') + with subtest("shell-vars"): + machine.succeed('[ -n "$NIX_PATH" ]') - with subtest("Test sysctl"): - machine.wait_for_unit("systemd-sysctl.service") - assert "1" == machine.succeed("sysctl -ne vm.swappiness").strip() - machine.execute("sysctl vm.swappiness=60") - assert "60" == machine.succeed("sysctl -ne vm.swappiness").strip() + with subtest("Test sysctl"): + machine.wait_for_unit("systemd-sysctl.service") + assert "1" == machine.succeed("sysctl -ne vm.swappiness").strip() + machine.execute("sysctl vm.swappiness=60") + assert "60" == machine.succeed("sysctl -ne vm.swappiness").strip() - with subtest("Test boot parameters"): - assert "vsyscall=emulate" in machine.succeed("cat /proc/cmdline") - ''; - } -) + with subtest("Test boot parameters"): + assert "vsyscall=emulate" in machine.succeed("cat /proc/cmdline") + ''; +} diff --git a/nixos/tests/misskey.nix b/nixos/tests/misskey.nix index 1a450c518aae..9afa7aa0cee4 100644 --- a/nixos/tests/misskey.nix +++ b/nixos/tests/misskey.nix @@ -1,29 +1,27 @@ -import ./make-test-python.nix ( - { lib, ... 
}: - let - port = 61812; - in - { - name = "misskey"; +{ lib, ... }: +let + port = 61812; +in +{ + name = "misskey"; - meta.maintainers = [ lib.maintainers.feathecutie ]; + meta.maintainers = [ lib.maintainers.feathecutie ]; - nodes.machine = { - services.misskey = { - enable = true; - settings = { - url = "http://misskey.local"; - inherit port; - }; - database.createLocally = true; - redis.createLocally = true; + nodes.machine = { + services.misskey = { + enable = true; + settings = { + url = "http://misskey.local"; + inherit port; }; + database.createLocally = true; + redis.createLocally = true; }; + }; - testScript = '' - machine.wait_for_unit("misskey.service") - machine.wait_for_open_port(${toString port}) - machine.succeed("curl --fail http://localhost:${toString port}/") - ''; - } -) + testScript = '' + machine.wait_for_unit("misskey.service") + machine.wait_for_open_port(${toString port}) + machine.succeed("curl --fail http://localhost:${toString port}/") + ''; +} diff --git a/nixos/tests/mod_perl.nix b/nixos/tests/mod_perl.nix index 677f2bcbc969..6bcca2b914f4 100644 --- a/nixos/tests/mod_perl.nix +++ b/nixos/tests/mod_perl.nix @@ -1,65 +1,63 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "mod_perl"; +{ pkgs, lib, ... }: +{ + name = "mod_perl"; - meta = with pkgs.lib.maintainers; { - maintainers = [ sgo ]; - }; + meta = with pkgs.lib.maintainers; { + maintainers = [ sgo ]; + }; - nodes.machine = - { - config, - lib, - pkgs, - ... - }: - { - services.httpd = { - enable = true; - adminAddr = "admin@localhost"; - virtualHosts."modperl" = - let - inc = pkgs.writeTextDir "ModPerlTest.pm" '' - package ModPerlTest; - use strict; - use Apache2::RequestRec (); - use Apache2::RequestIO (); - use Apache2::Const -compile => qw(OK); - sub handler { - my $r = shift; - $r->content_type('text/plain'); - print "Hello mod_perl!\n"; - return Apache2::Const::OK; - } - 1; - ''; - startup = pkgs.writeScript "startup.pl" '' - use lib "${inc}", - split ":","${with pkgs.perl.pkgs; makeFullPerlPath ([ mod_perl2 ])}"; - 1; - ''; - in - { + nodes.machine = + { + config, + lib, + pkgs, + ... + }: + { + services.httpd = { + enable = true; + adminAddr = "admin@localhost"; + virtualHosts."modperl" = + let + inc = pkgs.writeTextDir "ModPerlTest.pm" '' + package ModPerlTest; + use strict; + use Apache2::RequestRec (); + use Apache2::RequestIO (); + use Apache2::Const -compile => qw(OK); + sub handler { + my $r = shift; + $r->content_type('text/plain'); + print "Hello mod_perl!\n"; + return Apache2::Const::OK; + } + 1; + ''; + startup = pkgs.writeScript "startup.pl" '' + use lib "${inc}", + split ":","${with pkgs.perl.pkgs; makeFullPerlPath ([ mod_perl2 ])}"; + 1; + ''; + in + { + extraConfig = '' + PerlRequire ${startup} + ''; + locations."/modperl" = { extraConfig = '' - PerlRequire ${startup} + SetHandler perl-script + PerlResponseHandler ModPerlTest ''; - locations."/modperl" = { - extraConfig = '' - SetHandler perl-script - PerlResponseHandler ModPerlTest - ''; - }; }; - enablePerl = true; - }; + }; + enablePerl = true; }; - testScript = - { ... }: - '' - machine.wait_for_unit("httpd.service") - response = machine.succeed("curl -fvvv -s http://127.0.0.1:80/modperl") - assert "Hello mod_perl!" in response, "/modperl handler did not respond" - ''; - } -) + }; + testScript = + { ... }: + '' + machine.wait_for_unit("httpd.service") + response = machine.succeed("curl -fvvv -s http://127.0.0.1:80/modperl") + assert "Hello mod_perl!" 
in response, "/modperl handler did not respond" + ''; +} diff --git a/nixos/tests/molly-brown.nix b/nixos/tests/molly-brown.nix index 72e42c4a5be8..56d969ae79bf 100644 --- a/nixos/tests/molly-brown.nix +++ b/nixos/tests/molly-brown.nix @@ -1,79 +1,77 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - testString = "NixOS Gemini test successful"; - in - { +let + testString = "NixOS Gemini test successful"; +in +{ - name = "molly-brown"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ehmry ]; - }; + name = "molly-brown"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ehmry ]; + }; - nodes = { + nodes = { - geminiServer = - { config, pkgs, ... }: - let - inherit (config.networking) hostName; - cfg = config.services.molly-brown; - in - { + geminiServer = + { config, pkgs, ... }: + let + inherit (config.networking) hostName; + cfg = config.services.molly-brown; + in + { - environment.systemPackages = [ - (pkgs.writeScriptBin "test-gemini" '' - #!${pkgs.python3}/bin/python + environment.systemPackages = [ + (pkgs.writeScriptBin "test-gemini" '' + #!${pkgs.python3}/bin/python - import socket - import ssl - import tempfile - import textwrap - import urllib.parse + import socket + import ssl + import tempfile + import textwrap + import urllib.parse - url = "gemini://geminiServer/init.gmi" - parsed_url = urllib.parse.urlparse(url) + url = "gemini://geminiServer/init.gmi" + parsed_url = urllib.parse.urlparse(url) - s = socket.create_connection((parsed_url.netloc, 1965)) - context = ssl.SSLContext() - context.check_hostname = False - context.verify_mode = ssl.CERT_NONE - s = context.wrap_socket(s, server_hostname=parsed_url.netloc) - s.sendall((url + "\r\n").encode("UTF-8")) - fp = s.makefile("rb") - print(fp.readline().strip()) - print(fp.readline().strip()) - print(fp.readline().strip()) - '') - ]; + s = socket.create_connection((parsed_url.netloc, 1965)) + context = ssl.SSLContext() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + s = context.wrap_socket(s, server_hostname=parsed_url.netloc) + s.sendall((url + "\r\n").encode("UTF-8")) + fp = s.makefile("rb") + print(fp.readline().strip()) + print(fp.readline().strip()) + print(fp.readline().strip()) + '') + ]; - networking.firewall.allowedTCPPorts = [ cfg.settings.Port ]; + networking.firewall.allowedTCPPorts = [ cfg.settings.Port ]; - services.molly-brown = { - enable = true; - docBase = "/tmp/docs"; - certPath = "/tmp/cert.pem"; - keyPath = "/tmp/key.pem"; - }; - - systemd.services.molly-brown.preStart = '' - ${pkgs.openssl}/bin/openssl genrsa -out "/tmp/key.pem" - ${pkgs.openssl}/bin/openssl req -new \ - -subj "/CN=${config.networking.hostName}" \ - -key "/tmp/key.pem" -out /tmp/request.pem - ${pkgs.openssl}/bin/openssl x509 -req -days 3650 \ - -in /tmp/request.pem -signkey "/tmp/key.pem" -out "/tmp/cert.pem" - - mkdir -p "${cfg.settings.DocBase}" - echo "${testString}" > "${cfg.settings.DocBase}/test.gmi" - ''; + services.molly-brown = { + enable = true; + docBase = "/tmp/docs"; + certPath = "/tmp/cert.pem"; + keyPath = "/tmp/key.pem"; }; - }; - testScript = '' - geminiServer.wait_for_unit("molly-brown") - geminiServer.wait_for_open_port(1965) - geminiServer.succeed("test-gemini") - ''; - } -) + systemd.services.molly-brown.preStart = '' + ${pkgs.openssl}/bin/openssl genrsa -out "/tmp/key.pem" + ${pkgs.openssl}/bin/openssl req -new \ + -subj "/CN=${config.networking.hostName}" \ + -key "/tmp/key.pem" -out /tmp/request.pem + ${pkgs.openssl}/bin/openssl x509 -req -days 3650 \ + 
-in /tmp/request.pem -signkey "/tmp/key.pem" -out "/tmp/cert.pem" + + mkdir -p "${cfg.settings.DocBase}" + echo "${testString}" > "${cfg.settings.DocBase}/test.gmi" + ''; + }; + }; + testScript = '' + geminiServer.wait_for_unit("molly-brown") + geminiServer.wait_for_open_port(1965) + geminiServer.succeed("test-gemini") + ''; + +} diff --git a/nixos/tests/mollysocket.nix b/nixos/tests/mollysocket.nix index ccfc20fdda6b..b4acbb1095cb 100644 --- a/nixos/tests/mollysocket.nix +++ b/nixos/tests/mollysocket.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - port = 1234; - in - { - name = "mollysocket"; - meta.maintainers = with lib.maintainers; [ dotlambda ]; +let + port = 1234; +in +{ + name = "mollysocket"; + meta.maintainers = with lib.maintainers; [ dotlambda ]; - nodes.mollysocket = - { ... }: - { - services.mollysocket = { - enable = true; - settings = { - inherit port; - }; + nodes.mollysocket = + { ... }: + { + services.mollysocket = { + enable = true; + settings = { + inherit port; }; }; + }; - testScript = '' - mollysocket.wait_for_unit("mollysocket.service") - mollysocket.wait_for_open_port(${toString port}) + testScript = '' + mollysocket.wait_for_unit("mollysocket.service") + mollysocket.wait_for_open_port(${toString port}) - out = mollysocket.succeed("curl --fail http://127.0.0.1:${toString port}") - assert "Version ${pkgs.mollysocket.version}" in out - ''; - } -) + out = mollysocket.succeed("curl --fail http://127.0.0.1:${toString port}") + assert "Version ${pkgs.mollysocket.version}" in out + ''; +} diff --git a/nixos/tests/monado.nix b/nixos/tests/monado.nix index 9b7e636d1f66..609b0fa647e5 100644 --- a/nixos/tests/monado.nix +++ b/nixos/tests/monado.nix @@ -1,51 +1,49 @@ -import ./make-test-python.nix ( - { ... }: - { - name = "monado"; +{ ... }: +{ + name = "monado"; - nodes.machine = - { pkgs, ... }: + nodes.machine = + { pkgs, ... }: - { - hardware.graphics.enable = true; - users.users.alice = { - isNormalUser = true; - uid = 1000; - }; - - services.monado = { - enable = true; - defaultRuntime = true; - - forceDefaultRuntime = true; - }; - # Stop Monado from probing for any hardware - systemd.user.services.monado.environment.SIMULATED_ENABLE = "1"; - - environment.systemPackages = with pkgs; [ openxr-loader ]; + { + hardware.graphics.enable = true; + users.users.alice = { + isNormalUser = true; + uid = 1000; }; - testScript = - { nodes, ... }: - let - userId = toString nodes.machine.users.users.alice.uid; - runtimePath = "/run/user/${userId}"; - in - '' - # for defaultRuntime - machine.succeed("stat /etc/xdg/openxr/1/active_runtime.json") + services.monado = { + enable = true; + defaultRuntime = true; - machine.succeed("loginctl enable-linger alice") - machine.wait_for_unit("user@${userId}.service") + forceDefaultRuntime = true; + }; + # Stop Monado from probing for any hardware + systemd.user.services.monado.environment.SIMULATED_ENABLE = "1"; - machine.wait_for_unit("monado.socket", "alice") - machine.systemctl("start monado.service", "alice") - machine.wait_for_unit("monado.service", "alice") + environment.systemPackages = with pkgs; [ openxr-loader ]; + }; - # for forceDefaultRuntime - machine.succeed("stat /home/alice/.config/openxr/1/active_runtime.json") + testScript = + { nodes, ... 
}: + let + userId = toString nodes.machine.users.users.alice.uid; + runtimePath = "/run/user/${userId}"; + in + '' + # for defaultRuntime + machine.succeed("stat /etc/xdg/openxr/1/active_runtime.json") - machine.succeed("su -- alice -c env XDG_RUNTIME_DIR=${runtimePath} openxr_runtime_list") - ''; - } -) + machine.succeed("loginctl enable-linger alice") + machine.wait_for_unit("user@${userId}.service") + + machine.wait_for_unit("monado.socket", "alice") + machine.systemctl("start monado.service", "alice") + machine.wait_for_unit("monado.service", "alice") + + # for forceDefaultRuntime + machine.succeed("stat /home/alice/.config/openxr/1/active_runtime.json") + + machine.succeed("su -- alice -c env XDG_RUNTIME_DIR=${runtimePath} openxr_runtime_list") + ''; +} diff --git a/nixos/tests/monetdb.nix b/nixos/tests/monetdb.nix index 53c732338554..0ea2d2843635 100644 --- a/nixos/tests/monetdb.nix +++ b/nixos/tests/monetdb.nix @@ -1,81 +1,79 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - creds = pkgs.writeText ".monetdb" '' - user=monetdb - password=monetdb - ''; - createUser = pkgs.writeText "createUser.sql" '' - CREATE USER "voc" WITH PASSWORD 'voc' NAME 'VOC Explorer' SCHEMA "sys"; - CREATE SCHEMA "voc" AUTHORIZATION "voc"; - ALTER USER "voc" SET SCHEMA "voc"; - ''; - credsVoc = pkgs.writeText ".monetdb" '' - user=voc - password=voc - ''; - transaction = pkgs.writeText "transaction" '' - START TRANSACTION; - CREATE TABLE test (id int, data varchar(30)); - ROLLBACK; - ''; - vocData = pkgs.fetchzip { - url = "https://dev.monetdb.org/Assets/VOC/voc_dump.zip"; - hash = "sha256-sQ5acTsSAiXQfOgt2PhN7X7Z9TZGZtLrPPxgQT2pCGQ="; - }; - onboardPeople = pkgs.writeText "onboardPeople" '' - CREATE VIEW onboard_people AS - SELECT * FROM ( - SELECT 'craftsmen' AS type, craftsmen.* FROM craftsmen - UNION ALL - SELECT 'impotenten' AS type, impotenten.* FROM impotenten - UNION ALL - SELECT 'passengers' AS type, passengers.* FROM passengers - UNION ALL - SELECT 'seafarers' AS type, seafarers.* FROM seafarers - UNION ALL - SELECT 'soldiers' AS type, soldiers.* FROM soldiers - UNION ALL - SELECT 'total' AS type, total.* FROM total - ) AS onboard_people_table; - SELECT type, COUNT(*) AS total - FROM onboard_people GROUP BY type ORDER BY type; - ''; - onboardExpected = pkgs.lib.strings.replaceStrings [ "\n" ] [ "\\n" ] '' - +------------+-------+ - | type | total | - +============+=======+ - | craftsmen | 2349 | - | impotenten | 938 | - | passengers | 2813 | - | seafarers | 4468 | - | soldiers | 4177 | - | total | 2467 | - +------------+-------+ - ''; - in - { - name = "monetdb"; - meta = with pkgs.lib.maintainers; { - maintainers = [ StillerHarpo ]; - }; - nodes.machine.services.monetdb.enable = true; - testScript = '' - machine.start() - machine.wait_for_unit("monetdb") - machine.succeed("monetdbd create mydbfarm") - machine.succeed("monetdbd start mydbfarm") - machine.succeed("monetdb create voc") - machine.succeed("monetdb release voc") - machine.succeed("cp ${creds} ./.monetdb") - assert "hello world" in machine.succeed("mclient -d voc -s \"SELECT 'hello world'\"") - machine.succeed("mclient -d voc ${createUser}") - machine.succeed("cp ${credsVoc} ./.monetdb") - machine.succeed("mclient -d voc ${transaction}") - machine.succeed("mclient -d voc ${vocData}/voc_dump.sql") - assert "8131" in machine.succeed("mclient -d voc -s \"SELECT count(*) FROM voyages\"") - assert "${onboardExpected}" in machine.succeed("mclient -d voc ${onboardPeople}") +{ pkgs, ... 
}: +let + creds = pkgs.writeText ".monetdb" '' + user=monetdb + password=monetdb + ''; + createUser = pkgs.writeText "createUser.sql" '' + CREATE USER "voc" WITH PASSWORD 'voc' NAME 'VOC Explorer' SCHEMA "sys"; + CREATE SCHEMA "voc" AUTHORIZATION "voc"; + ALTER USER "voc" SET SCHEMA "voc"; + ''; + credsVoc = pkgs.writeText ".monetdb" '' + user=voc + password=voc + ''; + transaction = pkgs.writeText "transaction" '' + START TRANSACTION; + CREATE TABLE test (id int, data varchar(30)); + ROLLBACK; + ''; + vocData = pkgs.fetchzip { + url = "https://dev.monetdb.org/Assets/VOC/voc_dump.zip"; + hash = "sha256-sQ5acTsSAiXQfOgt2PhN7X7Z9TZGZtLrPPxgQT2pCGQ="; + }; + onboardPeople = pkgs.writeText "onboardPeople" '' + CREATE VIEW onboard_people AS + SELECT * FROM ( + SELECT 'craftsmen' AS type, craftsmen.* FROM craftsmen + UNION ALL + SELECT 'impotenten' AS type, impotenten.* FROM impotenten + UNION ALL + SELECT 'passengers' AS type, passengers.* FROM passengers + UNION ALL + SELECT 'seafarers' AS type, seafarers.* FROM seafarers + UNION ALL + SELECT 'soldiers' AS type, soldiers.* FROM soldiers + UNION ALL + SELECT 'total' AS type, total.* FROM total + ) AS onboard_people_table; + SELECT type, COUNT(*) AS total + FROM onboard_people GROUP BY type ORDER BY type; + ''; + onboardExpected = pkgs.lib.strings.replaceStrings [ "\n" ] [ "\\n" ] '' + +------------+-------+ + | type | total | + +============+=======+ + | craftsmen | 2349 | + | impotenten | 938 | + | passengers | 2813 | + | seafarers | 4468 | + | soldiers | 4177 | + | total | 2467 | + +------------+-------+ + ''; +in +{ + name = "monetdb"; + meta = with pkgs.lib.maintainers; { + maintainers = [ StillerHarpo ]; + }; + nodes.machine.services.monetdb.enable = true; + testScript = '' + machine.start() + machine.wait_for_unit("monetdb") + machine.succeed("monetdbd create mydbfarm") + machine.succeed("monetdbd start mydbfarm") + machine.succeed("monetdb create voc") + machine.succeed("monetdb release voc") + machine.succeed("cp ${creds} ./.monetdb") + assert "hello world" in machine.succeed("mclient -d voc -s \"SELECT 'hello world'\"") + machine.succeed("mclient -d voc ${createUser}") + machine.succeed("cp ${credsVoc} ./.monetdb") + machine.succeed("mclient -d voc ${transaction}") + machine.succeed("mclient -d voc ${vocData}/voc_dump.sql") + assert "8131" in machine.succeed("mclient -d voc -s \"SELECT count(*) FROM voyages\"") + assert "${onboardExpected}" in machine.succeed("mclient -d voc ${onboardPeople}") - ''; - } -) + ''; +} diff --git a/nixos/tests/moonraker.nix b/nixos/tests/moonraker.nix index 7eb43b68d1ee..fdefb64c852b 100644 --- a/nixos/tests/moonraker.nix +++ b/nixos/tests/moonraker.nix @@ -1,53 +1,51 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "moonraker"; - meta = with pkgs.lib.maintainers; { - maintainers = [ zhaofengli ]; - }; +{ pkgs, ... }: +{ + name = "moonraker"; + meta = with pkgs.lib.maintainers; { + maintainers = [ zhaofengli ]; + }; - nodes = { - printer = - { config, pkgs, ... }: - { - security.polkit.enable = true; + nodes = { + printer = + { config, pkgs, ... 
}: + { + security.polkit.enable = true; - services.moonraker = { - enable = true; - allowSystemControl = true; + services.moonraker = { + enable = true; + allowSystemControl = true; - settings = { - authorization = { - trusted_clients = [ - "127.0.0.0/8" - "::1/128" - ]; - }; + settings = { + authorization = { + trusted_clients = [ + "127.0.0.0/8" + "::1/128" + ]; }; }; - - services.klipper = { - enable = true; - - user = "moonraker"; - group = "moonraker"; - - # No mcu configured so won't even enter `ready` state - settings = { }; - }; }; - }; - testScript = '' - printer.start() + services.klipper = { + enable = true; - printer.wait_for_unit("klipper.service") - printer.wait_for_unit("moonraker.service") - printer.wait_until_succeeds("curl http://localhost:7125/printer/info | grep -v 'Not Found' >&2", timeout=30) + user = "moonraker"; + group = "moonraker"; - with subtest("Check that we can perform system-level operations"): - printer.succeed("curl -X POST http://localhost:7125/machine/services/stop?service=klipper | grep ok >&2") - printer.wait_until_succeeds("systemctl --no-pager show klipper.service | grep ActiveState=inactive", timeout=10) - ''; - } -) + # No mcu configured so won't even enter `ready` state + settings = { }; + }; + }; + }; + + testScript = '' + printer.start() + + printer.wait_for_unit("klipper.service") + printer.wait_for_unit("moonraker.service") + printer.wait_until_succeeds("curl http://localhost:7125/printer/info | grep -v 'Not Found' >&2", timeout=30) + + with subtest("Check that we can perform system-level operations"): + printer.succeed("curl -X POST http://localhost:7125/machine/services/stop?service=klipper | grep ok >&2") + printer.wait_until_succeeds("systemctl --no-pager show klipper.service | grep ActiveState=inactive", timeout=10) + ''; +} diff --git a/nixos/tests/moosefs.nix b/nixos/tests/moosefs.nix index 633e872f0d30..3166d34bf14a 100644 --- a/nixos/tests/moosefs.nix +++ b/nixos/tests/moosefs.nix @@ -1,99 +1,97 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - master = - { pkgs, ... }: - { - # data base is stored in memory - # server crashes with default memory size - virtualisation.memorySize = 1024; +let + master = + { pkgs, ... }: + { + # data base is stored in memory + # server crashes with default memory size + virtualisation.memorySize = 1024; - services.moosefs.master = { - enable = true; - openFirewall = true; - autoInit = true; - exports = [ - "* / rw,alldirs,admin,maproot=0:0" - "* . rw" - ]; - }; + services.moosefs.master = { + enable = true; + openFirewall = true; + autoInit = true; + exports = [ + "* / rw,alldirs,admin,maproot=0:0" + "* . rw" + ]; }; - - chunkserver = - { pkgs, ... }: - { - virtualisation.emptyDiskImages = [ 4096 ]; - boot.initrd.postDeviceCommands = '' - ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb - ''; - - fileSystems = pkgs.lib.mkVMOverride { - "/data" = { - device = "/dev/disk/by-label/data"; - fsType = "ext4"; - }; - }; - - services.moosefs = { - masterHost = "master"; - chunkserver = { - openFirewall = true; - enable = true; - hdds = [ "~/data" ]; - }; - }; - }; - - metalogger = - { pkgs, ... }: - { - services.moosefs = { - masterHost = "master"; - metalogger.enable = true; - }; - }; - - client = - { pkgs, ... 
}: - { - services.moosefs.client.enable = true; - }; - - in - { - name = "moosefs"; - - nodes = { - inherit master; - inherit metalogger; - chunkserver1 = chunkserver; - chunkserver2 = chunkserver; - client1 = client; - client2 = client; }; - testScript = '' - # prepare master server - master.start() - master.wait_for_unit("multi-user.target") - master.wait_for_unit("mfs-master.service") + chunkserver = + { pkgs, ... }: + { + virtualisation.emptyDiskImages = [ 4096 ]; + boot.initrd.postDeviceCommands = '' + ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb + ''; - metalogger.wait_for_unit("mfs-metalogger.service") + fileSystems = pkgs.lib.mkVMOverride { + "/data" = { + device = "/dev/disk/by-label/data"; + fsType = "ext4"; + }; + }; - for chunkserver in [chunkserver1, chunkserver2]: - chunkserver.wait_for_unit("multi-user.target") - chunkserver.succeed("chown moosefs:moosefs /data") - chunkserver.succeed("systemctl restart mfs-chunkserver") - chunkserver.wait_for_unit("mfs-chunkserver.service") + services.moosefs = { + masterHost = "master"; + chunkserver = { + openFirewall = true; + enable = true; + hdds = [ "~/data" ]; + }; + }; + }; - for client in [client1, client2]: - client.wait_for_unit("multi-user.target") - client.succeed("mkdir /moosefs") - client.succeed("mount -t moosefs master:/ /moosefs") + metalogger = + { pkgs, ... }: + { + services.moosefs = { + masterHost = "master"; + metalogger.enable = true; + }; + }; - client1.succeed("echo test > /moosefs/file") - client2.succeed("grep test /moosefs/file") - ''; - } -) + client = + { pkgs, ... }: + { + services.moosefs.client.enable = true; + }; + +in +{ + name = "moosefs"; + + nodes = { + inherit master; + inherit metalogger; + chunkserver1 = chunkserver; + chunkserver2 = chunkserver; + client1 = client; + client2 = client; + }; + + testScript = '' + # prepare master server + master.start() + master.wait_for_unit("multi-user.target") + master.wait_for_unit("mfs-master.service") + + metalogger.wait_for_unit("mfs-metalogger.service") + + for chunkserver in [chunkserver1, chunkserver2]: + chunkserver.wait_for_unit("multi-user.target") + chunkserver.succeed("chown moosefs:moosefs /data") + chunkserver.succeed("systemctl restart mfs-chunkserver") + chunkserver.wait_for_unit("mfs-chunkserver.service") + + for client in [client1, client2]: + client.wait_for_unit("multi-user.target") + client.succeed("mkdir /moosefs") + client.succeed("mount -t moosefs master:/ /moosefs") + + client1.succeed("echo test > /moosefs/file") + client2.succeed("grep test /moosefs/file") + ''; +} diff --git a/nixos/tests/mopidy.nix b/nixos/tests/mopidy.nix index e8447af8c75a..305a12f5aa2e 100644 --- a/nixos/tests/mopidy.nix +++ b/nixos/tests/mopidy.nix @@ -1,17 +1,15 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "mopidy"; +{ pkgs, ... }: +{ + name = "mopidy"; - nodes.machine = - { ... }: - { - services.mopidy.enable = true; - }; + nodes.machine = + { ... }: + { + services.mopidy.enable = true; + }; - testScript = '' - machine.wait_for_unit("mopidy") - machine.wait_for_open_port(6680) - ''; - } -) + testScript = '' + machine.wait_for_unit("mopidy") + machine.wait_for_open_port(6680) + ''; +} diff --git a/nixos/tests/morty.nix b/nixos/tests/morty.nix index 4ec44e31d487..b8e490fbbcb5 100644 --- a/nixos/tests/morty.nix +++ b/nixos/tests/morty.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - { - name = "morty"; - meta = with pkgs.lib.maintainers; { - maintainers = [ leenaars ]; - }; +{ + name = "morty"; + meta = with pkgs.lib.maintainers; { + maintainers = [ leenaars ]; + }; - nodes = { - mortyProxyWithKey = + nodes = { + mortyProxyWithKey = - { ... }: - { - services.morty = { - enable = true; - key = "78a9cd0cfee20c672f78427efb2a2a96036027f0"; - port = 3001; - }; - }; - - }; - - testScript = { ... }: - '' - mortyProxyWithKey.wait_for_unit("default.target") - mortyProxyWithKey.wait_for_open_port(3001) - mortyProxyWithKey.succeed("curl -fL 127.0.0.1:3001 | grep MortyProxy") - ''; + { + services.morty = { + enable = true; + key = "78a9cd0cfee20c672f78427efb2a2a96036027f0"; + port = 3001; + }; + }; - } -) + }; + + testScript = + { ... }: + '' + mortyProxyWithKey.wait_for_unit("default.target") + mortyProxyWithKey.wait_for_open_port(3001) + mortyProxyWithKey.succeed("curl -fL 127.0.0.1:3001 | grep MortyProxy") + ''; + +} diff --git a/nixos/tests/mtp.nix b/nixos/tests/mtp.nix index b4efef0318aa..6eab7ad995a6 100644 --- a/nixos/tests/mtp.nix +++ b/nixos/tests/mtp.nix @@ -1,121 +1,119 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "mtp"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - matthewcroughan - nixinator - ]; - }; +{ pkgs, ... }: +{ + name = "mtp"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + matthewcroughan + nixinator + ]; + }; - nodes = { - client = - { config, pkgs, ... }: - { - # DBUS runs only once a user session is created, which means a user has to - # login. Here, we log in as root. Once logged in, the gvfs-daemon service runs - # as UID 0 in User-0.service - services.getty.autologinUser = "root"; + nodes = { + client = + { config, pkgs, ... }: + { + # DBUS runs only once a user session is created, which means a user has to + # login. Here, we log in as root. Once logged in, the gvfs-daemon service runs + # as UID 0 in User-0.service + services.getty.autologinUser = "root"; - # XDG_RUNTIME_DIR is needed for running systemd-user services such as - # gvfs-daemon as root. - environment.variables.XDG_RUNTIME_DIR = "/run/user/0"; + # XDG_RUNTIME_DIR is needed for running systemd-user services such as + # gvfs-daemon as root. + environment.variables.XDG_RUNTIME_DIR = "/run/user/0"; - environment.systemPackages = with pkgs; [ - usbutils - glib - jmtpfs - tree - ]; - services.gvfs.enable = true; + environment.systemPackages = with pkgs; [ + usbutils + glib + jmtpfs + tree + ]; + services.gvfs.enable = true; - # Creates a usb-mtp device inside the VM, which is mapped to the host's - # /tmp folder, it is able to write files to this location, but only has - # permissions to read its own creations. - virtualisation.qemu.options = [ - "-usb" - "-device usb-mtp,rootdir=/tmp,readonly=false" - ]; - }; - }; + # Creates a usb-mtp device inside the VM, which is mapped to the host's + # /tmp folder, it is able to write files to this location, but only has + # permissions to read its own creations. + virtualisation.qemu.options = [ + "-usb" + "-device usb-mtp,rootdir=/tmp,readonly=false" + ]; + }; + }; - testScript = - { nodes, ... }: - let - # Creates a list of QEMU MTP devices matching USB ID (46f4:0004). This - # value can be sourced in a shell script. This is so we can loop over the - # devices we find, as this test may want to use more than one MTP device - # in future. 
- mtpDevices = pkgs.writeScript "mtpDevices.sh" '' - export mtpDevices=$(lsusb -d 46f4:0004 | awk {'print $2","$4'} | sed 's/[:-]/ /g') - ''; - # Qemu is only capable of creating an MTP device with Picture Transfer - # Protocol. This means that gvfs must use gphoto2:// rather than mtp:// - # when mounting. - # https://github.com/qemu/qemu/blob/970bc16f60937bcfd334f14c614bd4407c247961/hw/usb/dev-mtp.c#L278 - gvfs = rec { - mountAllMtpDevices = pkgs.writeScript "mountAllMtpDevices.sh" '' - set -e - source ${mtpDevices} - for i in $mtpDevices - do - gio mount "gphoto2://[usb:$i]/" - done - ''; - unmountAllMtpDevices = pkgs.writeScript "unmountAllMtpDevices.sh" '' - set -e - source ${mtpDevices} - for i in $mtpDevices - do - gio mount -u "gphoto2://[usb:$i]/" - done - ''; - # gvfsTest: - # 1. Creates a 10M test file - # 2. Copies it to the device using GIO tools - # 3. Checks for corruption with `diff` - # 4. Removes the file, then unmounts the disks. - gvfsTest = pkgs.writeScript "gvfsTest.sh" '' - set -e - source ${mtpDevices} - ${mountAllMtpDevices} - dd if=/dev/urandom of=testFile10M bs=1M count=10 - for i in $mtpDevices - do - gio copy ./testFile10M gphoto2://[usb:$i]/ - ls -lah /run/user/0/gvfs/*/testFile10M - gio remove gphoto2://[usb:$i]/testFile10M - done - ${unmountAllMtpDevices} - ''; - }; - jmtpfs = { - # jmtpfsTest: - # 1. Mounts the device on a dir named `phone` using jmtpfs - # 2. Puts the current Nixpkgs libmtp version into a file - # 3. Checks for corruption with `diff` - # 4. Prints the directory tree - jmtpfsTest = pkgs.writeScript "jmtpfsTest.sh" '' - set -e - mkdir phone - jmtpfs phone - echo "${pkgs.libmtp.version}" > phone/tmp/testFile - echo "${pkgs.libmtp.version}" > testFile - diff phone/tmp/testFile testFile - tree phone - ''; - }; - in - # Using >&2 allows the results of the scripts to be printed to the terminal - # when building this test with Nix. Scripts would otherwise complete - # silently. - '' - start_all() - client.wait_for_unit("multi-user.target") - client.wait_for_unit("dbus.service") - client.succeed("${gvfs.gvfsTest} >&2") - client.succeed("${jmtpfs.jmtpfsTest} >&2") + testScript = + { nodes, ... }: + let + # Creates a list of QEMU MTP devices matching USB ID (46f4:0004). This + # value can be sourced in a shell script. This is so we can loop over the + # devices we find, as this test may want to use more than one MTP device + # in future. + mtpDevices = pkgs.writeScript "mtpDevices.sh" '' + export mtpDevices=$(lsusb -d 46f4:0004 | awk {'print $2","$4'} | sed 's/[:-]/ /g') ''; - } -) + # Qemu is only capable of creating an MTP device with Picture Transfer + # Protocol. This means that gvfs must use gphoto2:// rather than mtp:// + # when mounting. + # https://github.com/qemu/qemu/blob/970bc16f60937bcfd334f14c614bd4407c247961/hw/usb/dev-mtp.c#L278 + gvfs = rec { + mountAllMtpDevices = pkgs.writeScript "mountAllMtpDevices.sh" '' + set -e + source ${mtpDevices} + for i in $mtpDevices + do + gio mount "gphoto2://[usb:$i]/" + done + ''; + unmountAllMtpDevices = pkgs.writeScript "unmountAllMtpDevices.sh" '' + set -e + source ${mtpDevices} + for i in $mtpDevices + do + gio mount -u "gphoto2://[usb:$i]/" + done + ''; + # gvfsTest: + # 1. Creates a 10M test file + # 2. Copies it to the device using GIO tools + # 3. Checks for corruption with `diff` + # 4. Removes the file, then unmounts the disks. 
+ gvfsTest = pkgs.writeScript "gvfsTest.sh" '' + set -e + source ${mtpDevices} + ${mountAllMtpDevices} + dd if=/dev/urandom of=testFile10M bs=1M count=10 + for i in $mtpDevices + do + gio copy ./testFile10M gphoto2://[usb:$i]/ + ls -lah /run/user/0/gvfs/*/testFile10M + gio remove gphoto2://[usb:$i]/testFile10M + done + ${unmountAllMtpDevices} + ''; + }; + jmtpfs = { + # jmtpfsTest: + # 1. Mounts the device on a dir named `phone` using jmtpfs + # 2. Puts the current Nixpkgs libmtp version into a file + # 3. Checks for corruption with `diff` + # 4. Prints the directory tree + jmtpfsTest = pkgs.writeScript "jmtpfsTest.sh" '' + set -e + mkdir phone + jmtpfs phone + echo "${pkgs.libmtp.version}" > phone/tmp/testFile + echo "${pkgs.libmtp.version}" > testFile + diff phone/tmp/testFile testFile + tree phone + ''; + }; + in + # Using >&2 allows the results of the scripts to be printed to the terminal + # when building this test with Nix. Scripts would otherwise complete + # silently. + '' + start_all() + client.wait_for_unit("multi-user.target") + client.wait_for_unit("dbus.service") + client.succeed("${gvfs.gvfsTest} >&2") + client.succeed("${jmtpfs.jmtpfsTest} >&2") + ''; +} diff --git a/nixos/tests/multipass.nix b/nixos/tests/multipass.nix index da63a7a41fbb..33919cde81ed 100644 --- a/nixos/tests/multipass.nix +++ b/nixos/tests/multipass.nix @@ -1,41 +1,39 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - multipass-image = import ../release.nix { - configuration = { - # Building documentation makes the test unnecessarily take a longer time: - documentation.enable = lib.mkForce false; +let + multipass-image = import ../release.nix { + configuration = { + # Building documentation makes the test unnecessarily take a longer time: + documentation.enable = lib.mkForce false; + }; + }; + +in +{ + name = "multipass"; + + meta.maintainers = [ lib.maintainers.jnsgruk ]; + + nodes.machine = + { lib, ... }: + { + virtualisation = { + cores = 1; + memorySize = 1024; + diskSize = 4096; + + multipass.enable = true; }; }; - in - { - name = "multipass"; + testScript = '' + machine.wait_for_unit("sockets.target") + machine.wait_for_unit("multipass.service") + machine.wait_for_file("/var/lib/multipass/data/multipassd/network/multipass_subnet") - meta.maintainers = [ lib.maintainers.jnsgruk ]; + # Wait for Multipass to settle + machine.sleep(1) - nodes.machine = - { lib, ... }: - { - virtualisation = { - cores = 1; - memorySize = 1024; - diskSize = 4096; - - multipass.enable = true; - }; - }; - - testScript = '' - machine.wait_for_unit("sockets.target") - machine.wait_for_unit("multipass.service") - machine.wait_for_file("/var/lib/multipass/data/multipassd/network/multipass_subnet") - - # Wait for Multipass to settle - machine.sleep(1) - - machine.succeed("multipass list") - ''; - } -) + machine.succeed("multipass list") + ''; +} diff --git a/nixos/tests/munin.nix b/nixos/tests/munin.nix index b28562bbaa2e..fad3f2b52fae 100644 --- a/nixos/tests/munin.nix +++ b/nixos/tests/munin.nix @@ -1,49 +1,47 @@ # This test runs basic munin setup with node and cron job running on the same # machine. -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "munin"; - meta = with pkgs.lib.maintainers; { - maintainers = [ domenkozar ]; - }; +{ pkgs, ... }: +{ + name = "munin"; + meta = with pkgs.lib.maintainers; { + maintainers = [ domenkozar ]; + }; - nodes = { - one = - { config, ... 
}: - { - services = { - munin-node = { - enable = true; - # disable a failing plugin to prevent irrelevant error message, see #23049 - disabledPlugins = [ "apc_nis" ]; - }; - munin-cron = { - enable = true; - hosts = '' - [${config.networking.hostName}] - address localhost - ''; - }; + nodes = { + one = + { config, ... }: + { + services = { + munin-node = { + enable = true; + # disable a failing plugin to prevent irrelevant error message, see #23049 + disabledPlugins = [ "apc_nis" ]; + }; + munin-cron = { + enable = true; + hosts = '' + [${config.networking.hostName}] + address localhost + ''; }; - - # increase the systemd timer interval so it fires more often - systemd.timers.munin-cron.timerConfig.OnCalendar = pkgs.lib.mkForce "*:*:0/10"; }; - }; - testScript = '' - start_all() + # increase the systemd timer interval so it fires more often + systemd.timers.munin-cron.timerConfig.OnCalendar = pkgs.lib.mkForce "*:*:0/10"; + }; + }; - with subtest("ensure munin-node starts and listens on 4949"): - one.wait_for_unit("munin-node.service") - one.wait_for_open_port(4949) + testScript = '' + start_all() - with subtest("ensure munin-cron output is correct"): - one.wait_for_file("/var/lib/munin/one/one-uptime-uptime-g.rrd") - one.wait_for_file("/var/www/munin/one/index.html") - one.wait_for_file("/var/www/munin/one/one/diskstat_iops_vda-day.png", timeout=60) - ''; - } -) + with subtest("ensure munin-node starts and listens on 4949"): + one.wait_for_unit("munin-node.service") + one.wait_for_open_port(4949) + + with subtest("ensure munin-cron output is correct"): + one.wait_for_file("/var/lib/munin/one/one-uptime-uptime-g.rrd") + one.wait_for_file("/var/www/munin/one/index.html") + one.wait_for_file("/var/www/munin/one/one/diskstat_iops_vda-day.png", timeout=60) + ''; +} diff --git a/nixos/tests/mutable-users.nix b/nixos/tests/mutable-users.nix index e4e35f2d7220..3b262ff0821f 100644 --- a/nixos/tests/mutable-users.nix +++ b/nixos/tests/mutable-users.nix @@ -1,76 +1,74 @@ # Mutable users tests. -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "mutable-users"; - meta = with pkgs.lib.maintainers; { - maintainers = [ gleber ]; - }; +{ pkgs, ... }: +{ + name = "mutable-users"; + meta = with pkgs.lib.maintainers; { + maintainers = [ gleber ]; + }; - nodes = { - machine = { - specialisation.immutable.configuration = { - users.mutableUsers = false; - }; + nodes = { + machine = { + specialisation.immutable.configuration = { + users.mutableUsers = false; + }; - specialisation.mutable.configuration = { - users.mutableUsers = true; - users.users.dry-test.isNormalUser = true; - }; + specialisation.mutable.configuration = { + users.mutableUsers = true; + users.users.dry-test.isNormalUser = true; }; }; + }; - testScript = '' - machine.start() - machine.wait_for_unit("default.target") + testScript = '' + machine.start() + machine.wait_for_unit("default.target") - # Machine starts in immutable mode. Add a user and test if reactivating - # configuration removes the user. - with subtest("Machine in immutable mode"): - assert "foobar" not in machine.succeed("cat /etc/passwd") - machine.succeed("sudo useradd foobar") - assert "foobar" in machine.succeed("cat /etc/passwd") - machine.succeed( - "/run/booted-system/specialisation/immutable/bin/switch-to-configuration test" - ) - assert "foobar" not in machine.succeed("cat /etc/passwd") + # Machine starts in immutable mode. Add a user and test if reactivating + # configuration removes the user. 
+ with subtest("Machine in immutable mode"): + assert "foobar" not in machine.succeed("cat /etc/passwd") + machine.succeed("sudo useradd foobar") + assert "foobar" in machine.succeed("cat /etc/passwd") + machine.succeed( + "/run/booted-system/specialisation/immutable/bin/switch-to-configuration test" + ) + assert "foobar" not in machine.succeed("cat /etc/passwd") - # In immutable mode passwd is not wrapped, while in mutable mode it is - # wrapped. - with subtest("Password is wrapped in mutable mode"): - assert "/run/current-system/" in machine.succeed("which passwd") - machine.succeed( - "/run/booted-system/specialisation/mutable/bin/switch-to-configuration test" - ) - assert "/run/wrappers/" in machine.succeed("which passwd") + # In immutable mode passwd is not wrapped, while in mutable mode it is + # wrapped. + with subtest("Password is wrapped in mutable mode"): + assert "/run/current-system/" in machine.succeed("which passwd") + machine.succeed( + "/run/booted-system/specialisation/mutable/bin/switch-to-configuration test" + ) + assert "/run/wrappers/" in machine.succeed("which passwd") - with subtest("dry-activation does not change files"): - machine.succeed('test -e /home/dry-test') # home was created - machine.succeed('rm -rf /home/dry-test') + with subtest("dry-activation does not change files"): + machine.succeed('test -e /home/dry-test') # home was created + machine.succeed('rm -rf /home/dry-test') - files_to_check = ['/etc/group', - '/etc/passwd', - '/etc/shadow', - '/etc/subuid', - '/etc/subgid', - '/var/lib/nixos/uid-map', - '/var/lib/nixos/gid-map', - '/var/lib/nixos/declarative-groups', - '/var/lib/nixos/declarative-users' - ] - expected_hashes = {} - expected_stats = {} - for file in files_to_check: - expected_hashes[file] = machine.succeed(f"sha256sum {file}") - expected_stats[file] = machine.succeed(f"stat {file}") + files_to_check = ['/etc/group', + '/etc/passwd', + '/etc/shadow', + '/etc/subuid', + '/etc/subgid', + '/var/lib/nixos/uid-map', + '/var/lib/nixos/gid-map', + '/var/lib/nixos/declarative-groups', + '/var/lib/nixos/declarative-users' + ] + expected_hashes = {} + expected_stats = {} + for file in files_to_check: + expected_hashes[file] = machine.succeed(f"sha256sum {file}") + expected_stats[file] = machine.succeed(f"stat {file}") - machine.succeed("/run/booted-system/specialisation/mutable/bin/switch-to-configuration dry-activate") + machine.succeed("/run/booted-system/specialisation/mutable/bin/switch-to-configuration dry-activate") - machine.fail('test -e /home/dry-test') # home was not recreated - for file in files_to_check: - assert machine.succeed(f"sha256sum {file}") == expected_hashes[file] - assert machine.succeed(f"stat {file}") == expected_stats[file] - ''; - } -) + machine.fail('test -e /home/dry-test') # home was not recreated + for file in files_to_check: + assert machine.succeed(f"sha256sum {file}") == expected_hashes[file] + assert machine.succeed(f"stat {file}") == expected_stats[file] + ''; +} diff --git a/nixos/tests/mympd.nix b/nixos/tests/mympd.nix index 20c5603da69d..8d599dd3ad39 100644 --- a/nixos/tests/mympd.nix +++ b/nixos/tests/mympd.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "mympd"; +{ pkgs, lib, ... 
}: +{ + name = "mympd"; - nodes.mympd = { - services.mympd = { - enable = true; - settings = { - http_port = 8081; - }; + nodes.mympd = { + services.mympd = { + enable = true; + settings = { + http_port = 8081; }; - - services.mpd.enable = true; }; - testScript = '' - start_all(); - machine.wait_for_unit("mympd.service"); + services.mpd.enable = true; + }; - # Ensure that mympd can connect to mpd - machine.wait_until_succeeds( - "journalctl -eu mympd -o cat | grep 'Connected to MPD'" - ) + testScript = '' + start_all(); + machine.wait_for_unit("mympd.service"); - # Ensure that the web server is working - machine.succeed("curl http://localhost:8081 --compressed | grep -o myMPD") - ''; - } -) + # Ensure that mympd can connect to mpd + machine.wait_until_succeeds( + "journalctl -eu mympd -o cat | grep 'Connected to MPD'" + ) + + # Ensure that the web server is working + machine.succeed("curl http://localhost:8081 --compressed | grep -o myMPD") + ''; +} diff --git a/nixos/tests/nar-serve.nix b/nixos/tests/nar-serve.nix index fde57eb4f670..d05d3f136251 100644 --- a/nixos/tests/nar-serve.nix +++ b/nixos/tests/nar-serve.nix @@ -1,51 +1,49 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "nar-serve"; - meta.maintainers = [ lib.maintainers.rizary ]; - nodes = { - server = - { pkgs, ... }: - { - services.nginx = { - enable = true; - virtualHosts.default.root = "/var/www"; - }; - services.nar-serve = { - enable = true; - # Connect to the localhost nginx instead of the default - # https://cache.nixos.org - cacheURL = "http://localhost/"; - }; - environment.systemPackages = [ - pkgs.hello - pkgs.curl - ]; - - networking.firewall.allowedTCPPorts = [ 8383 ]; - - # virtualisation.diskSize = 2 * 1024; +{ pkgs, lib, ... }: +{ + name = "nar-serve"; + meta.maintainers = [ lib.maintainers.rizary ]; + nodes = { + server = + { pkgs, ... 
}: + { + services.nginx = { + enable = true; + virtualHosts.default.root = "/var/www"; }; - }; - testScript = '' - import os + services.nar-serve = { + enable = true; + # Connect to the localhost nginx instead of the default + # https://cache.nixos.org + cacheURL = "http://localhost/"; + }; + environment.systemPackages = [ + pkgs.hello + pkgs.curl + ]; - start_all() + networking.firewall.allowedTCPPorts = [ 8383 ]; - # Create a fake cache with Nginx service the static files - server.succeed( - "nix --experimental-features nix-command copy --to file:///var/www ${pkgs.hello}" - ) - server.wait_for_unit("nginx.service") - server.wait_for_open_port(80) + # virtualisation.diskSize = 2 * 1024; + }; + }; + testScript = '' + import os - # Check that nar-serve can return the content of the derivation - drvName = os.path.basename("${pkgs.hello}") - drvHash = drvName.split("-")[0] - server.wait_for_unit("nar-serve.service") - server.succeed( - "curl -o hello -f http://localhost:8383/nix/store/{}/bin/hello".format(drvHash) - ) - ''; - } -) + start_all() + + # Create a fake cache with Nginx service the static files + server.succeed( + "nix --experimental-features nix-command copy --to file:///var/www ${pkgs.hello}" + ) + server.wait_for_unit("nginx.service") + server.wait_for_open_port(80) + + # Check that nar-serve can return the content of the derivation + drvName = os.path.basename("${pkgs.hello}") + drvHash = drvName.split("-")[0] + server.wait_for_unit("nar-serve.service") + server.succeed( + "curl -o hello -f http://localhost:8383/nix/store/{}/bin/hello".format(drvHash) + ) + ''; +} diff --git a/nixos/tests/nats.nix b/nixos/tests/nats.nix index b62b9201101a..2ed97ee86b09 100644 --- a/nixos/tests/nats.nix +++ b/nixos/tests/nats.nix @@ -6,75 +6,73 @@ let topic = "foo.bar"; in -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "nats"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ c0deaddict ]; - }; +{ pkgs, lib, ... }: +{ + name = "nats"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ c0deaddict ]; + }; - nodes = - let - client = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ natscli ]; - }; - in - { - server = - { pkgs, ... }: - { - networking.firewall.allowedTCPPorts = [ port ]; - services.nats = { - inherit port; - enable = true; - settings = { - authorization = { - users = [ - { - user = username; - inherit password; - } - ]; - }; + nodes = + let + client = + { pkgs, ... }: + { + environment.systemPackages = with pkgs; [ natscli ]; + }; + in + { + server = + { pkgs, ... 
}: + { + networking.firewall.allowedTCPPorts = [ port ]; + services.nats = { + inherit port; + enable = true; + settings = { + authorization = { + users = [ + { + user = username; + inherit password; + } + ]; }; }; }; + }; - client1 = client; - client2 = client; - }; + client1 = client; + client2 = client; + }; - testScript = - let - file = "/tmp/msg"; - in - '' - def nats_cmd(*args): - return ( - "nats " - "--server=nats://server:${toString port} " - "--user=${username} " - "--password=${password} " - "{}" - ).format(" ".join(args)) + testScript = + let + file = "/tmp/msg"; + in + '' + def nats_cmd(*args): + return ( + "nats " + "--server=nats://server:${toString port} " + "--user=${username} " + "--password=${password} " + "{}" + ).format(" ".join(args)) - def parallel(*fns): - from threading import Thread - threads = [ Thread(target=fn) for fn in fns ] - for t in threads: t.start() - for t in threads: t.join() + def parallel(*fns): + from threading import Thread + threads = [ Thread(target=fn) for fn in fns ] + for t in threads: t.start() + for t in threads: t.join() - start_all() - server.wait_for_unit("nats.service") + start_all() + server.wait_for_unit("nats.service") - with subtest("pub sub"): - parallel( - lambda: client1.succeed(nats_cmd("sub", "--count", "1", "${topic}")), - lambda: client2.succeed("sleep 2 && {}".format(nats_cmd("pub", "${topic}", "hello"))), - ) - ''; - } -) + with subtest("pub sub"): + parallel( + lambda: client1.succeed(nats_cmd("sub", "--count", "1", "${topic}")), + lambda: client2.succeed("sleep 2 && {}".format(nats_cmd("pub", "${topic}", "hello"))), + ) + ''; +} diff --git a/nixos/tests/navidrome.nix b/nixos/tests/navidrome.nix index b6afad8e56c7..e57cec0d2830 100644 --- a/nixos/tests/navidrome.nix +++ b/nixos/tests/navidrome.nix @@ -1,17 +1,15 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "navidrome"; +{ pkgs, ... }: +{ + name = "navidrome"; - nodes.machine = - { ... }: - { - services.navidrome.enable = true; - }; + nodes.machine = + { ... }: + { + services.navidrome.enable = true; + }; - testScript = '' - machine.wait_for_unit("navidrome") - machine.wait_for_open_port(4533) - ''; - } -) + testScript = '' + machine.wait_for_unit("navidrome") + machine.wait_for_open_port(4533) + ''; +} diff --git a/nixos/tests/nbd.nix b/nixos/tests/nbd.nix index 7905a4a6127e..088e35c2df6a 100644 --- a/nixos/tests/nbd.nix +++ b/nixos/tests/nbd.nix @@ -1,116 +1,114 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - listenPort = 30123; - testString = "It works!"; - mkCreateSmallFileService = - { - path, - loop ? false, - }: - { - script = '' - ${pkgs.coreutils}/bin/dd if=/dev/zero of=${path} bs=1K count=100 - ${pkgs.lib.optionalString loop "${pkgs.util-linux}/bin/losetup --find ${path}"} - ''; - serviceConfig = { - Type = "oneshot"; - }; - wantedBy = [ "multi-user.target" ]; - before = [ "nbd-server.service" ]; +{ pkgs, ... }: +let + listenPort = 30123; + testString = "It works!"; + mkCreateSmallFileService = + { + path, + loop ? false, + }: + { + script = '' + ${pkgs.coreutils}/bin/dd if=/dev/zero of=${path} bs=1K count=100 + ${pkgs.lib.optionalString loop "${pkgs.util-linux}/bin/losetup --find ${path}"} + ''; + serviceConfig = { + Type = "oneshot"; }; - in - { - name = "nbd"; - - nodes = { - server = - { config, pkgs, ... 
}: - { - # Create some small files of zeros to use as the ndb disks - ## `vault-pub.disk` is accessible from any IP - systemd.services.create-pub-file = mkCreateSmallFileService { path = "/vault-pub.disk"; }; - ## `vault-priv.disk` is accessible only from localhost. - ## It's also a loopback device to test exporting /dev/... - systemd.services.create-priv-file = mkCreateSmallFileService { - path = "/vault-priv.disk"; - loop = true; - }; - ## `aaa.disk` is just here because "[aaa]" sorts before - ## "[generic]" lexicographically, and nbd-server breaks if - ## "[generic]" isn't the first section. - systemd.services.create-aaa-file = mkCreateSmallFileService { path = "/aaa.disk"; }; - - # Needed only for nbd-client used in the tests. - environment.systemPackages = [ pkgs.nbd ]; - - # Open the nbd port in the firewall - networking.firewall.allowedTCPPorts = [ listenPort ]; - - # Run the nbd server and expose the small file created above - services.nbd.server = { - enable = true; - exports = { - aaa = { - path = "/aaa.disk"; - }; - vault-pub = { - path = "/vault-pub.disk"; - }; - vault-priv = { - path = "/dev/loop0"; - allowAddresses = [ - "127.0.0.1" - "::1" - ]; - }; - }; - listenAddress = "0.0.0.0"; - listenPort = listenPort; - }; - }; - - client = - { config, pkgs, ... }: - { - programs.nbd.enable = true; - }; + wantedBy = [ "multi-user.target" ]; + before = [ "nbd-server.service" ]; }; +in +{ + name = "nbd"; - testScript = '' - testString = "${testString}" + nodes = { + server = + { config, pkgs, ... }: + { + # Create some small files of zeros to use as the ndb disks + ## `vault-pub.disk` is accessible from any IP + systemd.services.create-pub-file = mkCreateSmallFileService { path = "/vault-pub.disk"; }; + ## `vault-priv.disk` is accessible only from localhost. + ## It's also a loopback device to test exporting /dev/... + systemd.services.create-priv-file = mkCreateSmallFileService { + path = "/vault-priv.disk"; + loop = true; + }; + ## `aaa.disk` is just here because "[aaa]" sorts before + ## "[generic]" lexicographically, and nbd-server breaks if + ## "[generic]" isn't the first section. + systemd.services.create-aaa-file = mkCreateSmallFileService { path = "/aaa.disk"; }; - start_all() - server.wait_for_open_port(${toString listenPort}) + # Needed only for nbd-client used in the tests. + environment.systemPackages = [ pkgs.nbd ]; - # Client: Connect to the server, write a small string to the nbd disk, and cleanly disconnect - client.succeed("nbd-client server ${toString listenPort} /dev/nbd0 -name vault-pub -persist") - client.succeed(f"echo '{testString}' | dd of=/dev/nbd0 conv=notrunc") - client.succeed("nbd-client -d /dev/nbd0") + # Open the nbd port in the firewall + networking.firewall.allowedTCPPorts = [ listenPort ]; - # Server: Check that the string written by the client is indeed in the file - foundString = server.succeed(f"dd status=none if=/vault-pub.disk count={len(testString)}")[:len(testString)] - if foundString != testString: - raise Exception(f"Read the wrong string from nbd disk. Expected: '{testString}'. 
Found: '{foundString}'") + # Run the nbd server and expose the small file created above + services.nbd.server = { + enable = true; + exports = { + aaa = { + path = "/aaa.disk"; + }; + vault-pub = { + path = "/vault-pub.disk"; + }; + vault-priv = { + path = "/dev/loop0"; + allowAddresses = [ + "127.0.0.1" + "::1" + ]; + }; + }; + listenAddress = "0.0.0.0"; + listenPort = listenPort; + }; + }; - # Client: Fail to connect to the private disk - client.fail("nbd-client server ${toString listenPort} /dev/nbd0 -name vault-priv -persist") + client = + { config, pkgs, ... }: + { + programs.nbd.enable = true; + }; + }; - # Server: Successfully connect to the private disk - server.succeed("nbd-client localhost ${toString listenPort} /dev/nbd0 -name vault-priv -persist") - server.succeed(f"echo '{testString}' | dd of=/dev/nbd0 conv=notrunc") - foundString = server.succeed(f"dd status=none if=/dev/loop0 count={len(testString)}")[:len(testString)] - if foundString != testString: - raise Exception(f"Read the wrong string from nbd disk. Expected: '{testString}'. Found: '{foundString}'") - server.succeed("nbd-client -d /dev/nbd0") + testScript = '' + testString = "${testString}" - # Server: Successfully connect to the aaa disk - server.succeed("nbd-client localhost ${toString listenPort} /dev/nbd0 -name aaa -persist") - server.succeed(f"echo '{testString}' | dd of=/dev/nbd0 conv=notrunc") - foundString = server.succeed(f"dd status=none if=/aaa.disk count={len(testString)}")[:len(testString)] - if foundString != testString: - raise Exception(f"Read the wrong string from nbd disk. Expected: '{testString}'. Found: '{foundString}'") - server.succeed("nbd-client -d /dev/nbd0") - ''; - } -) + start_all() + server.wait_for_open_port(${toString listenPort}) + + # Client: Connect to the server, write a small string to the nbd disk, and cleanly disconnect + client.succeed("nbd-client server ${toString listenPort} /dev/nbd0 -name vault-pub -persist") + client.succeed(f"echo '{testString}' | dd of=/dev/nbd0 conv=notrunc") + client.succeed("nbd-client -d /dev/nbd0") + + # Server: Check that the string written by the client is indeed in the file + foundString = server.succeed(f"dd status=none if=/vault-pub.disk count={len(testString)}")[:len(testString)] + if foundString != testString: + raise Exception(f"Read the wrong string from nbd disk. Expected: '{testString}'. Found: '{foundString}'") + + # Client: Fail to connect to the private disk + client.fail("nbd-client server ${toString listenPort} /dev/nbd0 -name vault-priv -persist") + + # Server: Successfully connect to the private disk + server.succeed("nbd-client localhost ${toString listenPort} /dev/nbd0 -name vault-priv -persist") + server.succeed(f"echo '{testString}' | dd of=/dev/nbd0 conv=notrunc") + foundString = server.succeed(f"dd status=none if=/dev/loop0 count={len(testString)}")[:len(testString)] + if foundString != testString: + raise Exception(f"Read the wrong string from nbd disk. Expected: '{testString}'. Found: '{foundString}'") + server.succeed("nbd-client -d /dev/nbd0") + + # Server: Successfully connect to the aaa disk + server.succeed("nbd-client localhost ${toString listenPort} /dev/nbd0 -name aaa -persist") + server.succeed(f"echo '{testString}' | dd of=/dev/nbd0 conv=notrunc") + foundString = server.succeed(f"dd status=none if=/aaa.disk count={len(testString)}")[:len(testString)] + if foundString != testString: + raise Exception(f"Read the wrong string from nbd disk. Expected: '{testString}'. 
Found: '{foundString}'") + server.succeed("nbd-client -d /dev/nbd0") + ''; +} diff --git a/nixos/tests/ncdns.nix b/nixos/tests/ncdns.nix index 48f17ed16b85..97f509250475 100644 --- a/nixos/tests/ncdns.nix +++ b/nixos/tests/ncdns.nix @@ -1,97 +1,95 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let - fakeReply = pkgs.writeText "namecoin-reply.json" '' - { "error": null, - "id": 1, - "result": { - "address": "T31q8ucJ4dI1xzhxQ5QispfECld5c7Xw", - "expired": false, - "expires_in": 2248, - "height": 438155, - "name": "d/test", - "txid": "db61c0b2540ba0c1a2c8cc92af703a37002e7566ecea4dbf8727c7191421edfb", - "value": "{\"ip\": \"1.2.3.4\", \"email\": \"root@test.bit\",\"info\": \"Fake record\"}", - "vout": 0 - } +{ lib, pkgs, ... }: +let + fakeReply = pkgs.writeText "namecoin-reply.json" '' + { "error": null, + "id": 1, + "result": { + "address": "T31q8ucJ4dI1xzhxQ5QispfECld5c7Xw", + "expired": false, + "expires_in": 2248, + "height": 438155, + "name": "d/test", + "txid": "db61c0b2540ba0c1a2c8cc92af703a37002e7566ecea4dbf8727c7191421edfb", + "value": "{\"ip\": \"1.2.3.4\", \"email\": \"root@test.bit\",\"info\": \"Fake record\"}", + "vout": 0 } - ''; + } + ''; - # Disabled because DNSSEC does not currently validate, - # see https://github.com/namecoin/ncdns/issues/127 - dnssec = false; + # Disabled because DNSSEC does not currently validate, + # see https://github.com/namecoin/ncdns/issues/127 + dnssec = false; - in +in - { - name = "ncdns"; - meta = with pkgs.lib.maintainers; { - maintainers = [ rnhmjoj ]; - }; +{ + name = "ncdns"; + meta = with pkgs.lib.maintainers; { + maintainers = [ rnhmjoj ]; + }; - nodes.server = - { ... }: - { - networking.nameservers = [ "::1" ]; + nodes.server = + { ... }: + { + networking.nameservers = [ "::1" ]; - services.namecoind.rpc = { - address = "::1"; - user = "namecoin"; - password = "secret"; - port = 8332; - }; - - # Fake namecoin RPC server because we can't - # run a full node in a test. - systemd.services.namecoind = { - wantedBy = [ "multi-user.target" ]; - script = '' - while true; do - echo -e "HTTP/1.1 200 OK\n\n $(<${fakeReply})\n" \ - | ${pkgs.netcat}/bin/nc -N -l ::1 8332 - done - ''; - }; - - services.ncdns = { - enable = true; - dnssec.enable = dnssec; - identity.hostname = "example.com"; - identity.hostmaster = "root@example.com"; - identity.address = "1.0.0.1"; - }; - - services.pdns-recursor.enable = true; - services.pdns-recursor.resolveNamecoin = true; - - environment.systemPackages = [ pkgs.dnsutils ]; + services.namecoind.rpc = { + address = "::1"; + user = "namecoin"; + password = "secret"; + port = 8332; }; - testScript = - (lib.optionalString dnssec '' - with subtest("DNSSEC keys have been generated"): - server.wait_for_unit("ncdns") - server.wait_for_file("/var/lib/ncdns/bit.key") - server.wait_for_file("/var/lib/ncdns/bit-zone.key") + # Fake namecoin RPC server because we can't + # run a full node in a test. 
+ systemd.services.namecoind = { + wantedBy = [ "multi-user.target" ]; + script = '' + while true; do + echo -e "HTTP/1.1 200 OK\n\n $(<${fakeReply})\n" \ + | ${pkgs.netcat}/bin/nc -N -l ::1 8332 + done + ''; + }; - with subtest("DNSKEY bit record is present"): - server.wait_for_unit("pdns-recursor") - server.wait_for_open_port(53) - server.succeed("host -t DNSKEY bit") - '') - + '' - with subtest("can resolve a .bit name"): - server.wait_for_unit("namecoind") - server.wait_for_unit("ncdns") - server.wait_for_open_port(8332) - assert "1.2.3.4" in server.succeed("dig @localhost -p 5333 test.bit") + services.ncdns = { + enable = true; + dnssec.enable = dnssec; + identity.hostname = "example.com"; + identity.hostmaster = "root@example.com"; + identity.address = "1.0.0.1"; + }; - with subtest("SOA record has identity information"): - assert "example.com" in server.succeed("dig SOA @localhost -p 5333 bit") + services.pdns-recursor.enable = true; + services.pdns-recursor.resolveNamecoin = true; - with subtest("bit. zone forwarding works"): - server.wait_for_unit("pdns-recursor") - assert "1.2.3.4" in server.succeed("host test.bit") - ''; - } -) + environment.systemPackages = [ pkgs.dnsutils ]; + }; + + testScript = + (lib.optionalString dnssec '' + with subtest("DNSSEC keys have been generated"): + server.wait_for_unit("ncdns") + server.wait_for_file("/var/lib/ncdns/bit.key") + server.wait_for_file("/var/lib/ncdns/bit-zone.key") + + with subtest("DNSKEY bit record is present"): + server.wait_for_unit("pdns-recursor") + server.wait_for_open_port(53) + server.succeed("host -t DNSKEY bit") + '') + + '' + with subtest("can resolve a .bit name"): + server.wait_for_unit("namecoind") + server.wait_for_unit("ncdns") + server.wait_for_open_port(8332) + assert "1.2.3.4" in server.succeed("dig @localhost -p 5333 test.bit") + + with subtest("SOA record has identity information"): + assert "example.com" in server.succeed("dig SOA @localhost -p 5333 bit") + + with subtest("bit. zone forwarding works"): + server.wait_for_unit("pdns-recursor") + assert "1.2.3.4" in server.succeed("host test.bit") + ''; +} diff --git a/nixos/tests/ndppd.nix b/nixos/tests/ndppd.nix index 23f1826cc3f7..465e86bfa77d 100644 --- a/nixos/tests/ndppd.nix +++ b/nixos/tests/ndppd.nix @@ -1,74 +1,72 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "ndppd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fpletz ]; - }; +{ pkgs, lib, ... }: +{ + name = "ndppd"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fpletz ]; + }; - nodes = { - upstream = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.tcpdump ]; - networking.useDHCP = false; - networking.interfaces = { - eth1 = { - ipv6.addresses = [ - { - address = "fd23::1"; - prefixLength = 112; - } - ]; - ipv6.routes = [ - { - address = "fd42::"; - prefixLength = 112; - } - ]; - }; + nodes = { + upstream = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.tcpdump ]; + networking.useDHCP = false; + networking.interfaces = { + eth1 = { + ipv6.addresses = [ + { + address = "fd23::1"; + prefixLength = 112; + } + ]; + ipv6.routes = [ + { + address = "fd42::"; + prefixLength = 112; + } + ]; }; }; - server = - { pkgs, ... 
}: - { - boot.kernel.sysctl = { - "net.ipv6.conf.all.forwarding" = "1"; - "net.ipv6.conf.default.forwarding" = "1"; - }; - environment.systemPackages = [ pkgs.tcpdump ]; - networking.useDHCP = false; - networking.interfaces = { - eth1 = { - ipv6.addresses = [ - { - address = "fd23::2"; - prefixLength = 112; - } - ]; - }; - }; - services.ndppd = { - enable = true; - proxies.eth1.rules."fd42::/112" = { }; - }; - containers.client = { - autoStart = true; - privateNetwork = true; - hostAddress = "192.168.255.1"; - localAddress = "192.168.255.2"; - hostAddress6 = "fd42::1"; - localAddress6 = "fd42::2"; - config = { }; + }; + server = + { pkgs, ... }: + { + boot.kernel.sysctl = { + "net.ipv6.conf.all.forwarding" = "1"; + "net.ipv6.conf.default.forwarding" = "1"; + }; + environment.systemPackages = [ pkgs.tcpdump ]; + networking.useDHCP = false; + networking.interfaces = { + eth1 = { + ipv6.addresses = [ + { + address = "fd23::2"; + prefixLength = 112; + } + ]; }; }; - }; + services.ndppd = { + enable = true; + proxies.eth1.rules."fd42::/112" = { }; + }; + containers.client = { + autoStart = true; + privateNetwork = true; + hostAddress = "192.168.255.1"; + localAddress = "192.168.255.2"; + hostAddress6 = "fd42::1"; + localAddress6 = "fd42::2"; + config = { }; + }; + }; + }; - testScript = '' - start_all() - server.wait_for_unit("multi-user.target") - upstream.wait_for_unit("multi-user.target") - upstream.wait_until_succeeds("ping -c5 fd42::2") - ''; - } -) + testScript = '' + start_all() + server.wait_for_unit("multi-user.target") + upstream.wait_for_unit("multi-user.target") + upstream.wait_until_succeeds("ping -c5 fd42::2") + ''; +} diff --git a/nixos/tests/nebula.nix b/nixos/tests/nebula.nix index 68a48d4c86a2..68f60f8388fd 100644 --- a/nixos/tests/nebula.nix +++ b/nixos/tests/nebula.nix @@ -1,426 +1,424 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let +{ pkgs, lib, ... }: +let - # We'll need to be able to trade cert files between nodes via scp. - inherit (import ./ssh-keys.nix pkgs) - snakeOilPrivateKey - snakeOilPublicKey - ; + # We'll need to be able to trade cert files between nodes via scp. + inherit (import ./ssh-keys.nix pkgs) + snakeOilPrivateKey + snakeOilPublicKey + ; - makeNebulaNode = - { config, ... }: - name: extraConfig: - lib.mkMerge [ - { - # Expose nebula for doing cert signing. - environment.systemPackages = [ - pkgs.dig - pkgs.nebula - ]; - users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - services.openssh.enable = true; - networking.firewall.enable = true; # Implicitly true, but let's make sure. - networking.interfaces.eth1.useDHCP = false; + makeNebulaNode = + { config, ... }: + name: extraConfig: + lib.mkMerge [ + { + # Expose nebula for doing cert signing. + environment.systemPackages = [ + pkgs.dig + pkgs.nebula + ]; + users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; + services.openssh.enable = true; + networking.firewall.enable = true; # Implicitly true, but let's make sure. + networking.interfaces.eth1.useDHCP = false; - services.nebula.networks.smoke = { - # Note that these paths won't exist when the machine is first booted. 
- ca = "/etc/nebula/ca.crt"; - cert = "/etc/nebula/${name}.crt"; - key = "/etc/nebula/${name}.key"; - listen = { - host = "0.0.0.0"; - port = - if - ( - config.services.nebula.networks.smoke.isLighthouse || config.services.nebula.networks.smoke.isRelay - ) - then - 4242 - else - 0; - }; + services.nebula.networks.smoke = { + # Note that these paths won't exist when the machine is first booted. + ca = "/etc/nebula/ca.crt"; + cert = "/etc/nebula/${name}.crt"; + key = "/etc/nebula/${name}.key"; + listen = { + host = "0.0.0.0"; + port = + if + ( + config.services.nebula.networks.smoke.isLighthouse || config.services.nebula.networks.smoke.isRelay + ) + then + 4242 + else + 0; }; - } - extraConfig - ]; + }; + } + extraConfig + ]; - in - { - name = "nebula"; +in +{ + name = "nebula"; - nodes = { + nodes = { - lighthouse = - { ... }@args: - makeNebulaNode args "lighthouse" { - networking.firewall.allowedUDPPorts = [ 53 ]; - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ - { - address = "192.168.1.1"; - prefixLength = 24; - } - ]; + lighthouse = + { ... }@args: + makeNebulaNode args "lighthouse" { + networking.firewall.allowedUDPPorts = [ 53 ]; + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = "192.168.1.1"; + prefixLength = 24; + } + ]; - services.nebula.networks.smoke = { - isLighthouse = true; - isRelay = true; - firewall = { - outbound = [ - { - port = "any"; - proto = "any"; - host = "any"; - } - ]; - inbound = [ - { - port = "any"; - proto = "any"; - host = "any"; - } - ]; - }; - lighthouse = { - dns = { - enable = true; - host = "10.0.100.1"; # bind to lighthouse interface - port = 53; # answer on standard DNS port - }; + services.nebula.networks.smoke = { + isLighthouse = true; + isRelay = true; + firewall = { + outbound = [ + { + port = "any"; + proto = "any"; + host = "any"; + } + ]; + inbound = [ + { + port = "any"; + proto = "any"; + host = "any"; + } + ]; + }; + lighthouse = { + dns = { + enable = true; + host = "10.0.100.1"; # bind to lighthouse interface + port = 53; # answer on standard DNS port }; }; }; + }; - allowAny = - { ... }@args: - makeNebulaNode args "allowAny" { - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ - { - address = "192.168.1.2"; - prefixLength = 24; - } - ]; + allowAny = + { ... }@args: + makeNebulaNode args "allowAny" { + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = "192.168.1.2"; + prefixLength = 24; + } + ]; - services.nebula.networks.smoke = { - staticHostMap = { - "10.0.100.1" = [ "192.168.1.1:4242" ]; - }; - isLighthouse = false; - lighthouses = [ "10.0.100.1" ]; - relays = [ "10.0.100.1" ]; - firewall = { - outbound = [ - { - port = "any"; - proto = "any"; - host = "any"; - } - ]; - inbound = [ - { - port = "any"; - proto = "any"; - host = "any"; - } - ]; - }; + services.nebula.networks.smoke = { + staticHostMap = { + "10.0.100.1" = [ "192.168.1.1:4242" ]; + }; + isLighthouse = false; + lighthouses = [ "10.0.100.1" ]; + relays = [ "10.0.100.1" ]; + firewall = { + outbound = [ + { + port = "any"; + proto = "any"; + host = "any"; + } + ]; + inbound = [ + { + port = "any"; + proto = "any"; + host = "any"; + } + ]; }; }; + }; - allowFromLighthouse = - { ... }@args: - makeNebulaNode args "allowFromLighthouse" { - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ - { - address = "192.168.1.3"; - prefixLength = 24; - } - ]; + allowFromLighthouse = + { ... 
}@args: + makeNebulaNode args "allowFromLighthouse" { + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = "192.168.1.3"; + prefixLength = 24; + } + ]; - services.nebula.networks.smoke = { - staticHostMap = { - "10.0.100.1" = [ "192.168.1.1:4242" ]; - }; - isLighthouse = false; - lighthouses = [ "10.0.100.1" ]; - relays = [ "10.0.100.1" ]; - firewall = { - outbound = [ - { - port = "any"; - proto = "any"; - host = "any"; - } - ]; - inbound = [ - { - port = "any"; - proto = "any"; - host = "lighthouse"; - } - ]; - }; + services.nebula.networks.smoke = { + staticHostMap = { + "10.0.100.1" = [ "192.168.1.1:4242" ]; + }; + isLighthouse = false; + lighthouses = [ "10.0.100.1" ]; + relays = [ "10.0.100.1" ]; + firewall = { + outbound = [ + { + port = "any"; + proto = "any"; + host = "any"; + } + ]; + inbound = [ + { + port = "any"; + proto = "any"; + host = "lighthouse"; + } + ]; }; }; + }; - allowToLighthouse = - { ... }@args: - makeNebulaNode args "allowToLighthouse" { - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ - { - address = "192.168.1.4"; - prefixLength = 24; - } - ]; + allowToLighthouse = + { ... }@args: + makeNebulaNode args "allowToLighthouse" { + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = "192.168.1.4"; + prefixLength = 24; + } + ]; - services.nebula.networks.smoke = { - enable = true; - staticHostMap = { - "10.0.100.1" = [ "192.168.1.1:4242" ]; - }; - isLighthouse = false; - lighthouses = [ "10.0.100.1" ]; - relays = [ "10.0.100.1" ]; - firewall = { - outbound = [ - { - port = "any"; - proto = "any"; - host = "lighthouse"; - } - ]; - inbound = [ - { - port = "any"; - proto = "any"; - host = "any"; - } - ]; - }; + services.nebula.networks.smoke = { + enable = true; + staticHostMap = { + "10.0.100.1" = [ "192.168.1.1:4242" ]; + }; + isLighthouse = false; + lighthouses = [ "10.0.100.1" ]; + relays = [ "10.0.100.1" ]; + firewall = { + outbound = [ + { + port = "any"; + proto = "any"; + host = "lighthouse"; + } + ]; + inbound = [ + { + port = "any"; + proto = "any"; + host = "any"; + } + ]; }; }; + }; - disabled = - { ... }@args: - makeNebulaNode args "disabled" { - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ - { - address = "192.168.1.5"; - prefixLength = 24; - } - ]; + disabled = + { ... 
}@args: + makeNebulaNode args "disabled" { + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = "192.168.1.5"; + prefixLength = 24; + } + ]; - services.nebula.networks.smoke = { - enable = false; - staticHostMap = { - "10.0.100.1" = [ "192.168.1.1:4242" ]; - }; - isLighthouse = false; - lighthouses = [ "10.0.100.1" ]; - relays = [ "10.0.100.1" ]; - firewall = { - outbound = [ - { - port = "any"; - proto = "any"; - host = "lighthouse"; - } - ]; - inbound = [ - { - port = "any"; - proto = "any"; - host = "any"; - } - ]; - }; + services.nebula.networks.smoke = { + enable = false; + staticHostMap = { + "10.0.100.1" = [ "192.168.1.1:4242" ]; + }; + isLighthouse = false; + lighthouses = [ "10.0.100.1" ]; + relays = [ "10.0.100.1" ]; + firewall = { + outbound = [ + { + port = "any"; + proto = "any"; + host = "lighthouse"; + } + ]; + inbound = [ + { + port = "any"; + proto = "any"; + host = "any"; + } + ]; }; }; + }; - }; + }; - testScript = - let + testScript = + let - setUpPrivateKey = name: '' - ${name}.start() - ${name}.succeed( - "mkdir -p /root/.ssh", - "chmod 700 /root/.ssh", - "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil", - "chmod 600 /root/.ssh/id_snakeoil", - "mkdir -p /root" - ) - ''; - - # From what I can tell, StrictHostKeyChecking=no is necessary for ssh to work between machines. - sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil"; - - restartAndCheckNebula = name: ip: '' - ${name}.systemctl("restart nebula@smoke.service") - ${name}.succeed("ping -c5 ${ip}") - ''; - - # Create a keypair on the client node, then use the public key to sign a cert on the lighthouse. - signKeysFor = name: ip: '' - lighthouse.wait_for_unit("sshd.service") - ${name}.wait_for_unit("sshd.service") - ${name}.succeed( - "mkdir -p /etc/nebula", - "nebula-cert keygen -out-key /etc/nebula/${name}.key -out-pub /etc/nebula/${name}.pub", - "scp ${sshOpts} /etc/nebula/${name}.pub root@192.168.1.1:/root/${name}.pub", - ) - lighthouse.succeed( - 'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /root/${name}.pub -out-crt /root/${name}.crt' - ) - ${name}.succeed( - "scp ${sshOpts} root@192.168.1.1:/root/${name}.crt /etc/nebula/${name}.crt", - "scp ${sshOpts} root@192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt", - '(id nebula-smoke >/dev/null && chown -R nebula-smoke:nebula-smoke /etc/nebula) || true' - ) - ''; - - getPublicIp = node: '' - ${node}.succeed("ip --brief addr show eth1 | awk '{print $3}' | tail -n1 | cut -d/ -f1").strip() - ''; - - # Never do this for anything security critical! (Thankfully it's just a test.) - # Restart Nebula right after the mutual block and/or restore so the state is fresh. 
- blockTrafficBetween = nodeA: nodeB: '' - node_a = ${getPublicIp nodeA} - node_b = ${getPublicIp nodeB} - ${nodeA}.succeed("iptables -I INPUT -s " + node_b + " -j DROP") - ${nodeB}.succeed("iptables -I INPUT -s " + node_a + " -j DROP") - ${nodeA}.systemctl("restart nebula@smoke.service") - ${nodeB}.systemctl("restart nebula@smoke.service") - ''; - allowTrafficBetween = nodeA: nodeB: '' - node_a = ${getPublicIp nodeA} - node_b = ${getPublicIp nodeB} - ${nodeA}.succeed("iptables -D INPUT -s " + node_b + " -j DROP") - ${nodeB}.succeed("iptables -D INPUT -s " + node_a + " -j DROP") - ${nodeA}.systemctl("restart nebula@smoke.service") - ${nodeB}.systemctl("restart nebula@smoke.service") - ''; - in - '' - # Create the certificate and sign the lighthouse's keys. - ${setUpPrivateKey "lighthouse"} - lighthouse.succeed( - "mkdir -p /etc/nebula", - 'nebula-cert ca -name "Smoke Test" -out-crt /etc/nebula/ca.crt -out-key /etc/nebula/ca.key', - 'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "lighthouse" -groups "lighthouse" -ip "10.0.100.1/24" -out-crt /etc/nebula/lighthouse.crt -out-key /etc/nebula/lighthouse.key', - 'chown -R nebula-smoke:nebula-smoke /etc/nebula' + setUpPrivateKey = name: '' + ${name}.start() + ${name}.succeed( + "mkdir -p /root/.ssh", + "chmod 700 /root/.ssh", + "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil", + "chmod 600 /root/.ssh/id_snakeoil", + "mkdir -p /root" ) - - # Reboot the lighthouse and verify that the nebula service comes up on boot. - # Since rebooting takes a while, we'll just restart the service on the other nodes. - lighthouse.shutdown() - lighthouse.start() - lighthouse.wait_for_unit("nebula@smoke.service") - lighthouse.succeed("ping -c5 10.0.100.1") - - # Create keys for allowAny's nebula service and test that it comes up. - ${setUpPrivateKey "allowAny"} - ${signKeysFor "allowAny" "10.0.100.2/24"} - ${restartAndCheckNebula "allowAny" "10.0.100.2"} - - # Create keys for allowFromLighthouse's nebula service and test that it comes up. - ${setUpPrivateKey "allowFromLighthouse"} - ${signKeysFor "allowFromLighthouse" "10.0.100.3/24"} - ${restartAndCheckNebula "allowFromLighthouse" "10.0.100.3"} - - # Create keys for allowToLighthouse's nebula service and test that it comes up. - ${setUpPrivateKey "allowToLighthouse"} - ${signKeysFor "allowToLighthouse" "10.0.100.4/24"} - ${restartAndCheckNebula "allowToLighthouse" "10.0.100.4"} - - # Create keys for disabled's nebula service and test that it does not come up. - ${setUpPrivateKey "disabled"} - ${signKeysFor "disabled" "10.0.100.5/24"} - disabled.fail("systemctl status nebula@smoke.service") - disabled.fail("ping -c5 10.0.100.5") - - # The lighthouse can ping allowAny and allowFromLighthouse but not disabled - lighthouse.succeed("ping -c3 10.0.100.2") - lighthouse.succeed("ping -c3 10.0.100.3") - lighthouse.fail("ping -c3 10.0.100.5") - - # allowAny can ping the lighthouse, but not allowFromLighthouse because of its inbound firewall - allowAny.succeed("ping -c3 10.0.100.1") - allowAny.fail("ping -c3 10.0.100.3") - # allowAny can also resolve DNS on lighthouse - allowAny.succeed("dig @10.0.100.1 allowToLighthouse | grep -E 'allowToLighthouse\.\s+[0-9]+\s+IN\s+A\s+10\.0\.100\.4'") - - # allowFromLighthouse can ping the lighthouse and allowAny - allowFromLighthouse.succeed("ping -c3 10.0.100.1") - allowFromLighthouse.succeed("ping -c3 10.0.100.2") - - # block allowFromLighthouse <-> allowAny, and allowFromLighthouse -> allowAny should still work. 
- ${blockTrafficBetween "allowFromLighthouse" "allowAny"} - allowFromLighthouse.succeed("ping -c10 10.0.100.2") - ${allowTrafficBetween "allowFromLighthouse" "allowAny"} - allowFromLighthouse.succeed("ping -c10 10.0.100.2") - - # allowToLighthouse can ping the lighthouse but not allowAny or allowFromLighthouse - allowToLighthouse.succeed("ping -c3 10.0.100.1") - allowToLighthouse.fail("ping -c3 10.0.100.2") - allowToLighthouse.fail("ping -c3 10.0.100.3") - - # allowAny can ping allowFromLighthouse now that allowFromLighthouse pinged it first - allowAny.succeed("ping -c3 10.0.100.3") - - # block allowAny <-> allowFromLighthouse, and allowAny -> allowFromLighthouse should still work. - ${blockTrafficBetween "allowAny" "allowFromLighthouse"} - allowFromLighthouse.succeed("ping -c10 10.0.100.2") - allowAny.succeed("ping -c10 10.0.100.3") - ${allowTrafficBetween "allowAny" "allowFromLighthouse"} - allowFromLighthouse.succeed("ping -c10 10.0.100.2") - allowAny.succeed("ping -c10 10.0.100.3") - - # allowToLighthouse can ping allowAny if allowAny pings it first - allowAny.succeed("ping -c3 10.0.100.4") - allowToLighthouse.succeed("ping -c3 10.0.100.2") - - # block allowToLighthouse <-> allowAny, and allowAny <-> allowToLighthouse should still work. - ${blockTrafficBetween "allowAny" "allowToLighthouse"} - allowAny.succeed("ping -c10 10.0.100.4") - allowToLighthouse.succeed("ping -c10 10.0.100.2") - ${allowTrafficBetween "allowAny" "allowToLighthouse"} - allowAny.succeed("ping -c10 10.0.100.4") - allowToLighthouse.succeed("ping -c10 10.0.100.2") - - # block lighthouse <-> allowFromLighthouse and allowAny <-> allowFromLighthouse; allowFromLighthouse won't get to allowAny - ${blockTrafficBetween "allowFromLighthouse" "lighthouse"} - ${blockTrafficBetween "allowFromLighthouse" "allowAny"} - allowFromLighthouse.fail("ping -c3 10.0.100.2") - ${allowTrafficBetween "allowFromLighthouse" "lighthouse"} - ${allowTrafficBetween "allowFromLighthouse" "allowAny"} - allowFromLighthouse.succeed("ping -c3 10.0.100.2") - - # block lighthouse <-> allowAny, allowAny <-> allowFromLighthouse, and allowAny <-> allowToLighthouse; it won't get to allowFromLighthouse or allowToLighthouse - ${blockTrafficBetween "allowAny" "lighthouse"} - ${blockTrafficBetween "allowAny" "allowFromLighthouse"} - ${blockTrafficBetween "allowAny" "allowToLighthouse"} - allowFromLighthouse.fail("ping -c3 10.0.100.2") - allowAny.fail("ping -c3 10.0.100.3") - allowAny.fail("ping -c3 10.0.100.4") - ${allowTrafficBetween "allowAny" "lighthouse"} - ${allowTrafficBetween "allowAny" "allowFromLighthouse"} - ${allowTrafficBetween "allowAny" "allowToLighthouse"} - allowFromLighthouse.succeed("ping -c3 10.0.100.2") - allowAny.succeed("ping -c3 10.0.100.3") - allowAny.succeed("ping -c3 10.0.100.4") - - # block lighthouse <-> allowToLighthouse and allowToLighthouse <-> allowAny; it won't get to allowAny - ${blockTrafficBetween "allowToLighthouse" "lighthouse"} - ${blockTrafficBetween "allowToLighthouse" "allowAny"} - allowAny.fail("ping -c3 10.0.100.4") - allowToLighthouse.fail("ping -c3 10.0.100.2") - ${allowTrafficBetween "allowToLighthouse" "lighthouse"} - ${allowTrafficBetween "allowToLighthouse" "allowAny"} - allowAny.succeed("ping -c3 10.0.100.4") - allowToLighthouse.succeed("ping -c3 10.0.100.2") ''; - } -) + + # From what I can tell, StrictHostKeyChecking=no is necessary for ssh to work between machines. 
+ sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil"; + + restartAndCheckNebula = name: ip: '' + ${name}.systemctl("restart nebula@smoke.service") + ${name}.succeed("ping -c5 ${ip}") + ''; + + # Create a keypair on the client node, then use the public key to sign a cert on the lighthouse. + signKeysFor = name: ip: '' + lighthouse.wait_for_unit("sshd.service") + ${name}.wait_for_unit("sshd.service") + ${name}.succeed( + "mkdir -p /etc/nebula", + "nebula-cert keygen -out-key /etc/nebula/${name}.key -out-pub /etc/nebula/${name}.pub", + "scp ${sshOpts} /etc/nebula/${name}.pub root@192.168.1.1:/root/${name}.pub", + ) + lighthouse.succeed( + 'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /root/${name}.pub -out-crt /root/${name}.crt' + ) + ${name}.succeed( + "scp ${sshOpts} root@192.168.1.1:/root/${name}.crt /etc/nebula/${name}.crt", + "scp ${sshOpts} root@192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt", + '(id nebula-smoke >/dev/null && chown -R nebula-smoke:nebula-smoke /etc/nebula) || true' + ) + ''; + + getPublicIp = node: '' + ${node}.succeed("ip --brief addr show eth1 | awk '{print $3}' | tail -n1 | cut -d/ -f1").strip() + ''; + + # Never do this for anything security critical! (Thankfully it's just a test.) + # Restart Nebula right after the mutual block and/or restore so the state is fresh. + blockTrafficBetween = nodeA: nodeB: '' + node_a = ${getPublicIp nodeA} + node_b = ${getPublicIp nodeB} + ${nodeA}.succeed("iptables -I INPUT -s " + node_b + " -j DROP") + ${nodeB}.succeed("iptables -I INPUT -s " + node_a + " -j DROP") + ${nodeA}.systemctl("restart nebula@smoke.service") + ${nodeB}.systemctl("restart nebula@smoke.service") + ''; + allowTrafficBetween = nodeA: nodeB: '' + node_a = ${getPublicIp nodeA} + node_b = ${getPublicIp nodeB} + ${nodeA}.succeed("iptables -D INPUT -s " + node_b + " -j DROP") + ${nodeB}.succeed("iptables -D INPUT -s " + node_a + " -j DROP") + ${nodeA}.systemctl("restart nebula@smoke.service") + ${nodeB}.systemctl("restart nebula@smoke.service") + ''; + in + '' + # Create the certificate and sign the lighthouse's keys. + ${setUpPrivateKey "lighthouse"} + lighthouse.succeed( + "mkdir -p /etc/nebula", + 'nebula-cert ca -name "Smoke Test" -out-crt /etc/nebula/ca.crt -out-key /etc/nebula/ca.key', + 'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "lighthouse" -groups "lighthouse" -ip "10.0.100.1/24" -out-crt /etc/nebula/lighthouse.crt -out-key /etc/nebula/lighthouse.key', + 'chown -R nebula-smoke:nebula-smoke /etc/nebula' + ) + + # Reboot the lighthouse and verify that the nebula service comes up on boot. + # Since rebooting takes a while, we'll just restart the service on the other nodes. + lighthouse.shutdown() + lighthouse.start() + lighthouse.wait_for_unit("nebula@smoke.service") + lighthouse.succeed("ping -c5 10.0.100.1") + + # Create keys for allowAny's nebula service and test that it comes up. + ${setUpPrivateKey "allowAny"} + ${signKeysFor "allowAny" "10.0.100.2/24"} + ${restartAndCheckNebula "allowAny" "10.0.100.2"} + + # Create keys for allowFromLighthouse's nebula service and test that it comes up. + ${setUpPrivateKey "allowFromLighthouse"} + ${signKeysFor "allowFromLighthouse" "10.0.100.3/24"} + ${restartAndCheckNebula "allowFromLighthouse" "10.0.100.3"} + + # Create keys for allowToLighthouse's nebula service and test that it comes up. 
+ ${setUpPrivateKey "allowToLighthouse"} + ${signKeysFor "allowToLighthouse" "10.0.100.4/24"} + ${restartAndCheckNebula "allowToLighthouse" "10.0.100.4"} + + # Create keys for disabled's nebula service and test that it does not come up. + ${setUpPrivateKey "disabled"} + ${signKeysFor "disabled" "10.0.100.5/24"} + disabled.fail("systemctl status nebula@smoke.service") + disabled.fail("ping -c5 10.0.100.5") + + # The lighthouse can ping allowAny and allowFromLighthouse but not disabled + lighthouse.succeed("ping -c3 10.0.100.2") + lighthouse.succeed("ping -c3 10.0.100.3") + lighthouse.fail("ping -c3 10.0.100.5") + + # allowAny can ping the lighthouse, but not allowFromLighthouse because of its inbound firewall + allowAny.succeed("ping -c3 10.0.100.1") + allowAny.fail("ping -c3 10.0.100.3") + # allowAny can also resolve DNS on lighthouse + allowAny.succeed("dig @10.0.100.1 allowToLighthouse | grep -E 'allowToLighthouse\.\s+[0-9]+\s+IN\s+A\s+10\.0\.100\.4'") + + # allowFromLighthouse can ping the lighthouse and allowAny + allowFromLighthouse.succeed("ping -c3 10.0.100.1") + allowFromLighthouse.succeed("ping -c3 10.0.100.2") + + # block allowFromLighthouse <-> allowAny, and allowFromLighthouse -> allowAny should still work. + ${blockTrafficBetween "allowFromLighthouse" "allowAny"} + allowFromLighthouse.succeed("ping -c10 10.0.100.2") + ${allowTrafficBetween "allowFromLighthouse" "allowAny"} + allowFromLighthouse.succeed("ping -c10 10.0.100.2") + + # allowToLighthouse can ping the lighthouse but not allowAny or allowFromLighthouse + allowToLighthouse.succeed("ping -c3 10.0.100.1") + allowToLighthouse.fail("ping -c3 10.0.100.2") + allowToLighthouse.fail("ping -c3 10.0.100.3") + + # allowAny can ping allowFromLighthouse now that allowFromLighthouse pinged it first + allowAny.succeed("ping -c3 10.0.100.3") + + # block allowAny <-> allowFromLighthouse, and allowAny -> allowFromLighthouse should still work. + ${blockTrafficBetween "allowAny" "allowFromLighthouse"} + allowFromLighthouse.succeed("ping -c10 10.0.100.2") + allowAny.succeed("ping -c10 10.0.100.3") + ${allowTrafficBetween "allowAny" "allowFromLighthouse"} + allowFromLighthouse.succeed("ping -c10 10.0.100.2") + allowAny.succeed("ping -c10 10.0.100.3") + + # allowToLighthouse can ping allowAny if allowAny pings it first + allowAny.succeed("ping -c3 10.0.100.4") + allowToLighthouse.succeed("ping -c3 10.0.100.2") + + # block allowToLighthouse <-> allowAny, and allowAny <-> allowToLighthouse should still work. 
+ ${blockTrafficBetween "allowAny" "allowToLighthouse"} + allowAny.succeed("ping -c10 10.0.100.4") + allowToLighthouse.succeed("ping -c10 10.0.100.2") + ${allowTrafficBetween "allowAny" "allowToLighthouse"} + allowAny.succeed("ping -c10 10.0.100.4") + allowToLighthouse.succeed("ping -c10 10.0.100.2") + + # block lighthouse <-> allowFromLighthouse and allowAny <-> allowFromLighthouse; allowFromLighthouse won't get to allowAny + ${blockTrafficBetween "allowFromLighthouse" "lighthouse"} + ${blockTrafficBetween "allowFromLighthouse" "allowAny"} + allowFromLighthouse.fail("ping -c3 10.0.100.2") + ${allowTrafficBetween "allowFromLighthouse" "lighthouse"} + ${allowTrafficBetween "allowFromLighthouse" "allowAny"} + allowFromLighthouse.succeed("ping -c3 10.0.100.2") + + # block lighthouse <-> allowAny, allowAny <-> allowFromLighthouse, and allowAny <-> allowToLighthouse; it won't get to allowFromLighthouse or allowToLighthouse + ${blockTrafficBetween "allowAny" "lighthouse"} + ${blockTrafficBetween "allowAny" "allowFromLighthouse"} + ${blockTrafficBetween "allowAny" "allowToLighthouse"} + allowFromLighthouse.fail("ping -c3 10.0.100.2") + allowAny.fail("ping -c3 10.0.100.3") + allowAny.fail("ping -c3 10.0.100.4") + ${allowTrafficBetween "allowAny" "lighthouse"} + ${allowTrafficBetween "allowAny" "allowFromLighthouse"} + ${allowTrafficBetween "allowAny" "allowToLighthouse"} + allowFromLighthouse.succeed("ping -c3 10.0.100.2") + allowAny.succeed("ping -c3 10.0.100.3") + allowAny.succeed("ping -c3 10.0.100.4") + + # block lighthouse <-> allowToLighthouse and allowToLighthouse <-> allowAny; it won't get to allowAny + ${blockTrafficBetween "allowToLighthouse" "lighthouse"} + ${blockTrafficBetween "allowToLighthouse" "allowAny"} + allowAny.fail("ping -c3 10.0.100.4") + allowToLighthouse.fail("ping -c3 10.0.100.2") + ${allowTrafficBetween "allowToLighthouse" "lighthouse"} + ${allowTrafficBetween "allowToLighthouse" "allowAny"} + allowAny.succeed("ping -c3 10.0.100.4") + allowToLighthouse.succeed("ping -c3 10.0.100.2") + ''; +} diff --git a/nixos/tests/netbird.nix b/nixos/tests/netbird.nix index f684a3104265..581c6eeeab2e 100644 --- a/nixos/tests/netbird.nix +++ b/nixos/tests/netbird.nix @@ -1,59 +1,57 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "netbird"; +{ pkgs, lib, ... }: +{ + name = "netbird"; - meta.maintainers = with pkgs.lib.maintainers; [ - nazarewk - ]; + meta.maintainers = with pkgs.lib.maintainers; [ + nazarewk + ]; - nodes = { - clients = - { ... }: - { - services.netbird.enable = true; - services.netbird.clients.custom.port = 51819; - }; - }; + nodes = { + clients = + { ... 
}: + { + services.netbird.enable = true; + services.netbird.clients.custom.port = 51819; + }; + }; - # TODO: confirm the whole solution is working end-to-end when netbird server is implemented - testScript = '' - start_all() - def did_start(node, name): - node.wait_for_unit(f"{name}.service") - node.wait_for_file(f"/var/run/{name}/sock") - output = node.succeed(f"{name} status") + # TODO: confirm the whole solution is working end-to-end when netbird server is implemented + testScript = '' + start_all() + def did_start(node, name): + node.wait_for_unit(f"{name}.service") + node.wait_for_file(f"/var/run/{name}/sock") + output = node.succeed(f"{name} status") - # not sure why, but it can print either of: - # - Daemon status: NeedsLogin - # - Management: Disconnected - expected = [ - "Disconnected", - "NeedsLogin", - ] - assert any(msg in output for msg in expected) + # not sure why, but it can print either of: + # - Daemon status: NeedsLogin + # - Management: Disconnected + expected = [ + "Disconnected", + "NeedsLogin", + ] + assert any(msg in output for msg in expected) - did_start(clients, "netbird") - did_start(clients, "netbird-custom") - ''; + did_start(clients, "netbird") + did_start(clients, "netbird-custom") + ''; - /* - `netbird status` used to print `Daemon status: NeedsLogin` - https://github.com/netbirdio/netbird/blob/23a14737974e3849fa86408d136cc46db8a885d0/client/cmd/status.go#L154-L164 - as the first line, but now it is just: + /* + `netbird status` used to print `Daemon status: NeedsLogin` + https://github.com/netbirdio/netbird/blob/23a14737974e3849fa86408d136cc46db8a885d0/client/cmd/status.go#L154-L164 + as the first line, but now it is just: - Daemon version: 0.26.3 - CLI version: 0.26.3 - Management: Disconnected - Signal: Disconnected - Relays: 0/0 Available - Nameservers: 0/0 Available - FQDN: - NetBird IP: N/A - Interface type: N/A - Quantum resistance: false - Routes: - - Peers count: 0/0 Connected - */ - } -) + Daemon version: 0.26.3 + CLI version: 0.26.3 + Management: Disconnected + Signal: Disconnected + Relays: 0/0 Available + Nameservers: 0/0 Available + FQDN: + NetBird IP: N/A + Interface type: N/A + Quantum resistance: false + Routes: - + Peers count: 0/0 Connected + */ +} diff --git a/nixos/tests/netdata.nix b/nixos/tests/netdata.nix index 77c34e70e7d0..2a85c11f0542 100644 --- a/nixos/tests/netdata.nix +++ b/nixos/tests/netdata.nix @@ -1,57 +1,55 @@ # This test runs netdata and checks for data via apps.plugin -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "netdata"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - cransom - raitobezarius - ]; - }; +{ pkgs, ... }: +{ + name = "netdata"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + cransom + raitobezarius + ]; + }; - nodes = { - netdata = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ - curl - jq - netdata - ]; - services.netdata = { - enable = true; - package = pkgs.netdataCloud; - python.recommendedPythonPackages = true; + nodes = { + netdata = + { pkgs, ... 
}: + { + environment.systemPackages = with pkgs; [ + curl + jq + netdata + ]; + services.netdata = { + enable = true; + package = pkgs.netdataCloud; + python.recommendedPythonPackages = true; - configDir."apps_groups.conf" = pkgs.writeText "apps_groups.conf" '' - netdata_test: netdata - ''; - }; + configDir."apps_groups.conf" = pkgs.writeText "apps_groups.conf" '' + netdata_test: netdata + ''; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - netdata.wait_for_unit("netdata.service") + netdata.wait_for_unit("netdata.service") - # wait for the service to listen before sending a request - netdata.wait_for_open_port(19999) + # wait for the service to listen before sending a request + netdata.wait_for_open_port(19999) - # check if the netdata main page loads. - netdata.succeed("curl --fail http://127.0.0.1:19999") - netdata.succeed("sleep 4") + # check if the netdata main page loads. + netdata.succeed("curl --fail http://127.0.0.1:19999") + netdata.succeed("sleep 4") - # check if netdata api shows correct os - url = "http://127.0.0.1:19999/api/v3/info" - filter = '.agents[0].application.os.os | . == "NixOS"' - cmd = f"curl -s {url} | jq -e '{filter}'" - netdata.wait_until_succeeds(cmd) + # check if netdata api shows correct os + url = "http://127.0.0.1:19999/api/v3/info" + filter = '.agents[0].application.os.os | . == "NixOS"' + cmd = f"curl -s {url} | jq -e '{filter}'" + netdata.wait_until_succeeds(cmd) - # check if the control socket is available - netdata.succeed("sudo netdatacli ping") - ''; - } -) + # check if the control socket is available + netdata.succeed("sudo netdatacli ping") + ''; +} diff --git a/nixos/tests/networking-proxy.nix b/nixos/tests/networking-proxy.nix index 4592bffec9e3..c20dd297328f 100644 --- a/nixos/tests/networking-proxy.nix +++ b/nixos/tests/networking-proxy.nix @@ -11,130 +11,128 @@ let }; in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "networking-proxy"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; +{ pkgs, ... }: +{ + name = "networking-proxy"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; - nodes = { - # no proxy - machine = - { ... }: + nodes = { + # no proxy + machine = + { ... }: - default-config; + default-config; - # proxy default - machine2 = - { ... }: + # proxy default + machine2 = + { ... }: - default-config - // { - networking.proxy.default = "http://user:pass@host:port"; + default-config + // { + networking.proxy.default = "http://user:pass@host:port"; + }; + + # specific proxy options + machine3 = + { ... }: + + default-config + // { + networking.proxy = { + # useless because overridden by the next options + default = "http://user:pass@host:port"; + # advanced proxy setup + httpProxy = "123-http://user:pass@http-host:port"; + httpsProxy = "456-http://user:pass@https-host:port"; + rsyncProxy = "789-http://user:pass@rsync-host:port"; + ftpProxy = "101112-http://user:pass@ftp-host:port"; + noProxy = "131415-127.0.0.1,localhost,.localdomain"; }; + }; - # specific proxy options - machine3 = - { ... }: + # mix default + proxy options + machine4 = + { ... 
}: - default-config - // { - networking.proxy = { - # useless because overridden by the next options - default = "http://user:pass@host:port"; - # advanced proxy setup - httpProxy = "123-http://user:pass@http-host:port"; - httpsProxy = "456-http://user:pass@https-host:port"; - rsyncProxy = "789-http://user:pass@rsync-host:port"; - ftpProxy = "101112-http://user:pass@ftp-host:port"; - noProxy = "131415-127.0.0.1,localhost,.localdomain"; - }; + default-config + // { + networking.proxy = { + # open for all *_proxy env var + default = "000-http://user:pass@default-host:port"; + # except for those 2 + rsyncProxy = "123-http://user:pass@http-host:port"; + noProxy = "131415-127.0.0.1,localhost,.localdomain"; }; + }; + }; - # mix default + proxy options - machine4 = - { ... }: - - default-config - // { - networking.proxy = { - # open for all *_proxy env var - default = "000-http://user:pass@default-host:port"; - # except for those 2 - rsyncProxy = "123-http://user:pass@http-host:port"; - noProxy = "131415-127.0.0.1,localhost,.localdomain"; - }; - }; - }; - - testScript = '' - from typing import Dict, Optional + testScript = '' + from typing import Dict, Optional - def get_machine_env(machine: Machine, user: Optional[str] = None) -> Dict[str, str]: - """ - Gets the environment from a given machine, and returns it as a - dictionary in the form: - {"lowercase_var_name": "value"} + def get_machine_env(machine: Machine, user: Optional[str] = None) -> Dict[str, str]: + """ + Gets the environment from a given machine, and returns it as a + dictionary in the form: + {"lowercase_var_name": "value"} - Duplicate environment variables with the same name - (e.g. "foo" and "FOO") are handled in an undefined manner. - """ - if user is not None: - env = machine.succeed("su - {} -c 'env -0'".format(user)) - else: - env = machine.succeed("env -0") - ret = {} - for line in env.split("\0"): - if "=" not in line: - continue + Duplicate environment variables with the same name + (e.g. "foo" and "FOO") are handled in an undefined manner. 
+ """ + if user is not None: + env = machine.succeed("su - {} -c 'env -0'".format(user)) + else: + env = machine.succeed("env -0") + ret = {} + for line in env.split("\0"): + if "=" not in line: + continue - key, val = line.split("=", 1) - ret[key.lower()] = val - return ret + key, val = line.split("=", 1) + ret[key.lower()] = val + return ret - start_all() + start_all() - with subtest("no proxy"): - assert "proxy" not in machine.succeed("env").lower() - assert "proxy" not in machine.succeed("su - alice -c env").lower() + with subtest("no proxy"): + assert "proxy" not in machine.succeed("env").lower() + assert "proxy" not in machine.succeed("su - alice -c env").lower() - with subtest("default proxy"): - assert "proxy" in machine2.succeed("env").lower() - assert "proxy" in machine2.succeed("su - alice -c env").lower() + with subtest("default proxy"): + assert "proxy" in machine2.succeed("env").lower() + assert "proxy" in machine2.succeed("su - alice -c env").lower() - with subtest("explicitly-set proxy"): - env = get_machine_env(machine3) - assert "123" in env["http_proxy"] - assert "456" in env["https_proxy"] - assert "789" in env["rsync_proxy"] - assert "101112" in env["ftp_proxy"] - assert "131415" in env["no_proxy"] + with subtest("explicitly-set proxy"): + env = get_machine_env(machine3) + assert "123" in env["http_proxy"] + assert "456" in env["https_proxy"] + assert "789" in env["rsync_proxy"] + assert "101112" in env["ftp_proxy"] + assert "131415" in env["no_proxy"] - env = get_machine_env(machine3, "alice") - assert "123" in env["http_proxy"] - assert "456" in env["https_proxy"] - assert "789" in env["rsync_proxy"] - assert "101112" in env["ftp_proxy"] - assert "131415" in env["no_proxy"] + env = get_machine_env(machine3, "alice") + assert "123" in env["http_proxy"] + assert "456" in env["https_proxy"] + assert "789" in env["rsync_proxy"] + assert "101112" in env["ftp_proxy"] + assert "131415" in env["no_proxy"] - with subtest("default proxy + some other specifics"): - env = get_machine_env(machine4) - assert "000" in env["http_proxy"] - assert "000" in env["https_proxy"] - assert "123" in env["rsync_proxy"] - assert "000" in env["ftp_proxy"] - assert "131415" in env["no_proxy"] + with subtest("default proxy + some other specifics"): + env = get_machine_env(machine4) + assert "000" in env["http_proxy"] + assert "000" in env["https_proxy"] + assert "123" in env["rsync_proxy"] + assert "000" in env["ftp_proxy"] + assert "131415" in env["no_proxy"] - env = get_machine_env(machine4, "alice") - assert "000" in env["http_proxy"] - assert "000" in env["https_proxy"] - assert "123" in env["rsync_proxy"] - assert "000" in env["ftp_proxy"] - assert "131415" in env["no_proxy"] - ''; - } -) + env = get_machine_env(machine4, "alice") + assert "000" in env["http_proxy"] + assert "000" in env["https_proxy"] + assert "123" in env["rsync_proxy"] + assert "000" in env["ftp_proxy"] + assert "131415" in env["no_proxy"] + ''; +} diff --git a/nixos/tests/nexus.nix b/nixos/tests/nexus.nix index 9c3e65daf317..d8cad38500ed 100644 --- a/nixos/tests/nexus.nix +++ b/nixos/tests/nexus.nix @@ -3,34 +3,32 @@ # 2. nexus service can startup on server (creating database and all other initial stuff) # 3. the web application is reachable via HTTP -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "nexus"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ironpinguin ]; - }; +{ pkgs, ... 
}: +{ + name = "nexus"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ironpinguin ]; + }; - nodes = { + nodes = { - server = - { ... }: - { - virtualisation.memorySize = 2047; # qemu-system-i386 has a 2047M limit - virtualisation.diskSize = 8192; + server = + { ... }: + { + virtualisation.memorySize = 2047; # qemu-system-i386 has a 2047M limit + virtualisation.diskSize = 8192; - services.nexus.enable = true; - }; + services.nexus.enable = true; + }; - }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - server.wait_for_unit("nexus") - server.wait_for_open_port(8081) + server.wait_for_unit("nexus") + server.wait_for_open_port(8081) - server.succeed("curl -f 127.0.0.1:8081") - ''; - } -) + server.succeed("curl -f 127.0.0.1:8081") + ''; +} diff --git a/nixos/tests/nghttpx.nix b/nixos/tests/nghttpx.nix index 15acb81b12c1..a9f5cf112e2d 100644 --- a/nixos/tests/nghttpx.nix +++ b/nixos/tests/nghttpx.nix @@ -1,66 +1,64 @@ let nginxRoot = "/run/nginx"; in -import ./make-test-python.nix ( - { ... }: - { - name = "nghttpx"; - nodes = { - webserver = { - networking.firewall.allowedTCPPorts = [ 80 ]; - systemd.services.nginx = { - preStart = '' - mkdir -p ${nginxRoot} - echo "Hello world!" > ${nginxRoot}/hello-world.txt - ''; - }; - - services.nginx = { - enable = true; - virtualHosts.server = { - locations."/".root = nginxRoot; - }; - }; +{ ... }: +{ + name = "nghttpx"; + nodes = { + webserver = { + networking.firewall.allowedTCPPorts = [ 80 ]; + systemd.services.nginx = { + preStart = '' + mkdir -p ${nginxRoot} + echo "Hello world!" > ${nginxRoot}/hello-world.txt + ''; }; - proxy = { - networking.firewall.allowedTCPPorts = [ 80 ]; - services.nghttpx = { - enable = true; - frontends = [ - { - server = { - host = "*"; - port = 80; - }; - - params = { - tls = "no-tls"; - }; - } - ]; - backends = [ - { - server = { - host = "webserver"; - port = 80; - }; - patterns = [ "/" ]; - params.proto = "http/1.1"; - } - ]; + services.nginx = { + enable = true; + virtualHosts.server = { + locations."/".root = nginxRoot; }; }; - - client = { }; }; - testScript = '' - start_all() + proxy = { + networking.firewall.allowedTCPPorts = [ 80 ]; + services.nghttpx = { + enable = true; + frontends = [ + { + server = { + host = "*"; + port = 80; + }; - webserver.wait_for_open_port(80) - proxy.wait_for_open_port(80) - client.wait_until_succeeds("curl -s --fail http://proxy/hello-world.txt") - ''; - } -) + params = { + tls = "no-tls"; + }; + } + ]; + backends = [ + { + server = { + host = "webserver"; + port = 80; + }; + patterns = [ "/" ]; + params.proto = "http/1.1"; + } + ]; + }; + }; + + client = { }; + }; + + testScript = '' + start_all() + + webserver.wait_for_open_port(80) + proxy.wait_for_open_port(80) + client.wait_until_succeeds("curl -s --fail http://proxy/hello-world.txt") + ''; +} diff --git a/nixos/tests/nginx-njs.nix b/nixos/tests/nginx-njs.nix index 7e9e911ee02f..a64bbad08e10 100644 --- a/nixos/tests/nginx-njs.nix +++ b/nixos/tests/nginx-njs.nix @@ -1,37 +1,35 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "nginx-njs"; +{ pkgs, lib, ... }: +{ + name = "nginx-njs"; - nodes.machine = - { - config, - lib, - pkgs, - ... 
- }: - { - services.nginx = { - enable = true; - additionalModules = [ pkgs.nginxModules.njs ]; - commonHttpConfig = '' - js_import http from ${builtins.toFile "http.js" '' - function hello(r) { - r.return(200, "Hello world!"); - } - export default {hello}; - ''}; - ''; - virtualHosts."localhost".locations."/".extraConfig = '' - js_content http.hello; - ''; - }; + nodes.machine = + { + config, + lib, + pkgs, + ... + }: + { + services.nginx = { + enable = true; + additionalModules = [ pkgs.nginxModules.njs ]; + commonHttpConfig = '' + js_import http from ${builtins.toFile "http.js" '' + function hello(r) { + r.return(200, "Hello world!"); + } + export default {hello}; + ''}; + ''; + virtualHosts."localhost".locations."/".extraConfig = '' + js_content http.hello; + ''; }; - testScript = '' - machine.wait_for_unit("nginx") + }; + testScript = '' + machine.wait_for_unit("nginx") - response = machine.wait_until_succeeds("curl -fvvv -s http://127.0.0.1/") - assert "Hello world!" == response, f"Expected 'Hello world!', got '{response}'" - ''; - } -) + response = machine.wait_until_succeeds("curl -fvvv -s http://127.0.0.1/") + assert "Hello world!" == response, f"Expected 'Hello world!', got '{response}'" + ''; +} diff --git a/nixos/tests/nimdow.nix b/nixos/tests/nimdow.nix index d71fcf35acd8..7d1d815bcd13 100644 --- a/nixos/tests/nimdow.nix +++ b/nixos/tests/nimdow.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "nimdow"; - meta = with pkgs.lib.maintainers; { - maintainers = [ marcusramberg ]; +{ pkgs, ... }: +{ + name = "nimdow"; + meta = with pkgs.lib.maintainers; { + maintainers = [ marcusramberg ]; + }; + + nodes.machine = + { lib, ... }: + { + imports = [ + ./common/x11.nix + ./common/user-account.nix + ]; + test-support.displayManager.auto.user = "alice"; + services.displayManager.defaultSession = lib.mkForce "none+nimdow"; + services.xserver.windowManager.nimdow.enable = true; }; - nodes.machine = - { lib, ... }: - { - imports = [ - ./common/x11.nix - ./common/user-account.nix - ]; - test-support.displayManager.auto.user = "alice"; - services.displayManager.defaultSession = lib.mkForce "none+nimdow"; - services.xserver.windowManager.nimdow.enable = true; - }; + testScript = + { ... }: + '' + with subtest("ensure x starts"): + machine.wait_for_x() + machine.wait_for_file("/home/alice/.Xauthority") + machine.succeed("xauth merge ~alice/.Xauthority") - testScript = - { ... }: - '' - with subtest("ensure x starts"): - machine.wait_for_x() - machine.wait_for_file("/home/alice/.Xauthority") - machine.succeed("xauth merge ~alice/.Xauthority") - - with subtest("ensure we can open a new terminal"): - machine.send_key("meta_l-ret") - machine.wait_for_window(r"alice.*?machine") - machine.screenshot("terminal") - ''; - } -) + with subtest("ensure we can open a new terminal"): + machine.send_key("meta_l-ret") + machine.wait_for_window(r"alice.*?machine") + machine.screenshot("terminal") + ''; +} diff --git a/nixos/tests/nitter.nix b/nixos/tests/nitter.nix index 3b063c9f19ae..4678652927e5 100644 --- a/nixos/tests/nitter.nix +++ b/nixos/tests/nitter.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - # In a real deployment this should naturally not common from the nix store - # and be seeded via agenix or as a non-nix managed file. - # - # These credentials are from the nitter wiki and are expired. We must provide - # credentials in the correct format, otherwise nitter fails to start. 
They - # must not be valid, as unauthorized errors are handled gracefully. - sessionsFile = pkgs.writeText "sessions.jsonl" '' - {"oauth_token":"1719213587296620928-BsXY2RIJEw7fjxoNwbBemgjJhueK0m","oauth_token_secret":"N0WB0xhL4ng6WTN44aZO82SUJjz7ssI3hHez2CUhTiYqy"} - ''; - in - { - name = "nitter"; - meta.maintainers = with pkgs.lib.maintainers; [ erdnaxe ]; +let + # In a real deployment this should naturally not common from the nix store + # and be seeded via agenix or as a non-nix managed file. + # + # These credentials are from the nitter wiki and are expired. We must provide + # credentials in the correct format, otherwise nitter fails to start. They + # must not be valid, as unauthorized errors are handled gracefully. + sessionsFile = pkgs.writeText "sessions.jsonl" '' + {"oauth_token":"1719213587296620928-BsXY2RIJEw7fjxoNwbBemgjJhueK0m","oauth_token_secret":"N0WB0xhL4ng6WTN44aZO82SUJjz7ssI3hHez2CUhTiYqy"} + ''; +in +{ + name = "nitter"; + meta.maintainers = with pkgs.lib.maintainers; [ erdnaxe ]; - nodes.machine = { - services.nitter = { - enable = true; - # Test CAP_NET_BIND_SERVICE - server.port = 80; - # Provide dummy guest accounts - inherit sessionsFile; - }; + nodes.machine = { + services.nitter = { + enable = true; + # Test CAP_NET_BIND_SERVICE + server.port = 80; + # Provide dummy guest accounts + inherit sessionsFile; }; + }; - testScript = '' - machine.wait_for_unit("nitter.service") - machine.wait_for_open_port(80) - machine.succeed("curl --fail http://localhost:80/") - ''; - } -) + testScript = '' + machine.wait_for_unit("nitter.service") + machine.wait_for_open_port(80) + machine.succeed("curl --fail http://localhost:80/") + ''; +} diff --git a/nixos/tests/nix-config.nix b/nixos/tests/nix-config.nix index 18fa3ae347aa..b39baf3f6b50 100644 --- a/nixos/tests/nix-config.nix +++ b/nixos/tests/nix-config.nix @@ -1,22 +1,20 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "nix-config"; - nodes.machine = - { pkgs, ... }: - { - nix.settings = { - nix-path = [ "nonextra=/etc/value.nix" ]; - extra-nix-path = [ "extra=/etc/value.nix" ]; - }; - environment.etc."value.nix".text = "42"; +{ pkgs, ... }: +{ + name = "nix-config"; + nodes.machine = + { pkgs, ... }: + { + nix.settings = { + nix-path = [ "nonextra=/etc/value.nix" ]; + extra-nix-path = [ "extra=/etc/value.nix" ]; }; - testScript = '' - start_all() - machine.wait_for_unit("nix-daemon.socket") - # regression test for the workaround for https://github.com/NixOS/nix/issues/9487 - print(machine.succeed("nix-instantiate --find-file extra")) - print(machine.succeed("nix-instantiate --find-file nonextra")) - ''; - } -) + environment.etc."value.nix".text = "42"; + }; + testScript = '' + start_all() + machine.wait_for_unit("nix-daemon.socket") + # regression test for the workaround for https://github.com/NixOS/nix/issues/9487 + print(machine.succeed("nix-instantiate --find-file extra")) + print(machine.succeed("nix-instantiate --find-file nonextra")) + ''; +} diff --git a/nixos/tests/nix-serve-ssh.nix b/nixos/tests/nix-serve-ssh.nix index 554ea5a69f20..e5b3adc37b3a 100644 --- a/nixos/tests/nix-serve-ssh.nix +++ b/nixos/tests/nix-serve-ssh.nix @@ -1,50 +1,48 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... 
}: - let - inherit (import ./ssh-keys.nix pkgs) - snakeOilPrivateKey - snakeOilPublicKey - ; - ssh-config = builtins.toFile "ssh.conf" '' - UserKnownHostsFile=/dev/null - StrictHostKeyChecking=no - ''; - in - { - name = "nix-ssh-serve"; - meta.maintainers = [ lib.maintainers.shlevy ]; - nodes = { - server.nix.sshServe = { - enable = true; - keys = [ snakeOilPublicKey ]; - protocol = "ssh-ng"; - }; - server.nix.package = pkgs.nix; - client.nix.package = pkgs.nix; +{ pkgs, lib, ... }: +let + inherit (import ./ssh-keys.nix pkgs) + snakeOilPrivateKey + snakeOilPublicKey + ; + ssh-config = builtins.toFile "ssh.conf" '' + UserKnownHostsFile=/dev/null + StrictHostKeyChecking=no + ''; +in +{ + name = "nix-ssh-serve"; + meta.maintainers = [ lib.maintainers.shlevy ]; + nodes = { + server.nix.sshServe = { + enable = true; + keys = [ snakeOilPublicKey ]; + protocol = "ssh-ng"; }; - testScript = '' - start_all() + server.nix.package = pkgs.nix; + client.nix.package = pkgs.nix; + }; + testScript = '' + start_all() - client.succeed("mkdir -m 700 /root/.ssh") - client.succeed( - "cat ${ssh-config} > /root/.ssh/config" - ) - client.succeed( - "cat ${snakeOilPrivateKey} > /root/.ssh/id_ecdsa" - ) - client.succeed("chmod 600 /root/.ssh/id_ecdsa") + client.succeed("mkdir -m 700 /root/.ssh") + client.succeed( + "cat ${ssh-config} > /root/.ssh/config" + ) + client.succeed( + "cat ${snakeOilPrivateKey} > /root/.ssh/id_ecdsa" + ) + client.succeed("chmod 600 /root/.ssh/id_ecdsa") - client.succeed("nix-store --add /etc/machine-id > mach-id-path") + client.succeed("nix-store --add /etc/machine-id > mach-id-path") - server.wait_for_unit("sshd") + server.wait_for_unit("sshd") - client.fail("diff /root/other-store$(cat mach-id-path) /etc/machine-id") - # Currently due to shared store this is a noop :( - client.succeed("nix copy --experimental-features 'nix-command' --to ssh-ng://nix-ssh@server $(cat mach-id-path)") - client.succeed( - "nix-store --realise $(cat mach-id-path) --store /root/other-store --substituters ssh-ng://nix-ssh@server" - ) - client.succeed("diff /root/other-store$(cat mach-id-path) /etc/machine-id") - ''; - } -) + client.fail("diff /root/other-store$(cat mach-id-path) /etc/machine-id") + # Currently due to shared store this is a noop :( + client.succeed("nix copy --experimental-features 'nix-command' --to ssh-ng://nix-ssh@server $(cat mach-id-path)") + client.succeed( + "nix-store --realise $(cat mach-id-path) --store /root/other-store --substituters ssh-ng://nix-ssh@server" + ) + client.succeed("diff /root/other-store$(cat mach-id-path) /etc/machine-id") + ''; +} diff --git a/nixos/tests/nixos-generate-config.nix b/nixos/tests/nixos-generate-config.nix index d9b2f502365e..6579b3646e21 100644 --- a/nixos/tests/nixos-generate-config.nix +++ b/nixos/tests/nixos-generate-config.nix @@ -1,52 +1,50 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "nixos-generate-config"; - meta.maintainers = with lib.maintainers; [ basvandijk ]; - nodes.machine = { - system.nixos-generate-config.configuration = '' - # OVERRIDDEN - { config, pkgs, ... 
}: { - imports = [ ./hardware-configuration.nix ]; - $bootLoaderConfig - $desktopConfiguration - } - ''; - - system.nixos-generate-config.desktopConfiguration = [ - '' - # DESKTOP - services.xserver.displayManager.gdm.enable = true; - services.xserver.desktopManager.gnome.enable = true; - '' - ]; - }; - testScript = '' - start_all() - machine.wait_for_unit("multi-user.target") - machine.succeed("nixos-generate-config") - - machine.succeed("nix-instantiate --parse /etc/nixos/configuration.nix /etc/nixos/hardware-configuration.nix") - - # Test if the configuration really is overridden - machine.succeed("grep 'OVERRIDDEN' /etc/nixos/configuration.nix") - - # Test if desktop configuration really is overridden - machine.succeed("grep 'DESKTOP' /etc/nixos/configuration.nix") - - # Test of if the Perl variable $bootLoaderConfig is spliced correctly: - machine.succeed( - "grep 'boot\\.loader\\.grub\\.enable = true;' /etc/nixos/configuration.nix" - ) - - # Test if the Perl variable $desktopConfiguration is spliced correctly - machine.succeed( - "grep 'services\\.xserver\\.desktopManager\\.gnome\\.enable = true;' /etc/nixos/configuration.nix" - ) - - machine.succeed("rm -rf /etc/nixos") - machine.succeed("nixos-generate-config --flake") - machine.succeed("nix-instantiate --parse /etc/nixos/flake.nix /etc/nixos/configuration.nix /etc/nixos/hardware-configuration.nix") +{ lib, ... }: +{ + name = "nixos-generate-config"; + meta.maintainers = with lib.maintainers; [ basvandijk ]; + nodes.machine = { + system.nixos-generate-config.configuration = '' + # OVERRIDDEN + { config, pkgs, ... }: { + imports = [ ./hardware-configuration.nix ]; + $bootLoaderConfig + $desktopConfiguration + } ''; - } -) + + system.nixos-generate-config.desktopConfiguration = [ + '' + # DESKTOP + services.xserver.displayManager.gdm.enable = true; + services.xserver.desktopManager.gnome.enable = true; + '' + ]; + }; + testScript = '' + start_all() + machine.wait_for_unit("multi-user.target") + machine.succeed("nixos-generate-config") + + machine.succeed("nix-instantiate --parse /etc/nixos/configuration.nix /etc/nixos/hardware-configuration.nix") + + # Test if the configuration really is overridden + machine.succeed("grep 'OVERRIDDEN' /etc/nixos/configuration.nix") + + # Test if desktop configuration really is overridden + machine.succeed("grep 'DESKTOP' /etc/nixos/configuration.nix") + + # Test of if the Perl variable $bootLoaderConfig is spliced correctly: + machine.succeed( + "grep 'boot\\.loader\\.grub\\.enable = true;' /etc/nixos/configuration.nix" + ) + + # Test if the Perl variable $desktopConfiguration is spliced correctly + machine.succeed( + "grep 'services\\.xserver\\.desktopManager\\.gnome\\.enable = true;' /etc/nixos/configuration.nix" + ) + + machine.succeed("rm -rf /etc/nixos") + machine.succeed("nixos-generate-config --flake") + machine.succeed("nix-instantiate --parse /etc/nixos/flake.nix /etc/nixos/configuration.nix /etc/nixos/hardware-configuration.nix") + ''; +} diff --git a/nixos/tests/nixos-test-driver/extra-python-packages.nix b/nixos/tests/nixos-test-driver/extra-python-packages.nix index 6c187e605591..700299c6a118 100644 --- a/nixos/tests/nixos-test-driver/extra-python-packages.nix +++ b/nixos/tests/nixos-test-driver/extra-python-packages.nix @@ -1,15 +1,13 @@ -import ../make-test-python.nix ( - { ... }: - { - name = "extra-python-packages"; +{ ... 
}: +{ + name = "extra-python-packages"; - extraPythonPackages = p: [ p.numpy ]; + extraPythonPackages = p: [ p.numpy ]; - nodes = { }; + nodes = { }; - testScript = '' - import numpy as np - assert str(np.zeros(4) == "array([0., 0., 0., 0.])") - ''; - } -) + testScript = '' + import numpy as np + assert str(np.zeros(4) == "array([0., 0., 0., 0.])") + ''; +} diff --git a/nixos/tests/nixseparatedebuginfod.nix b/nixos/tests/nixseparatedebuginfod.nix index bb9f8c9f43be..39fd6356340d 100644 --- a/nixos/tests/nixseparatedebuginfod.nix +++ b/nixos/tests/nixseparatedebuginfod.nix @@ -1,84 +1,82 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - secret-key = "key-name:/COlMSRbehSh6YSruJWjL+R0JXQUKuPEn96fIb+pLokEJUjcK/2Gv8Ai96D7JGay5gDeUTx5wdpPgNvum9YtwA=="; - public-key = "key-name:BCVI3Cv9hr/AIveg+yRmsuYA3lE8ecHaT4Db7pvWLcA="; - in - { - name = "nixseparatedebuginfod"; - # A binary cache with debug info and source for nix - nodes.cache = - { pkgs, ... }: - { - services.nix-serve = { - enable = true; - secretKeyFile = builtins.toFile "secret-key" secret-key; - openFirewall = true; - }; - system.extraDependencies = [ - pkgs.nix.debug - pkgs.nix.src - pkgs.sl - ]; +{ pkgs, lib, ... }: +let + secret-key = "key-name:/COlMSRbehSh6YSruJWjL+R0JXQUKuPEn96fIb+pLokEJUjcK/2Gv8Ai96D7JGay5gDeUTx5wdpPgNvum9YtwA=="; + public-key = "key-name:BCVI3Cv9hr/AIveg+yRmsuYA3lE8ecHaT4Db7pvWLcA="; +in +{ + name = "nixseparatedebuginfod"; + # A binary cache with debug info and source for nix + nodes.cache = + { pkgs, ... }: + { + services.nix-serve = { + enable = true; + secretKeyFile = builtins.toFile "secret-key" secret-key; + openFirewall = true; }; - # the machine where we need the debuginfo - nodes.machine = { - imports = [ - ../modules/installer/cd-dvd/channel.nix - ]; - services.nixseparatedebuginfod.enable = true; - nix.settings = { - substituters = lib.mkForce [ "http://cache:5000" ]; - trusted-public-keys = [ public-key ]; - }; - environment.systemPackages = [ - pkgs.valgrind - pkgs.gdb - (pkgs.writeShellScriptBin "wait_for_indexation" '' - set -x - while debuginfod-find debuginfo /run/current-system/sw/bin/nix |& grep 'File too large'; do - sleep 1; - done - '') + system.extraDependencies = [ + pkgs.nix.debug + pkgs.nix.src + pkgs.sl ]; }; - testScript = '' - start_all() - cache.wait_for_unit("nix-serve.service") - cache.wait_for_open_port(5000) - machine.wait_for_unit("nixseparatedebuginfod.service") - machine.wait_for_open_port(1949) + # the machine where we need the debuginfo + nodes.machine = { + imports = [ + ../modules/installer/cd-dvd/channel.nix + ]; + services.nixseparatedebuginfod.enable = true; + nix.settings = { + substituters = lib.mkForce [ "http://cache:5000" ]; + trusted-public-keys = [ public-key ]; + }; + environment.systemPackages = [ + pkgs.valgrind + pkgs.gdb + (pkgs.writeShellScriptBin "wait_for_indexation" '' + set -x + while debuginfod-find debuginfo /run/current-system/sw/bin/nix |& grep 'File too large'; do + sleep 1; + done + '') + ]; + }; + testScript = '' + start_all() + cache.wait_for_unit("nix-serve.service") + cache.wait_for_open_port(5000) + machine.wait_for_unit("nixseparatedebuginfod.service") + machine.wait_for_open_port(1949) - with subtest("show the config to debug the test"): - machine.succeed("nix --extra-experimental-features nix-command show-config |& logger") - machine.succeed("cat /etc/nix/nix.conf |& logger") - with subtest("check that the binary cache works"): - machine.succeed("nix-store -r ${pkgs.sl}") + with subtest("show the config to debug the 
test"): + machine.succeed("nix --extra-experimental-features nix-command show-config |& logger") + machine.succeed("cat /etc/nix/nix.conf |& logger") + with subtest("check that the binary cache works"): + machine.succeed("nix-store -r ${pkgs.sl}") - # nixseparatedebuginfod needs .drv to associate executable -> source - # on regular systems this would be provided by nixos-rebuild - machine.succeed("nix-instantiate '' -A nix") + # nixseparatedebuginfod needs .drv to associate executable -> source + # on regular systems this would be provided by nixos-rebuild + machine.succeed("nix-instantiate '' -A nix") - machine.succeed("timeout 600 wait_for_indexation") + machine.succeed("timeout 600 wait_for_indexation") - # test debuginfod-find - machine.succeed("debuginfod-find debuginfo /run/current-system/sw/bin/nix") + # test debuginfod-find + machine.succeed("debuginfod-find debuginfo /run/current-system/sw/bin/nix") - # test that gdb can fetch source - out = machine.succeed("gdb /run/current-system/sw/bin/nix --batch -x ${builtins.toFile "commands" '' - start - l - ''}") - print(out) - assert 'int main(' in out + # test that gdb can fetch source + out = machine.succeed("gdb /run/current-system/sw/bin/nix --batch -x ${builtins.toFile "commands" '' + start + l + ''}") + print(out) + assert 'int main(' in out - # test that valgrind can display location information - # this relies on the fact that valgrind complains about nix - # libgc helps in this regard, and we also ask valgrind to show leak kinds - # which are usually false positives. - out = machine.succeed("valgrind --leak-check=full --show-leak-kinds=all nix-env --version 2>&1") - print(out) - assert 'main.cc' in out - ''; - } -) + # test that valgrind can display location information + # this relies on the fact that valgrind complains about nix + # libgc helps in this regard, and we also ask valgrind to show leak kinds + # which are usually false positives. + out = machine.succeed("valgrind --leak-check=full --show-leak-kinds=all nix-env --version 2>&1") + print(out) + assert 'main.cc' in out + ''; +} diff --git a/nixos/tests/noto-fonts-cjk-qt-default-weight.nix b/nixos/tests/noto-fonts-cjk-qt-default-weight.nix index 44d9cf8083c1..636dfa3cfb72 100644 --- a/nixos/tests/noto-fonts-cjk-qt-default-weight.nix +++ b/nixos/tests/noto-fonts-cjk-qt-default-weight.nix @@ -1,37 +1,35 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "noto-fonts-cjk-qt"; - meta.maintainers = with lib.maintainers; [ oxalica ]; +{ pkgs, lib, ... 
}: +{ + name = "noto-fonts-cjk-qt"; + meta.maintainers = with lib.maintainers; [ oxalica ]; - nodes.machine = { - imports = [ ./common/x11.nix ]; - fonts = { - enableDefaultPackages = false; - fonts = [ pkgs.noto-fonts-cjk-sans ]; - }; + nodes.machine = { + imports = [ ./common/x11.nix ]; + fonts = { + enableDefaultPackages = false; + fonts = [ pkgs.noto-fonts-cjk-sans ]; }; + }; - testScript = - let - script = - pkgs.writers.writePython3 "qt-default-weight" - { - libraries = [ pkgs.python3Packages.pyqt6 ]; - } - '' - from PyQt6.QtWidgets import QApplication - from PyQt6.QtGui import QFont, QRawFont + testScript = + let + script = + pkgs.writers.writePython3 "qt-default-weight" + { + libraries = [ pkgs.python3Packages.pyqt6 ]; + } + '' + from PyQt6.QtWidgets import QApplication + from PyQt6.QtGui import QFont, QRawFont - app = QApplication([]) - f = QRawFont.fromFont(QFont("Noto Sans CJK SC", 20)) + app = QApplication([]) + f = QRawFont.fromFont(QFont("Noto Sans CJK SC", 20)) - assert f.styleName() == "Regular", f.styleName() - ''; - in - '' - machine.wait_for_x() - machine.succeed("${script}") - ''; - } -) + assert f.styleName() == "Regular", f.styleName() + ''; + in + '' + machine.wait_for_x() + machine.succeed("${script}") + ''; +} diff --git a/nixos/tests/npmrc.nix b/nixos/tests/npmrc.nix index 0377124433f9..d4209d0f85cd 100644 --- a/nixos/tests/npmrc.nix +++ b/nixos/tests/npmrc.nix @@ -1,24 +1,22 @@ -import ./make-test-python.nix ( - { ... }: - let - machineName = "machine"; - settingName = "prefix"; - settingValue = "/some/path"; - in - { - name = "npmrc"; +{ ... }: +let + machineName = "machine"; + settingName = "prefix"; + settingValue = "/some/path"; +in +{ + name = "npmrc"; - nodes."${machineName}".programs.npm = { - enable = true; - npmrc = '' - ${settingName} = ${settingValue} - ''; - }; - - testScript = '' - ${machineName}.start() - - assert ${machineName}.succeed("npm config get ${settingName}") == "${settingValue}\n" + nodes."${machineName}".programs.npm = { + enable = true; + npmrc = '' + ${settingName} = ${settingValue} ''; - } -) + }; + + testScript = '' + ${machineName}.start() + + assert ${machineName}.succeed("npm config get ${settingName}") == "${settingValue}\n" + ''; +} diff --git a/nixos/tests/nscd.nix b/nixos/tests/nscd.nix index 75e478725d49..475660e91090 100644 --- a/nixos/tests/nscd.nix +++ b/nixos/tests/nscd.nix @@ -1,151 +1,149 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - # build a getent that itself doesn't see anything in /etc/hosts and - # /etc/nsswitch.conf, by using libredirect to steer its own requests to - # /dev/null. - # This means is /has/ to go via nscd to actually resolve any of the - # additionally configured hosts. - getent' = pkgs.writeScript "getent-without-etc-hosts" '' - export NIX_REDIRECTS=/etc/hosts=/dev/null:/etc/nsswitch.conf=/dev/null - export LD_PRELOAD=${pkgs.libredirect}/lib/libredirect.so - exec getent $@ - ''; - in - { - name = "nscd"; +{ pkgs, ... }: +let + # build a getent that itself doesn't see anything in /etc/hosts and + # /etc/nsswitch.conf, by using libredirect to steer its own requests to + # /dev/null. + # This means is /has/ to go via nscd to actually resolve any of the + # additionally configured hosts. + getent' = pkgs.writeScript "getent-without-etc-hosts" '' + export NIX_REDIRECTS=/etc/hosts=/dev/null:/etc/nsswitch.conf=/dev/null + export LD_PRELOAD=${pkgs.libredirect}/lib/libredirect.so + exec getent $@ + ''; +in +{ + name = "nscd"; - nodes.machine = - { pkgs, ... 
}: - { - imports = [ common/user-account.nix ]; - networking.extraHosts = '' - 2001:db8::1 somehost.test - 192.0.2.1 somehost.test - ''; + nodes.machine = + { pkgs, ... }: + { + imports = [ common/user-account.nix ]; + networking.extraHosts = '' + 2001:db8::1 somehost.test + 192.0.2.1 somehost.test + ''; - systemd.services.sockdump = { - wantedBy = [ "multi-user.target" ]; - path = [ - # necessary for bcc to unpack kernel headers and invoke modprobe - pkgs.gnutar - pkgs.xz.bin - pkgs.kmod - ]; - environment.PYTHONUNBUFFERED = "1"; + systemd.services.sockdump = { + wantedBy = [ "multi-user.target" ]; + path = [ + # necessary for bcc to unpack kernel headers and invoke modprobe + pkgs.gnutar + pkgs.xz.bin + pkgs.kmod + ]; + environment.PYTHONUNBUFFERED = "1"; - serviceConfig = { - ExecStart = "${pkgs.sockdump}/bin/sockdump /var/run/nscd/socket"; - Restart = "on-failure"; - RestartSec = "1"; - Type = "simple"; - }; - }; - - specialisation = { - withGlibcNscd.configuration = - { ... }: - { - services.nscd.enableNsncd = false; - }; - withUnscd.configuration = - { ... }: - { - services.nscd.enableNsncd = false; - services.nscd.package = pkgs.unscd; - }; + serviceConfig = { + ExecStart = "${pkgs.sockdump}/bin/sockdump /var/run/nscd/socket"; + Restart = "on-failure"; + RestartSec = "1"; + Type = "simple"; }; }; - testScript = - { nodes, ... }: - let - specialisations = "${nodes.machine.system.build.toplevel}/specialisation"; - in - '' - # Regression test for https://github.com/NixOS/nixpkgs/issues/50273 - def test_dynamic_user(): - with subtest("DynamicUser actually allocates a user"): - assert "iamatest" in machine.succeed( - "systemd-run --pty --property=Type=oneshot --property=DynamicUser=yes --property=User=iamatest whoami" - ) + specialisation = { + withGlibcNscd.configuration = + { ... }: + { + services.nscd.enableNsncd = false; + }; + withUnscd.configuration = + { ... }: + { + services.nscd.enableNsncd = false; + services.nscd.package = pkgs.unscd; + }; + }; + }; - # Test resolution of somehost.test with getent', to make sure we go via - # nscd protocol - def test_host_lookups(): - with subtest("host lookups via nscd protocol"): - # ahosts - output = machine.succeed("${getent'} ahosts somehost.test") - assert "192.0.2.1" in output - assert "2001:db8::1" in output + testScript = + { nodes, ... 
}: + let + specialisations = "${nodes.machine.system.build.toplevel}/specialisation"; + in + '' + # Regression test for https://github.com/NixOS/nixpkgs/issues/50273 + def test_dynamic_user(): + with subtest("DynamicUser actually allocates a user"): + assert "iamatest" in machine.succeed( + "systemd-run --pty --property=Type=oneshot --property=DynamicUser=yes --property=User=iamatest whoami" + ) - # ahostsv4 - output = machine.succeed("${getent'} ahostsv4 somehost.test") - assert "192.0.2.1" in output - assert "2001:db8::1" not in output + # Test resolution of somehost.test with getent', to make sure we go via + # nscd protocol + def test_host_lookups(): + with subtest("host lookups via nscd protocol"): + # ahosts + output = machine.succeed("${getent'} ahosts somehost.test") + assert "192.0.2.1" in output + assert "2001:db8::1" in output - # ahostsv6 - output = machine.succeed("${getent'} ahostsv6 somehost.test") - assert "192.0.2.1" not in output - assert "2001:db8::1" in output + # ahostsv4 + output = machine.succeed("${getent'} ahostsv4 somehost.test") + assert "192.0.2.1" in output + assert "2001:db8::1" not in output - # reverse lookups (hosts) - assert "somehost.test" in machine.succeed("${getent'} hosts 2001:db8::1") - assert "somehost.test" in machine.succeed("${getent'} hosts 192.0.2.1") + # ahostsv6 + output = machine.succeed("${getent'} ahostsv6 somehost.test") + assert "192.0.2.1" not in output + assert "2001:db8::1" in output + + # reverse lookups (hosts) + assert "somehost.test" in machine.succeed("${getent'} hosts 2001:db8::1") + assert "somehost.test" in machine.succeed("${getent'} hosts 192.0.2.1") - # Test host resolution via nss modules works - # We rely on nss-myhostname in this case, which resolves *.localhost and - # _gateway. - # We don't need to use getent' here, as non-glibc nss modules can only be - # discovered via nscd. - def test_nss_myhostname(): - with subtest("nss-myhostname provides hostnames (ahosts)"): - # ahosts - output = machine.succeed("getent ahosts foobar.localhost") - assert "::1" in output - assert "127.0.0.1" in output + # Test host resolution via nss modules works + # We rely on nss-myhostname in this case, which resolves *.localhost and + # _gateway. + # We don't need to use getent' here, as non-glibc nss modules can only be + # discovered via nscd. + def test_nss_myhostname(): + with subtest("nss-myhostname provides hostnames (ahosts)"): + # ahosts + output = machine.succeed("getent ahosts foobar.localhost") + assert "::1" in output + assert "127.0.0.1" in output - # ahostsv4 - output = machine.succeed("getent ahostsv4 foobar.localhost") - assert "::1" not in output - assert "127.0.0.1" in output + # ahostsv4 + output = machine.succeed("getent ahostsv4 foobar.localhost") + assert "::1" not in output + assert "127.0.0.1" in output - # ahostsv6 - output = machine.succeed("getent ahostsv6 foobar.localhost") - assert "::1" in output - assert "127.0.0.1" not in output + # ahostsv6 + output = machine.succeed("getent ahostsv6 foobar.localhost") + assert "::1" in output + assert "127.0.0.1" not in output - start_all() - machine.wait_for_unit("default.target") + start_all() + machine.wait_for_unit("default.target") - # give sockdump some time to finish attaching. - machine.sleep(5) + # give sockdump some time to finish attaching. + machine.sleep(5) - # Test all tests with glibc-nscd. - test_dynamic_user() - test_host_lookups() - test_nss_myhostname() + # Test all tests with glibc-nscd. 
+ test_dynamic_user() + test_host_lookups() + test_nss_myhostname() - with subtest("glibc-nscd"): - machine.succeed('${specialisations}/withGlibcNscd/bin/switch-to-configuration test') - machine.wait_for_unit("default.target") + with subtest("glibc-nscd"): + machine.succeed('${specialisations}/withGlibcNscd/bin/switch-to-configuration test') + machine.wait_for_unit("default.target") - test_dynamic_user() - test_host_lookups() - test_nss_myhostname() + test_dynamic_user() + test_host_lookups() + test_nss_myhostname() - with subtest("unscd"): - machine.succeed('${specialisations}/withUnscd/bin/switch-to-configuration test') - machine.wait_for_unit("default.target") + with subtest("unscd"): + machine.succeed('${specialisations}/withUnscd/bin/switch-to-configuration test') + machine.wait_for_unit("default.target") - # known to fail, unscd doesn't load external NSS modules - # test_dynamic_user() + # known to fail, unscd doesn't load external NSS modules + # test_dynamic_user() - test_host_lookups() + test_host_lookups() - # known to fail, unscd doesn't load external NSS modules - # test_nss_myhostname() - ''; - } -) + # known to fail, unscd doesn't load external NSS modules + # test_nss_myhostname() + ''; +} diff --git a/nixos/tests/nsd.nix b/nixos/tests/nsd.nix index b77e28d80504..747d691a87d6 100644 --- a/nixos/tests/nsd.nix +++ b/nixos/tests/nsd.nix @@ -8,129 +8,127 @@ let environment.systemPackages = [ pkgs.bind ]; }; in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "nsd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ aszlig ]; - }; +{ pkgs, ... }: +{ + name = "nsd"; + meta = with pkgs.lib.maintainers; { + maintainers = [ aszlig ]; + }; - nodes = { - clientv4 = - { lib, nodes, ... }: - { - imports = [ common ]; - networking.nameservers = lib.mkForce [ - (lib.head nodes.server.config.networking.interfaces.eth1.ipv4.addresses).address - ]; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "192.168.0.2"; - prefixLength = 24; - } - ]; - }; + nodes = { + clientv4 = + { lib, nodes, ... }: + { + imports = [ common ]; + networking.nameservers = lib.mkForce [ + (lib.head nodes.server.config.networking.interfaces.eth1.ipv4.addresses).address + ]; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "192.168.0.2"; + prefixLength = 24; + } + ]; + }; - clientv6 = - { lib, nodes, ... }: - { - imports = [ common ]; - networking.nameservers = lib.mkForce [ - (lib.head nodes.server.config.networking.interfaces.eth1.ipv6.addresses).address - ]; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "dead:beef::2"; - prefixLength = 24; - } - ]; - }; + clientv6 = + { lib, nodes, ... }: + { + imports = [ common ]; + networking.nameservers = lib.mkForce [ + (lib.head nodes.server.config.networking.interfaces.eth1.ipv6.addresses).address + ]; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "dead:beef::2"; + prefixLength = 24; + } + ]; + }; - server = - { lib, ... }: - { - imports = [ common ]; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "192.168.0.1"; - prefixLength = 24; - } - ]; - networking.interfaces.eth1.ipv6.addresses = [ - { - address = "dead:beef::1"; - prefixLength = 64; - } - ]; - services.nsd.enable = true; - services.nsd.rootServer = true; - services.nsd.interfaces = lib.mkForce [ ]; - services.nsd.keys."tsig.example.com." = { - algorithm = "hmac-sha256"; - keyFile = pkgs.writeTextFile { - name = "tsig.example.com."; - text = "aR3FJA92+bxRSyosadsJ8Aeeav5TngQW/H/EF9veXbc="; - }; + server = + { lib, ... 
}: + { + imports = [ common ]; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "192.168.0.1"; + prefixLength = 24; + } + ]; + networking.interfaces.eth1.ipv6.addresses = [ + { + address = "dead:beef::1"; + prefixLength = 64; + } + ]; + services.nsd.enable = true; + services.nsd.rootServer = true; + services.nsd.interfaces = lib.mkForce [ ]; + services.nsd.keys."tsig.example.com." = { + algorithm = "hmac-sha256"; + keyFile = pkgs.writeTextFile { + name = "tsig.example.com."; + text = "aR3FJA92+bxRSyosadsJ8Aeeav5TngQW/H/EF9veXbc="; }; - services.nsd.zones."example.com.".data = '' - @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600 - ipv4 A 1.2.3.4 - ipv6 AAAA abcd::eeff - deleg NS ns.example.com - ns A 192.168.0.1 - ns AAAA dead:beef::1 - ''; - services.nsd.zones."example.com.".provideXFR = [ "0.0.0.0 tsig.example.com." ]; - services.nsd.zones."deleg.example.com.".data = '' - @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600 - @ A 9.8.7.6 - @ AAAA fedc::bbaa - ''; - services.nsd.zones.".".data = '' - @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600 - root A 1.8.7.4 - root AAAA acbd::4 - ''; }; - }; + services.nsd.zones."example.com.".data = '' + @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600 + ipv4 A 1.2.3.4 + ipv6 AAAA abcd::eeff + deleg NS ns.example.com + ns A 192.168.0.1 + ns AAAA dead:beef::1 + ''; + services.nsd.zones."example.com.".provideXFR = [ "0.0.0.0 tsig.example.com." ]; + services.nsd.zones."deleg.example.com.".data = '' + @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600 + @ A 9.8.7.6 + @ AAAA fedc::bbaa + ''; + services.nsd.zones.".".data = '' + @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600 + root A 1.8.7.4 + root AAAA acbd::4 + ''; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - clientv4.wait_for_unit("network.target") - clientv6.wait_for_unit("network.target") - server.wait_for_unit("nsd.service") + clientv4.wait_for_unit("network.target") + clientv6.wait_for_unit("network.target") + server.wait_for_unit("nsd.service") - with subtest("server tsig.example.com."): - expected_tsig = " secret: \"aR3FJA92+bxRSyosadsJ8Aeeav5TngQW/H/EF9veXbc=\"\n" - tsig=server.succeed("cat /var/lib/nsd/private/tsig.example.com.") - assert expected_tsig == tsig, f"Expected /var/lib/nsd/private/tsig.example.com. to contain '{expected_tsig}', but found '{tsig}'" + with subtest("server tsig.example.com."): + expected_tsig = " secret: \"aR3FJA92+bxRSyosadsJ8Aeeav5TngQW/H/EF9veXbc=\"\n" + tsig=server.succeed("cat /var/lib/nsd/private/tsig.example.com.") + assert expected_tsig == tsig, f"Expected /var/lib/nsd/private/tsig.example.com. 
to contain '{expected_tsig}', but found '{tsig}'" - def assert_host(type, rr, query, expected): - self = clientv4 if type == 4 else clientv6 - out = self.succeed(f"host -{type} -t {rr} {query}").rstrip() - self.log(f"output: {out}") - import re - assert re.search( - expected, out - ), f"DNS IPv{type} query on {query} gave '{out}' instead of '{expected}'" + def assert_host(type, rr, query, expected): + self = clientv4 if type == 4 else clientv6 + out = self.succeed(f"host -{type} -t {rr} {query}").rstrip() + self.log(f"output: {out}") + import re + assert re.search( + expected, out + ), f"DNS IPv{type} query on {query} gave '{out}' instead of '{expected}'" - for ipv in 4, 6: - with subtest(f"IPv{ipv}"): - assert_host(ipv, "a", "example.com", "has no [^ ]+ record") - assert_host(ipv, "aaaa", "example.com", "has no [^ ]+ record") + for ipv in 4, 6: + with subtest(f"IPv{ipv}"): + assert_host(ipv, "a", "example.com", "has no [^ ]+ record") + assert_host(ipv, "aaaa", "example.com", "has no [^ ]+ record") - assert_host(ipv, "soa", "example.com", "SOA.*?noc\.example\.com") - assert_host(ipv, "a", "ipv4.example.com", "address 1.2.3.4$") - assert_host(ipv, "aaaa", "ipv6.example.com", "address abcd::eeff$") + assert_host(ipv, "soa", "example.com", "SOA.*?noc\.example\.com") + assert_host(ipv, "a", "ipv4.example.com", "address 1.2.3.4$") + assert_host(ipv, "aaaa", "ipv6.example.com", "address abcd::eeff$") - assert_host(ipv, "a", "deleg.example.com", "address 9.8.7.6$") - assert_host(ipv, "aaaa", "deleg.example.com", "address fedc::bbaa$") + assert_host(ipv, "a", "deleg.example.com", "address 9.8.7.6$") + assert_host(ipv, "aaaa", "deleg.example.com", "address fedc::bbaa$") - assert_host(ipv, "a", "root", "address 1.8.7.4$") - assert_host(ipv, "aaaa", "root", "address acbd::4$") - ''; - } -) + assert_host(ipv, "a", "root", "address 1.8.7.4$") + assert_host(ipv, "aaaa", "root", "address acbd::4$") + ''; +} diff --git a/nixos/tests/ntpd-rs.nix b/nixos/tests/ntpd-rs.nix index 9459a9f4ac51..050d0a20f57d 100644 --- a/nixos/tests/ntpd-rs.nix +++ b/nixos/tests/ntpd-rs.nix @@ -1,77 +1,75 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "ntpd-rs"; +{ lib, ... }: +{ + name = "ntpd-rs"; - meta = { - maintainers = with lib.maintainers; [ fpletz ]; - }; + meta = { + maintainers = with lib.maintainers; [ fpletz ]; + }; - nodes = { - client = { - services.ntpd-rs = { - enable = true; - metrics.enable = false; - useNetworkingTimeServers = false; - settings = { - source = [ - { - mode = "server"; - address = "server"; - } - ]; - synchronization = { - minimum-agreeing-sources = 1; - }; - }; - }; - }; - server = { - networking.firewall = { - allowedTCPPorts = [ - 9975 + nodes = { + client = { + services.ntpd-rs = { + enable = true; + metrics.enable = false; + useNetworkingTimeServers = false; + settings = { + source = [ + { + mode = "server"; + address = "server"; + } ]; - allowedUDPPorts = [ - 123 - ]; - }; - - services.ntpd-rs = { - enable = true; - metrics.enable = true; - settings = { - observability = { - metrics-exporter-listen = "[::]:9975"; - }; - server = [ - { listen = "[::]:123"; } - ]; + synchronization = { + minimum-agreeing-sources = 1; }; }; }; }; + server = { + networking.firewall = { + allowedTCPPorts = [ + 9975 + ]; + allowedUDPPorts = [ + 123 + ]; + }; - testScript = - { nodes, ... 
}: - '' - start_all() + services.ntpd-rs = { + enable = true; + metrics.enable = true; + settings = { + observability = { + metrics-exporter-listen = "[::]:9975"; + }; + server = [ + { listen = "[::]:123"; } + ]; + }; + }; + }; + }; - for machine in (server, client): - machine.wait_for_unit('multi-user.target') - machine.succeed('systemctl is-active ntpd-rs.service') + testScript = + { nodes, ... }: + '' + start_all() - client.fail('systemctl is-active ntpd-rs-metrics.service') - server.succeed('systemctl is-active ntpd-rs-metrics.service') + for machine in (server, client): + machine.wait_for_unit('multi-user.target') + machine.succeed('systemctl is-active ntpd-rs.service') - server.wait_for_open_port(9975) - client.succeed('curl http://server:9975/metrics | grep ntp_uptime_seconds') - server.fail('curl --fail --connect-timeout 2 http://client:9975/metrics | grep ntp_uptime_seconds') + client.fail('systemctl is-active ntpd-rs-metrics.service') + server.succeed('systemctl is-active ntpd-rs-metrics.service') - client.succeed("ntp-ctl status | grep server:123") - server.succeed("ntp-ctl status | grep '\[::\]:123'") + server.wait_for_open_port(9975) + client.succeed('curl http://server:9975/metrics | grep ntp_uptime_seconds') + server.fail('curl --fail --connect-timeout 2 http://client:9975/metrics | grep ntp_uptime_seconds') - client.succeed("grep '^mode = \"server\"' $(systemctl status ntpd-rs | grep -oE '/nix/store[^ ]*ntpd-rs.toml')") - server.succeed("grep '^mode = \"pool\"' $(systemctl status ntpd-rs | grep -oE '/nix/store[^ ]*ntpd-rs.toml')") - ''; - } -) + client.succeed("ntp-ctl status | grep server:123") + server.succeed("ntp-ctl status | grep '\[::\]:123'") + + client.succeed("grep '^mode = \"server\"' $(systemctl status ntpd-rs | grep -oE '/nix/store[^ ]*ntpd-rs.toml')") + server.succeed("grep '^mode = \"pool\"' $(systemctl status ntpd-rs | grep -oE '/nix/store[^ ]*ntpd-rs.toml')") + ''; +} diff --git a/nixos/tests/ntpd.nix b/nixos/tests/ntpd.nix index 67a5a95e6fe5..e961cb7614a7 100644 --- a/nixos/tests/ntpd.nix +++ b/nixos/tests/ntpd.nix @@ -1,27 +1,25 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "ntpd"; +{ lib, ... }: +{ + name = "ntpd"; - meta = { - maintainers = with lib.maintainers; [ pyrox0 ]; + meta = { + maintainers = with lib.maintainers; [ pyrox0 ]; + }; + + nodes.machine = { + services.ntp = { + enable = true; }; + }; - nodes.machine = { - services.ntp = { - enable = true; - }; - }; + testScript = '' + start_all() - testScript = '' - start_all() - - machine.wait_for_unit('ntpd.service') - machine.wait_for_console_text('Listen normally on 10 eth*') - machine.succeed('systemctl is-active ntpd.service') - machine.succeed('ntpq -p') - # ntp user must be able to create drift files - machine.succeed('su -s /bin/sh -c "touch /var/lib/ntp/ntp.drift" ntp') - ''; - } -) + machine.wait_for_unit('ntpd.service') + machine.wait_for_console_text('Listen normally on 10 eth*') + machine.succeed('systemctl is-active ntpd.service') + machine.succeed('ntpq -p') + # ntp user must be able to create drift files + machine.succeed('su -s /bin/sh -c "touch /var/lib/ntp/ntp.drift" ntp') + ''; +} diff --git a/nixos/tests/nvmetcfg.nix b/nixos/tests/nvmetcfg.nix index 6f937251bb35..4720ac754378 100644 --- a/nixos/tests/nvmetcfg.nix +++ b/nixos/tests/nvmetcfg.nix @@ -1,54 +1,52 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "nvmetcfg"; +{ lib, ... 
}: +{ + name = "nvmetcfg"; - meta = { - maintainers = with lib.maintainers; [ nickcao ]; - }; + meta = { + maintainers = with lib.maintainers; [ nickcao ]; + }; - nodes = { - server = - { pkgs, ... }: - { - boot.kernelModules = [ "nvmet" ]; - environment.systemPackages = [ pkgs.nvmetcfg ]; - networking.firewall.allowedTCPPorts = [ 4420 ]; - virtualisation.emptyDiskImages = [ 512 ]; - }; - client = - { pkgs, ... }: - { - boot.kernelModules = [ "nvme-fabrics" ]; - environment.systemPackages = [ pkgs.nvme-cli ]; - }; - }; + nodes = { + server = + { pkgs, ... }: + { + boot.kernelModules = [ "nvmet" ]; + environment.systemPackages = [ pkgs.nvmetcfg ]; + networking.firewall.allowedTCPPorts = [ 4420 ]; + virtualisation.emptyDiskImages = [ 512 ]; + }; + client = + { pkgs, ... }: + { + boot.kernelModules = [ "nvme-fabrics" ]; + environment.systemPackages = [ pkgs.nvme-cli ]; + }; + }; - testScript = - let - subsystem = "nqn.2014-08.org.nixos:server"; - in - '' - import json + testScript = + let + subsystem = "nqn.2014-08.org.nixos:server"; + in + '' + import json - with subtest("Create subsystem and namespace"): - server.succeed("nvmet subsystem add ${subsystem}") - server.succeed("nvmet namespace add ${subsystem} 1 /dev/vdb") + with subtest("Create subsystem and namespace"): + server.succeed("nvmet subsystem add ${subsystem}") + server.succeed("nvmet namespace add ${subsystem} 1 /dev/vdb") - with subtest("Bind subsystem to port"): - server.wait_for_unit("network-online.target") - server.succeed("nvmet port add 1 tcp [::]:4420") - server.succeed("nvmet port add-subsystem 1 ${subsystem}") + with subtest("Bind subsystem to port"): + server.wait_for_unit("network-online.target") + server.succeed("nvmet port add 1 tcp [::]:4420") + server.succeed("nvmet port add-subsystem 1 ${subsystem}") - with subtest("Discover and connect to available subsystems"): - client.wait_for_unit("network-online.target") - assert "subnqn: ${subsystem}" in client.succeed("nvme discover --transport=tcp --traddr=server --trsvcid=4420") - client.succeed("nvme connect-all --transport=tcp --traddr=server --trsvcid=4420") + with subtest("Discover and connect to available subsystems"): + client.wait_for_unit("network-online.target") + assert "subnqn: ${subsystem}" in client.succeed("nvme discover --transport=tcp --traddr=server --trsvcid=4420") + client.succeed("nvme connect-all --transport=tcp --traddr=server --trsvcid=4420") - with subtest("Write to the connected subsystem"): - devices = json.loads(client.succeed("lsblk --nvme --paths --json"))["blockdevices"] - assert len(devices) == 1 - client.succeed(f"dd if=/dev/zero of={devices[0]['name']} bs=1M count=64") - ''; - } -) + with subtest("Write to the connected subsystem"): + devices = json.loads(client.succeed("lsblk --nvme --paths --json"))["blockdevices"] + assert len(devices) == 1 + client.succeed(f"dd if=/dev/zero of={devices[0]['name']} bs=1M count=64") + ''; +} diff --git a/nixos/tests/nzbget.nix b/nixos/tests/nzbget.nix index 0fd80c6d554a..3b5ee0ce0c43 100644 --- a/nixos/tests/nzbget.nix +++ b/nixos/tests/nzbget.nix @@ -1,56 +1,54 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "nzbget"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - aanderse - flokli - ]; - }; +{ pkgs, ... }: +{ + name = "nzbget"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + aanderse + flokli + ]; + }; - nodes = { - server = - { ... }: - { - services.nzbget.enable = true; + nodes = { + server = + { ... 
}: + { + services.nzbget.enable = true; - # provide some test settings - services.nzbget.settings = { - "MainDir" = "/var/lib/nzbget"; - "DirectRename" = true; - "DiskSpace" = 0; - "Server1.Name" = "this is a test"; - }; - - # hack, don't add (unfree) unrar to nzbget's path, - # so we can run this test in CI - systemd.services.nzbget.path = pkgs.lib.mkForce [ pkgs.p7zip ]; + # provide some test settings + services.nzbget.settings = { + "MainDir" = "/var/lib/nzbget"; + "DirectRename" = true; + "DiskSpace" = 0; + "Server1.Name" = "this is a test"; }; - }; - testScript = - { nodes, ... }: - '' - start_all() + # hack, don't add (unfree) unrar to nzbget's path, + # so we can run this test in CI + systemd.services.nzbget.path = pkgs.lib.mkForce [ pkgs.p7zip ]; + }; + }; - server.wait_for_unit("nzbget.service") - server.wait_for_unit("network.target") - server.wait_for_open_port(6789) - assert "This file is part of nzbget" in server.succeed( - "curl -f -s -u nzbget:tegbzn6789 http://127.0.0.1:6789" - ) - server.succeed( - "${pkgs.nzbget}/bin/nzbget -n -o Control_iP=127.0.0.1 -o Control_port=6789 -o Control_password=tegbzn6789 -V" - ) + testScript = + { nodes, ... }: + '' + start_all() - config = server.succeed("${nodes.server.systemd.services.nzbget.serviceConfig.ExecStart} --printconfig") + server.wait_for_unit("nzbget.service") + server.wait_for_unit("network.target") + server.wait_for_open_port(6789) + assert "This file is part of nzbget" in server.succeed( + "curl -f -s -u nzbget:tegbzn6789 http://127.0.0.1:6789" + ) + server.succeed( + "${pkgs.nzbget}/bin/nzbget -n -o Control_iP=127.0.0.1 -o Control_port=6789 -o Control_password=tegbzn6789 -V" + ) - # confirm the test settings are applied - assert 'MainDir = "/var/lib/nzbget"' in config - assert 'DirectRename = "yes"' in config - assert 'DiskSpace = "0"' in config - assert 'Server1.Name = "this is a test"' in config - ''; - } -) + config = server.succeed("${nodes.server.systemd.services.nzbget.serviceConfig.ExecStart} --printconfig") + + # confirm the test settings are applied + assert 'MainDir = "/var/lib/nzbget"' in config + assert 'DirectRename = "yes"' in config + assert 'DiskSpace = "0"' in config + assert 'Server1.Name = "this is a test"' in config + ''; +} diff --git a/nixos/tests/nzbhydra2.nix b/nixos/tests/nzbhydra2.nix index 9f6f2e98423a..b446d8236977 100644 --- a/nixos/tests/nzbhydra2.nix +++ b/nixos/tests/nzbhydra2.nix @@ -1,20 +1,18 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "nzbhydra2"; - meta.maintainers = with lib.maintainers; [ matteopacini ]; +{ lib, ... }: +{ + name = "nzbhydra2"; + meta.maintainers = with lib.maintainers; [ matteopacini ]; - nodes.machine = - { pkgs, ... }: - { - services.nzbhydra2.enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.nzbhydra2.enable = true; + }; - testScript = '' - machine.start() - machine.wait_for_unit("nzbhydra2.service") - machine.wait_for_open_port(5076) - machine.succeed("curl --fail http://localhost:5076/") - ''; - } -) + testScript = '' + machine.start() + machine.wait_for_unit("nzbhydra2.service") + machine.wait_for_open_port(5076) + machine.succeed("curl --fail http://localhost:5076/") + ''; +} diff --git a/nixos/tests/ocis.nix b/nixos/tests/ocis.nix index a529b4385663..275a13799035 100644 --- a/nixos/tests/ocis.nix +++ b/nixos/tests/ocis.nix @@ -1,217 +1,215 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... 
}: - let - # this is a demo user created by IDM_CREATE_DEMO_USERS=true - demoUser = "einstein"; - demoPassword = "relativity"; +let + # this is a demo user created by IDM_CREATE_DEMO_USERS=true + demoUser = "einstein"; + demoPassword = "relativity"; - adminUser = "admin"; - adminPassword = "hunter2"; - testRunner = - pkgs.writers.writePython3Bin "test-runner" - { - libraries = [ pkgs.python3Packages.selenium ]; - flakeIgnore = [ "E501" ]; - } - '' - import sys - from selenium.webdriver.common.by import By - from selenium.webdriver import Firefox - from selenium.webdriver.firefox.options import Options - from selenium.webdriver.support.ui import WebDriverWait - from selenium.webdriver.support import expected_conditions as EC + adminUser = "admin"; + adminPassword = "hunter2"; + testRunner = + pkgs.writers.writePython3Bin "test-runner" + { + libraries = [ pkgs.python3Packages.selenium ]; + flakeIgnore = [ "E501" ]; + } + '' + import sys + from selenium.webdriver.common.by import By + from selenium.webdriver import Firefox + from selenium.webdriver.firefox.options import Options + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions as EC - options = Options() - options.add_argument('--headless') - driver = Firefox(options=options) + options = Options() + options.add_argument('--headless') + driver = Firefox(options=options) - user = sys.argv[1] - password = sys.argv[2] - driver.implicitly_wait(20) - driver.get('https://localhost:9200/login') - wait = WebDriverWait(driver, 10) - wait.until(EC.title_contains("Sign in")) - driver.find_element(By.XPATH, '//*[@id="oc-login-username"]').send_keys(user) - driver.find_element(By.XPATH, '//*[@id="oc-login-password"]').send_keys(password) - driver.find_element(By.XPATH, '//*[@id="root"]//button').click() - wait.until(EC.title_contains("Personal")) - ''; + user = sys.argv[1] + password = sys.argv[2] + driver.implicitly_wait(20) + driver.get('https://localhost:9200/login') + wait = WebDriverWait(driver, 10) + wait.until(EC.title_contains("Sign in")) + driver.find_element(By.XPATH, '//*[@id="oc-login-username"]').send_keys(user) + driver.find_element(By.XPATH, '//*[@id="oc-login-password"]').send_keys(password) + driver.find_element(By.XPATH, '//*[@id="root"]//button').click() + wait.until(EC.title_contains("Personal")) + ''; - # This was generated with `ocis init --config-path testconfig/ --admin-password "hunter2" --insecure true`. - testConfig = '' - token_manager: - jwt_secret: kaKYgfso*d9GA-yTM.&BTOUEuMz%Ai0H - machine_auth_api_key: sGWRG1JZ&qe&pe@N1HKK4#qH*B&@xLnO - system_user_api_key: h+m4aHPUtOtUJFKrc5B2=04C=7fDZaT- - transfer_secret: 4-R6AfUjQn0P&+h2+$skf0lJqmre$j=x - system_user_id: db180e0a-b38a-4edf-a4cd-a3d358248537 - admin_user_id: ea623f50-742d-4fd0-95bb-c61767b070d4 - graph: - application: - id: 11971eab-d560-4b95-a2d4-50726676bbd0 - events: - tls_insecure: true - spaces: - insecure: true - identity: - ldap: - bind_password: ^F&Vn7@mYGYGuxr$#qm^gGy@FVq=.w=y - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE - idp: + # This was generated with `ocis init --config-path testconfig/ --admin-password "hunter2" --insecure true`. 
+ testConfig = '' + token_manager: + jwt_secret: kaKYgfso*d9GA-yTM.&BTOUEuMz%Ai0H + machine_auth_api_key: sGWRG1JZ&qe&pe@N1HKK4#qH*B&@xLnO + system_user_api_key: h+m4aHPUtOtUJFKrc5B2=04C=7fDZaT- + transfer_secret: 4-R6AfUjQn0P&+h2+$skf0lJqmre$j=x + system_user_id: db180e0a-b38a-4edf-a4cd-a3d358248537 + admin_user_id: ea623f50-742d-4fd0-95bb-c61767b070d4 + graph: + application: + id: 11971eab-d560-4b95-a2d4-50726676bbd0 + events: + tls_insecure: true + spaces: + insecure: true + identity: ldap: - bind_password: bv53IjS28x.nxth*%aRbE70%4TGNXbLU - idm: - service_user_passwords: - admin_password: hunter2 - idm_password: ^F&Vn7@mYGYGuxr$#qm^gGy@FVq=.w=y - reva_password: z-%@fWipLliR8lD#fl.0teC#9QbhJ^eb - idp_password: bv53IjS28x.nxth*%aRbE70%4TGNXbLU - proxy: + bind_password: ^F&Vn7@mYGYGuxr$#qm^gGy@FVq=.w=y + service_account: + service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + idp: + ldap: + bind_password: bv53IjS28x.nxth*%aRbE70%4TGNXbLU + idm: + service_user_passwords: + admin_password: hunter2 + idm_password: ^F&Vn7@mYGYGuxr$#qm^gGy@FVq=.w=y + reva_password: z-%@fWipLliR8lD#fl.0teC#9QbhJ^eb + idp_password: bv53IjS28x.nxth*%aRbE70%4TGNXbLU + proxy: + oidc: + insecure: true + insecure_backends: true + service_account: + service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + frontend: + app_handler: + insecure: true + archiver: + insecure: true + service_account: + service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + auth_basic: + auth_providers: + ldap: + bind_password: z-%@fWipLliR8lD#fl.0teC#9QbhJ^eb + auth_bearer: + auth_providers: oidc: insecure: true - insecure_backends: true - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE - frontend: - app_handler: - insecure: true - archiver: - insecure: true - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE - auth_basic: - auth_providers: - ldap: - bind_password: z-%@fWipLliR8lD#fl.0teC#9QbhJ^eb - auth_bearer: - auth_providers: - oidc: - insecure: true - users: - drivers: - ldap: - bind_password: z-%@fWipLliR8lD#fl.0teC#9QbhJ^eb - groups: - drivers: - ldap: - bind_password: z-%@fWipLliR8lD#fl.0teC#9QbhJ^eb - ocdav: - insecure: true - ocm: - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE - thumbnails: - thumbnail: - transfer_secret: 2%11!zAu*AYE&=d*8dfoZs8jK&5ZMm*% - webdav_allow_insecure: true - cs3_allow_insecure: true - search: - events: - tls_insecure: true - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE - audit: - events: - tls_insecure: true - settings: - service_account_ids: - - df39a290-3f3e-4e39-b67b-8b810ca2abac - sharing: - events: - tls_insecure: true - storage_users: - events: - tls_insecure: true - mount_id: ef72cb8b-809c-4592-bfd2-1df603295205 - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + users: + drivers: + ldap: + bind_password: z-%@fWipLliR8lD#fl.0teC#9QbhJ^eb + groups: + drivers: + ldap: + bind_password: z-%@fWipLliR8lD#fl.0teC#9QbhJ^eb + ocdav: + insecure: true + ocm: + service_account: + 
service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + thumbnails: + thumbnail: + transfer_secret: 2%11!zAu*AYE&=d*8dfoZs8jK&5ZMm*% + webdav_allow_insecure: true + cs3_allow_insecure: true + search: + events: + tls_insecure: true + service_account: + service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + audit: + events: + tls_insecure: true + settings: + service_account_ids: + - df39a290-3f3e-4e39-b67b-8b810ca2abac + sharing: + events: + tls_insecure: true + storage_users: + events: + tls_insecure: true + mount_id: ef72cb8b-809c-4592-bfd2-1df603295205 + service_account: + service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + notifications: notifications: - notifications: - events: - tls_insecure: true - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + events: + tls_insecure: true + service_account: + service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + nats: nats: - nats: - tls_skip_verify_client_cert: true - gateway: - storage_registry: - storage_users_mount_id: ef72cb8b-809c-4592-bfd2-1df603295205 - userlog: - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE - auth_service: - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE - clientlog: - service_account: - service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac - service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE''; - in + tls_skip_verify_client_cert: true + gateway: + storage_registry: + storage_users_mount_id: ef72cb8b-809c-4592-bfd2-1df603295205 + userlog: + service_account: + service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + auth_service: + service_account: + service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE + clientlog: + service_account: + service_account_id: df39a290-3f3e-4e39-b67b-8b810ca2abac + service_account_secret: .demKypQ$=pGl+yRar!#YaFjLYCr4YwE''; +in - { - name = "ocis"; +{ + name = "ocis"; - meta.maintainers = with lib.maintainers; [ - bhankas - ramblurr - ]; + meta.maintainers = with lib.maintainers; [ + bhankas + ramblurr + ]; - nodes.machine = - { config, ... }: - { - virtualisation.memorySize = 2048; - environment.systemPackages = [ - pkgs.firefox-unwrapped - pkgs.geckodriver - testRunner - ]; + nodes.machine = + { config, ... 
}: + { + virtualisation.memorySize = 2048; + environment.systemPackages = [ + pkgs.firefox-unwrapped + pkgs.geckodriver + testRunner + ]; - # if you do this in production, dont put secrets in this file because it will be written to the world readable nix store - environment.etc."ocis/ocis.env".text = '' - ADMIN_PASSWORD=${adminPassword} - IDM_CREATE_DEMO_USERS=true - ''; + # if you do this in production, dont put secrets in this file because it will be written to the world readable nix store + environment.etc."ocis/ocis.env".text = '' + ADMIN_PASSWORD=${adminPassword} + IDM_CREATE_DEMO_USERS=true + ''; - # if you do this in production, dont put secrets in this file because it will be written to the world readable nix store - environment.etc."ocis/config/ocis.yaml".text = testConfig; + # if you do this in production, dont put secrets in this file because it will be written to the world readable nix store + environment.etc."ocis/config/ocis.yaml".text = testConfig; - services.ocis = { - enable = true; - configDir = "/etc/ocis/config"; - environment = { - OCIS_INSECURE = "true"; - }; - environmentFile = "/etc/ocis/ocis.env"; + services.ocis = { + enable = true; + configDir = "/etc/ocis/config"; + environment = { + OCIS_INSECURE = "true"; }; + environmentFile = "/etc/ocis/ocis.env"; }; + }; - testScript = '' - start_all() - machine.wait_for_unit("ocis.service") - machine.wait_for_open_port(9200) - # wait for ocis to fully come up - machine.sleep(5) + testScript = '' + start_all() + machine.wait_for_unit("ocis.service") + machine.wait_for_open_port(9200) + # wait for ocis to fully come up + machine.sleep(5) - with subtest("ocis bin works"): - machine.succeed("${lib.getExe pkgs.ocis_5-bin} version") + with subtest("ocis bin works"): + machine.succeed("${lib.getExe pkgs.ocis_5-bin} version") - with subtest("use the web interface to log in with a demo user"): - machine.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner ${demoUser} ${demoPassword}") + with subtest("use the web interface to log in with a demo user"): + machine.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner ${demoUser} ${demoPassword}") - with subtest("use the web interface to log in with the provisioned admin user"): - machine.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner ${adminUser} ${adminPassword}") - ''; - } -) + with subtest("use the web interface to log in with the provisioned admin user"): + machine.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner ${adminUser} ${adminPassword}") + ''; +} diff --git a/nixos/tests/octoprint.nix b/nixos/tests/octoprint.nix index 9473797d5047..805b7c764b61 100644 --- a/nixos/tests/octoprint.nix +++ b/nixos/tests/octoprint.nix @@ -1,69 +1,67 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - apikey = "testapikey"; - in - { - name = "octoprint"; - meta.maintainers = with lib.maintainers; [ gador ]; +let + apikey = "testapikey"; +in +{ + name = "octoprint"; + meta.maintainers = with lib.maintainers; [ gador ]; - nodes.machine = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ jq ]; - services.octoprint = { - enable = true; - extraConfig = { - server = { - firstRun = false; - }; - api = { - enabled = true; - key = apikey; - }; - plugins = { - # these need internet access and pollute the output with connection failed errors - _disabled = [ - "softwareupdate" - "announcements" - "pluginmanager" - ]; - }; + nodes.machine = + { pkgs, ... 
}: + { + environment.systemPackages = with pkgs; [ jq ]; + services.octoprint = { + enable = true; + extraConfig = { + server = { + firstRun = false; + }; + api = { + enabled = true; + key = apikey; + }; + plugins = { + # these need internet access and pollute the output with connection failed errors + _disabled = [ + "softwareupdate" + "announcements" + "pluginmanager" + ]; }; }; }; + }; - testScript = '' - import json + testScript = '' + import json - @polling_condition - def octoprint_running(): - machine.succeed("pgrep octoprint") + @polling_condition + def octoprint_running(): + machine.succeed("pgrep octoprint") - with subtest("Wait for octoprint service to start"): - machine.wait_for_unit("octoprint.service") - machine.wait_until_succeeds("pgrep octoprint") + with subtest("Wait for octoprint service to start"): + machine.wait_for_unit("octoprint.service") + machine.wait_until_succeeds("pgrep octoprint") - with subtest("Wait for final boot"): - # this appears whe octoprint is almost finished starting - machine.wait_for_file("/var/lib/octoprint/uploads") + with subtest("Wait for final boot"): + # this appears whe octoprint is almost finished starting + machine.wait_for_file("/var/lib/octoprint/uploads") - # octoprint takes some time to start. This makes sure we'll retry just in case it takes longer - # retry-all-errors in necessary, since octoprint will report a 404 error when not yet ready - curl_cmd = "curl --retry-all-errors --connect-timeout 5 --max-time 10 --retry 5 --retry-delay 0 \ - --retry-max-time 40 -X GET --header 'X-API-Key: ${apikey}' " + # octoprint takes some time to start. This makes sure we'll retry just in case it takes longer + # retry-all-errors in necessary, since octoprint will report a 404 error when not yet ready + curl_cmd = "curl --retry-all-errors --connect-timeout 5 --max-time 10 --retry 5 --retry-delay 0 \ + --retry-max-time 40 -X GET --header 'X-API-Key: ${apikey}' " - # used to fail early, in case octoprint first starts and then crashes - with octoprint_running: # type: ignore[union-attr] - with subtest("Check for web interface"): - machine.wait_until_succeeds("curl -s localhost:5000") + # used to fail early, in case octoprint first starts and then crashes + with octoprint_running: # type: ignore[union-attr] + with subtest("Check for web interface"): + machine.wait_until_succeeds("curl -s localhost:5000") - with subtest("Check API"): - version = json.loads(machine.succeed(curl_cmd + "localhost:5000/api/version")) - server = json.loads(machine.succeed(curl_cmd + "localhost:5000/api/server")) - assert version["server"] == str("${pkgs.octoprint.version}") - assert server["safemode"] == None - ''; - } -) + with subtest("Check API"): + version = json.loads(machine.succeed(curl_cmd + "localhost:5000/api/version")) + server = json.loads(machine.succeed(curl_cmd + "localhost:5000/api/server")) + assert version["server"] == str("${pkgs.octoprint.version}") + assert server["safemode"] == None + ''; +} diff --git a/nixos/tests/odoo.nix b/nixos/tests/odoo.nix index 54120b234364..7749fec7f3d9 100644 --- a/nixos/tests/odoo.nix +++ b/nixos/tests/odoo.nix @@ -1,39 +1,37 @@ -import ./make-test-python.nix ( - { - pkgs, - lib, - package ? pkgs.odoo, - ... - }: - { - name = "odoo"; - meta.maintainers = with lib.maintainers; [ mkg20001 ]; +{ + pkgs, + lib, + package ? pkgs.odoo, + ... +}: +{ + name = "odoo"; + meta.maintainers = with lib.maintainers; [ mkg20001 ]; - nodes = { - server = - { ... 
}: - { - services.nginx = { - enable = true; - recommendedProxySettings = true; - }; - - services.odoo = { - enable = true; - package = package; - autoInit = true; - autoInitExtraFlags = [ "--without-demo=all" ]; - domain = "localhost"; - }; + nodes = { + server = + { ... }: + { + services.nginx = { + enable = true; + recommendedProxySettings = true; }; - }; - testScript = - { nodes, ... }: - '' - server.wait_for_unit("odoo.service") - server.wait_until_succeeds("curl -s http://localhost:8069/web/database/selector | grep 'Odoo'") - server.succeed("curl -s http://localhost/web/database/selector | grep 'Odoo'") - ''; - } -) + services.odoo = { + enable = true; + package = package; + autoInit = true; + autoInitExtraFlags = [ "--without-demo=all" ]; + domain = "localhost"; + }; + }; + }; + + testScript = + { nodes, ... }: + '' + server.wait_for_unit("odoo.service") + server.wait_until_succeeds("curl -s http://localhost:8069/web/database/selector | grep 'Odoo'") + server.succeed("curl -s http://localhost/web/database/selector | grep 'Odoo'") + ''; +} diff --git a/nixos/tests/oh-my-zsh.nix b/nixos/tests/oh-my-zsh.nix index 25c4e8ebfe97..026e8628c11c 100644 --- a/nixos/tests/oh-my-zsh.nix +++ b/nixos/tests/oh-my-zsh.nix @@ -1,22 +1,20 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "oh-my-zsh"; +{ pkgs, ... }: +{ + name = "oh-my-zsh"; - nodes.machine = - { pkgs, ... }: + nodes.machine = + { pkgs, ... }: - { - programs.zsh = { - enable = true; - ohMyZsh.enable = true; - }; + { + programs.zsh = { + enable = true; + ohMyZsh.enable = true; }; + }; - testScript = '' - start_all() - machine.succeed("touch ~/.zshrc") - machine.succeed("zsh -c 'source /etc/zshrc && echo $ZSH | grep oh-my-zsh-${pkgs.oh-my-zsh.version}'") - ''; - } -) + testScript = '' + start_all() + machine.succeed("touch ~/.zshrc") + machine.succeed("zsh -c 'source /etc/zshrc && echo $ZSH | grep oh-my-zsh-${pkgs.oh-my-zsh.version}'") + ''; +} diff --git a/nixos/tests/ombi.nix b/nixos/tests/ombi.nix index eeeaba5e2d0e..6d77aa3c5b70 100644 --- a/nixos/tests/ombi.nix +++ b/nixos/tests/ombi.nix @@ -1,20 +1,18 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "ombi"; - meta.maintainers = with lib.maintainers; [ woky ]; +{ + name = "ombi"; + meta.maintainers = with lib.maintainers; [ woky ]; - nodes.machine = - { pkgs, ... }: - { - services.ombi.enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.ombi.enable = true; + }; - testScript = '' - machine.wait_for_unit("ombi.service") - machine.wait_for_open_port(5000) - machine.succeed("curl --fail http://localhost:5000/") - ''; - } -) + testScript = '' + machine.wait_for_unit("ombi.service") + machine.wait_for_open_port(5000) + machine.succeed("curl --fail http://localhost:5000/") + ''; +} diff --git a/nixos/tests/openarena.nix b/nixos/tests/openarena.nix index 6ac38729d3f1..2d514bac4c73 100644 --- a/nixos/tests/openarena.nix +++ b/nixos/tests/openarena.nix @@ -1,79 +1,77 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - client = - { pkgs, ... }: +let + client = + { pkgs, ... 
}: - { - imports = [ ./common/x11.nix ]; - hardware.graphics.enable = true; - environment.systemPackages = [ pkgs.openarena ]; - }; - - in - { - name = "openarena"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fpletz ]; + { + imports = [ ./common/x11.nix ]; + hardware.graphics.enable = true; + environment.systemPackages = [ pkgs.openarena ]; }; - nodes = { - server = { - services.openarena = { - enable = true; - extraFlags = [ - "+set g_gametype 0" - "+map oa_dm7" - "+addbot Angelyss" - "+addbot Arachna" - ]; - openPorts = true; - }; - }; +in +{ + name = "openarena"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fpletz ]; + }; - client1 = client; - client2 = client; + nodes = { + server = { + services.openarena = { + enable = true; + extraFlags = [ + "+set g_gametype 0" + "+map oa_dm7" + "+addbot Angelyss" + "+addbot Arachna" + ]; + openPorts = true; + }; }; - testScript = '' - start_all() + client1 = client; + client2 = client; + }; - server.wait_for_unit("openarena") - server.wait_until_succeeds("ss --numeric --udp --listening | grep -q 27960") + testScript = '' + start_all() - client1.wait_for_x() - client2.wait_for_x() + server.wait_for_unit("openarena") + server.wait_until_succeeds("ss --numeric --udp --listening | grep -q 27960") - client1.execute("openarena +set r_fullscreen 0 +set name Foo +connect server >&2 &") - client2.execute("openarena +set r_fullscreen 0 +set name Bar +connect server >&2 &") + client1.wait_for_x() + client2.wait_for_x() - server.wait_until_succeeds( - "journalctl -u openarena -e | grep -q 'Foo.*entered the game'" - ) - server.wait_until_succeeds( - "journalctl -u openarena -e | grep -q 'Bar.*entered the game'" - ) + client1.execute("openarena +set r_fullscreen 0 +set name Foo +connect server >&2 &") + client2.execute("openarena +set r_fullscreen 0 +set name Bar +connect server >&2 &") - server.sleep(10) # wait for a while to get a nice screenshot + server.wait_until_succeeds( + "journalctl -u openarena -e | grep -q 'Foo.*entered the game'" + ) + server.wait_until_succeeds( + "journalctl -u openarena -e | grep -q 'Bar.*entered the game'" + ) - client1.screenshot("screen_client1_1") - client2.screenshot("screen_client2_1") + server.sleep(10) # wait for a while to get a nice screenshot - client1.block() + client1.screenshot("screen_client1_1") + client2.screenshot("screen_client2_1") - server.sleep(10) + client1.block() - client1.screenshot("screen_client1_2") - client2.screenshot("screen_client2_2") + server.sleep(10) - client1.unblock() + client1.screenshot("screen_client1_2") + client2.screenshot("screen_client2_2") - server.sleep(10) + client1.unblock() - client1.screenshot("screen_client1_3") - client2.screenshot("screen_client2_3") - ''; + server.sleep(10) - } -) + client1.screenshot("screen_client1_3") + client2.screenshot("screen_client2_3") + ''; + +} diff --git a/nixos/tests/openldap.nix b/nixos/tests/openldap.nix index f66aaec78048..49af853cb9a3 100644 --- a/nixos/tests/openldap.nix +++ b/nixos/tests/openldap.nix @@ -1,177 +1,175 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - dbContents = '' - dn: dc=example - objectClass: domain - dc: example +{ pkgs, ... 
}: +let + dbContents = '' + dn: dc=example + objectClass: domain + dc: example - dn: ou=users,dc=example - objectClass: organizationalUnit - ou: users - ''; + dn: ou=users,dc=example + objectClass: organizationalUnit + ou: users + ''; - ldifConfig = '' - dn: cn=config - cn: config - objectClass: olcGlobal - olcLogLevel: stats + ldifConfig = '' + dn: cn=config + cn: config + objectClass: olcGlobal + olcLogLevel: stats - dn: cn=schema,cn=config - cn: schema - objectClass: olcSchemaConfig + dn: cn=schema,cn=config + cn: schema + objectClass: olcSchemaConfig - include: file://${pkgs.openldap}/etc/schema/core.ldif - include: file://${pkgs.openldap}/etc/schema/cosine.ldif - include: file://${pkgs.openldap}/etc/schema/inetorgperson.ldif + include: file://${pkgs.openldap}/etc/schema/core.ldif + include: file://${pkgs.openldap}/etc/schema/cosine.ldif + include: file://${pkgs.openldap}/etc/schema/inetorgperson.ldif - dn: olcDatabase={0}config,cn=config - olcDatabase: {0}config - objectClass: olcDatabaseConfig - olcRootDN: cn=root,cn=config - olcRootPW: configpassword + dn: olcDatabase={0}config,cn=config + olcDatabase: {0}config + objectClass: olcDatabaseConfig + olcRootDN: cn=root,cn=config + olcRootPW: configpassword - dn: olcDatabase={1}mdb,cn=config - objectClass: olcDatabaseConfig - objectClass: olcMdbConfig - olcDatabase: {1}mdb - olcDbDirectory: /var/db/openldap - olcDbIndex: objectClass eq - olcSuffix: dc=example - olcRootDN: cn=root,dc=example - olcRootPW: notapassword - ''; + dn: olcDatabase={1}mdb,cn=config + objectClass: olcDatabaseConfig + objectClass: olcMdbConfig + olcDatabase: {1}mdb + olcDbDirectory: /var/db/openldap + olcDbIndex: objectClass eq + olcSuffix: dc=example + olcRootDN: cn=root,dc=example + olcRootPW: notapassword + ''; - ldapClientConfig = { - enable = true; - loginPam = false; - nsswitch = false; - server = "ldap://"; - base = "dc=example"; - }; + ldapClientConfig = { + enable = true; + loginPam = false; + nsswitch = false; + server = "ldap://"; + base = "dc=example"; + }; - in - { - name = "openldap"; +in +{ + name = "openldap"; - nodes.machine = - { pkgs, ... }: - { - environment.etc."openldap/root_password".text = "notapassword"; + nodes.machine = + { pkgs, ... 
}: + { + environment.etc."openldap/root_password".text = "notapassword"; - users.ldap = ldapClientConfig; + users.ldap = ldapClientConfig; - services.openldap = { - enable = true; - urlList = [ - "ldapi:///" - "ldap://" - ]; - settings = { - children = { - "cn=schema".includes = [ - "${pkgs.openldap}/etc/schema/core.ldif" - "${pkgs.openldap}/etc/schema/cosine.ldif" - "${pkgs.openldap}/etc/schema/inetorgperson.ldif" - "${pkgs.openldap}/etc/schema/nis.ldif" - ]; - "olcDatabase={0}config" = { - attrs = { - objectClass = [ "olcDatabaseConfig" ]; - olcDatabase = "{0}config"; - olcRootDN = "cn=root,cn=config"; - olcRootPW = "configpassword"; - }; + services.openldap = { + enable = true; + urlList = [ + "ldapi:///" + "ldap://" + ]; + settings = { + children = { + "cn=schema".includes = [ + "${pkgs.openldap}/etc/schema/core.ldif" + "${pkgs.openldap}/etc/schema/cosine.ldif" + "${pkgs.openldap}/etc/schema/inetorgperson.ldif" + "${pkgs.openldap}/etc/schema/nis.ldif" + ]; + "olcDatabase={0}config" = { + attrs = { + objectClass = [ "olcDatabaseConfig" ]; + olcDatabase = "{0}config"; + olcRootDN = "cn=root,cn=config"; + olcRootPW = "configpassword"; }; - "olcDatabase={1}mdb" = { - # This tests string, base64 and path values, as well as lists of string values - attrs = { - objectClass = [ - "olcDatabaseConfig" - "olcMdbConfig" - ]; - olcDatabase = "{1}mdb"; - olcDbDirectory = "/var/lib/openldap/db"; - olcSuffix = "dc=example"; - olcRootDN = { - # cn=root,dc=example - base64 = "Y249cm9vdCxkYz1leGFtcGxl"; - }; - olcRootPW = { - path = "/etc/openldap/root_password"; - }; + }; + "olcDatabase={1}mdb" = { + # This tests string, base64 and path values, as well as lists of string values + attrs = { + objectClass = [ + "olcDatabaseConfig" + "olcMdbConfig" + ]; + olcDatabase = "{1}mdb"; + olcDbDirectory = "/var/lib/openldap/db"; + olcSuffix = "dc=example"; + olcRootDN = { + # cn=root,dc=example + base64 = "Y249cm9vdCxkYz1leGFtcGxl"; + }; + olcRootPW = { + path = "/etc/openldap/root_password"; }; }; }; }; }; - - specialisation = { - declarativeContents.configuration = - { ... }: - { - services.openldap.declarativeContents."dc=example" = dbContents; - }; - mutableConfig.configuration = - { ... }: - { - services.openldap = { - declarativeContents."dc=example" = dbContents; - mutableConfig = true; - }; - }; - manualConfigDir = { - inheritParentConfig = false; - configuration = - { ... }: - { - users.ldap = ldapClientConfig; - services.openldap = { - enable = true; - configDir = "/var/db/slapd.d"; - }; - }; - }; - }; }; - testScript = - { nodes, ... 
}: - let - specializations = "${nodes.machine.system.build.toplevel}/specialisation"; - changeRootPw = '' - dn: olcDatabase={1}mdb,cn=config - changetype: modify - replace: olcRootPW - olcRootPW: foobar - ''; - in - '' - # Test startup with empty DB - machine.wait_for_unit("openldap.service") - with subtest("declarative contents"): - machine.succeed('${specializations}/declarativeContents/bin/switch-to-configuration test') - machine.wait_for_unit("openldap.service") - machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword') - machine.fail('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}') - - with subtest("mutable config"): - machine.succeed('${specializations}/mutableConfig/bin/switch-to-configuration test') - machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword') - machine.succeed('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}') - machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w foobar') - - with subtest("manual config dir"): - machine.succeed( - 'mkdir /var/db/slapd.d /var/db/openldap', - 'slapadd -F /var/db/slapd.d -n0 -l ${pkgs.writeText "config.ldif" ldifConfig}', - 'slapadd -F /var/db/slapd.d -n1 -l ${pkgs.writeText "contents.ldif" dbContents}', - 'chown -R openldap:openldap /var/db/slapd.d /var/db/openldap', - '${specializations}/manualConfigDir/bin/switch-to-configuration test', - ) - machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword') - machine.succeed('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}') - machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w foobar') + specialisation = { + declarativeContents.configuration = + { ... }: + { + services.openldap.declarativeContents."dc=example" = dbContents; + }; + mutableConfig.configuration = + { ... }: + { + services.openldap = { + declarativeContents."dc=example" = dbContents; + mutableConfig = true; + }; + }; + manualConfigDir = { + inheritParentConfig = false; + configuration = + { ... }: + { + users.ldap = ldapClientConfig; + services.openldap = { + enable = true; + configDir = "/var/db/slapd.d"; + }; + }; + }; + }; + }; + testScript = + { nodes, ... 
}: + let + specializations = "${nodes.machine.system.build.toplevel}/specialisation"; + changeRootPw = '' + dn: olcDatabase={1}mdb,cn=config + changetype: modify + replace: olcRootPW + olcRootPW: foobar ''; - } -) + in + '' + # Test startup with empty DB + machine.wait_for_unit("openldap.service") + + with subtest("declarative contents"): + machine.succeed('${specializations}/declarativeContents/bin/switch-to-configuration test') + machine.wait_for_unit("openldap.service") + machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword') + machine.fail('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}') + + with subtest("mutable config"): + machine.succeed('${specializations}/mutableConfig/bin/switch-to-configuration test') + machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword') + machine.succeed('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}') + machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w foobar') + + with subtest("manual config dir"): + machine.succeed( + 'mkdir /var/db/slapd.d /var/db/openldap', + 'slapadd -F /var/db/slapd.d -n0 -l ${pkgs.writeText "config.ldif" ldifConfig}', + 'slapadd -F /var/db/slapd.d -n1 -l ${pkgs.writeText "contents.ldif" dbContents}', + 'chown -R openldap:openldap /var/db/slapd.d /var/db/openldap', + '${specializations}/manualConfigDir/bin/switch-to-configuration test', + ) + machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword') + machine.succeed('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}') + machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w foobar') + ''; +} diff --git a/nixos/tests/openresty-lua.nix b/nixos/tests/openresty-lua.nix index 7141ef181039..8af2a0720eda 100644 --- a/nixos/tests/openresty-lua.nix +++ b/nixos/tests/openresty-lua.nix @@ -1,105 +1,103 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - luaLibs = [ - pkgs.lua.pkgs.markdown - ]; +{ pkgs, lib, ... }: +let + luaLibs = [ + pkgs.lua.pkgs.markdown + ]; - getLuaPath = lib: "${lib}/share/lua/${pkgs.lua.luaversion}/?.lua"; - luaPath = lib.concatStringsSep ";" (map getLuaPath luaLibs); - in - { - name = "openresty-lua"; - meta = with pkgs.lib.maintainers; { - maintainers = [ bbigras ]; - }; + getLuaPath = lib: "${lib}/share/lua/${pkgs.lua.luaversion}/?.lua"; + luaPath = lib.concatStringsSep ";" (map getLuaPath luaLibs); +in +{ + name = "openresty-lua"; + meta = with pkgs.lib.maintainers; { + maintainers = [ bbigras ]; + }; - nodes = { - webserver = - { pkgs, lib, ... }: - { - networking = { - extraHosts = '' - 127.0.0.1 default.test - 127.0.0.1 sandbox.test - ''; - }; - services.nginx = { - enable = true; - package = pkgs.openresty; + nodes = { + webserver = + { pkgs, lib, ... 
}: + { + networking = { + extraHosts = '' + 127.0.0.1 default.test + 127.0.0.1 sandbox.test + ''; + }; + services.nginx = { + enable = true; + package = pkgs.openresty; - commonHttpConfig = '' - lua_package_path '${luaPath};;'; - ''; + commonHttpConfig = '' + lua_package_path '${luaPath};;'; + ''; - virtualHosts."default.test" = { - default = true; - locations."/" = { - extraConfig = '' - default_type text/html; - access_by_lua ' - local markdown = require "markdown" - markdown("source") - '; - ''; - }; + virtualHosts."default.test" = { + default = true; + locations."/" = { + extraConfig = '' + default_type text/html; + access_by_lua ' + local markdown = require "markdown" + markdown("source") + '; + ''; }; + }; - virtualHosts."sandbox.test" = { - locations."/test1-write" = { - extraConfig = '' - content_by_lua_block { - local create = os.execute('${pkgs.coreutils}/bin/mkdir /tmp/test1-read') - local create = os.execute('${pkgs.coreutils}/bin/touch /tmp/test1-read/foo.txt') - local echo = os.execute('${pkgs.coreutils}/bin/echo worked > /tmp/test1-read/foo.txt') - } - ''; - }; - locations."/test1-read" = { - root = "/tmp"; - }; - locations."/test2-write" = { - extraConfig = '' - content_by_lua_block { - local create = os.execute('${pkgs.coreutils}/bin/mkdir /var/web/test2-read') - local create = os.execute('${pkgs.coreutils}/bin/touch /var/web/test2-read/bar.txt') - local echo = os.execute('${pkgs.coreutils}/bin/echo error-worked > /var/web/test2-read/bar.txt') - } - ''; - }; - locations."/test2-read" = { - root = "/var/web"; - }; + virtualHosts."sandbox.test" = { + locations."/test1-write" = { + extraConfig = '' + content_by_lua_block { + local create = os.execute('${pkgs.coreutils}/bin/mkdir /tmp/test1-read') + local create = os.execute('${pkgs.coreutils}/bin/touch /tmp/test1-read/foo.txt') + local echo = os.execute('${pkgs.coreutils}/bin/echo worked > /tmp/test1-read/foo.txt') + } + ''; + }; + locations."/test1-read" = { + root = "/tmp"; + }; + locations."/test2-write" = { + extraConfig = '' + content_by_lua_block { + local create = os.execute('${pkgs.coreutils}/bin/mkdir /var/web/test2-read') + local create = os.execute('${pkgs.coreutils}/bin/touch /var/web/test2-read/bar.txt') + local echo = os.execute('${pkgs.coreutils}/bin/echo error-worked > /var/web/test2-read/bar.txt') + } + ''; + }; + locations."/test2-read" = { + root = "/var/web"; }; }; }; - }; + }; + }; - testScript = - { nodes, ... }: - '' - url = "http://localhost" + testScript = + { nodes, ... }: + '' + url = "http://localhost" - webserver.wait_for_unit("nginx") - webserver.wait_for_open_port(80) + webserver.wait_for_unit("nginx") + webserver.wait_for_open_port(80) - http_code = webserver.succeed( - f"curl -w '%{{http_code}}' --head --fail {url}" - ) - assert http_code.split("\n")[-1] == "200" + http_code = webserver.succeed( + f"curl -w '%{{http_code}}' --head --fail {url}" + ) + assert http_code.split("\n")[-1] == "200" - # This test checks the creation and reading of a file in sandbox mode. - # Checking write in temporary folder - webserver.succeed("$(curl -vvv http://sandbox.test/test1-write)") - webserver.succeed('test "$(curl -fvvv http://sandbox.test/test1-read/foo.txt)" = worked') - # Checking write in protected folder. In sandbox mode for the nginx service, the folder /var/web is mounted - # in read-only mode. 
- webserver.succeed("mkdir -p /var/web") - webserver.succeed("chown nginx:nginx /var/web") - webserver.succeed("$(curl -vvv http://sandbox.test/test2-write)") - assert "404 Not Found" in machine.succeed( - "curl -vvv -s http://sandbox.test/test2-read/bar.txt" - ) - ''; - } -) + # This test checks the creation and reading of a file in sandbox mode. + # Checking write in temporary folder + webserver.succeed("$(curl -vvv http://sandbox.test/test1-write)") + webserver.succeed('test "$(curl -fvvv http://sandbox.test/test1-read/foo.txt)" = worked') + # Checking write in protected folder. In sandbox mode for the nginx service, the folder /var/web is mounted + # in read-only mode. + webserver.succeed("mkdir -p /var/web") + webserver.succeed("chown nginx:nginx /var/web") + webserver.succeed("$(curl -vvv http://sandbox.test/test2-write)") + assert "404 Not Found" in machine.succeed( + "curl -vvv -s http://sandbox.test/test2-read/bar.txt" + ) + ''; +} diff --git a/nixos/tests/opensnitch.nix b/nixos/tests/opensnitch.nix index dbb536e9a769..3f279037df2e 100644 --- a/nixos/tests/opensnitch.nix +++ b/nixos/tests/opensnitch.nix @@ -1,85 +1,83 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - monitorMethods = [ - "ebpf" - "proc" - "ftrace" - "audit" - ]; - in - { - name = "opensnitch"; +{ pkgs, lib, ... }: +let + monitorMethods = [ + "ebpf" + "proc" + "ftrace" + "audit" + ]; +in +{ + name = "opensnitch"; - meta = with pkgs.lib.maintainers; { - maintainers = [ onny ]; - }; + meta = with pkgs.lib.maintainers; { + maintainers = [ onny ]; + }; - nodes = - { - server = { - networking.firewall.allowedTCPPorts = [ 80 ]; - services.caddy = { - enable = true; - virtualHosts."localhost".extraConfig = '' - respond "Hello, world!" - ''; - }; + nodes = + { + server = { + networking.firewall.allowedTCPPorts = [ 80 ]; + services.caddy = { + enable = true; + virtualHosts."localhost".extraConfig = '' + respond "Hello, world!" 
+ ''; }; - } - // (lib.listToAttrs ( - map ( - m: - lib.nameValuePair "client_blocked_${m}" { - services.opensnitch = { - enable = true; - settings.DefaultAction = "deny"; - settings.ProcMonitorMethod = m; - }; - } - ) monitorMethods - )) - // (lib.listToAttrs ( - map ( - m: - lib.nameValuePair "client_allowed_${m}" { - services.opensnitch = { - enable = true; - settings.DefaultAction = "deny"; - settings.ProcMonitorMethod = m; - rules = { - curl = { - name = "curl"; - enabled = true; - action = "allow"; - duration = "always"; - operator = { - type = "simple"; - sensitive = false; - operand = "process.path"; - data = "${pkgs.curl}/bin/curl"; - }; + }; + } + // (lib.listToAttrs ( + map ( + m: + lib.nameValuePair "client_blocked_${m}" { + services.opensnitch = { + enable = true; + settings.DefaultAction = "deny"; + settings.ProcMonitorMethod = m; + }; + } + ) monitorMethods + )) + // (lib.listToAttrs ( + map ( + m: + lib.nameValuePair "client_allowed_${m}" { + services.opensnitch = { + enable = true; + settings.DefaultAction = "deny"; + settings.ProcMonitorMethod = m; + rules = { + curl = { + name = "curl"; + enabled = true; + action = "allow"; + duration = "always"; + operator = { + type = "simple"; + sensitive = false; + operand = "process.path"; + data = "${pkgs.curl}/bin/curl"; }; }; }; - } - ) monitorMethods - )); + }; + } + ) monitorMethods + )); - testScript = - '' - start_all() - server.wait_for_unit("caddy.service") - server.wait_for_open_port(80) - '' - + lib.concatLines ( - map (m: '' - client_blocked_${m}.wait_for_unit("opensnitchd.service") - client_blocked_${m}.fail("curl http://server") + testScript = + '' + start_all() + server.wait_for_unit("caddy.service") + server.wait_for_open_port(80) + '' + + lib.concatLines ( + map (m: '' + client_blocked_${m}.wait_for_unit("opensnitchd.service") + client_blocked_${m}.fail("curl http://server") - client_allowed_${m}.wait_for_unit("opensnitchd.service") - client_allowed_${m}.succeed("curl http://server") - '') monitorMethods - ); - } -) + client_allowed_${m}.wait_for_unit("opensnitchd.service") + client_allowed_${m}.succeed("curl http://server") + '') monitorMethods + ); +} diff --git a/nixos/tests/openssh.nix b/nixos/tests/openssh.nix index d7ddf478d91a..5f2b3e7969d1 100644 --- a/nixos/tests/openssh.nix +++ b/nixos/tests/openssh.nix @@ -1,359 +1,357 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - inherit (import ./ssh-keys.nix pkgs) - snakeOilPrivateKey - snakeOilPublicKey - snakeOilEd25519PrivateKey - snakeOilEd25519PublicKey - ; - in - { - name = "openssh"; - meta = with pkgs.lib.maintainers; { - maintainers = [ aszlig ]; - }; +let + inherit (import ./ssh-keys.nix pkgs) + snakeOilPrivateKey + snakeOilPublicKey + snakeOilEd25519PrivateKey + snakeOilEd25519PublicKey + ; +in +{ + name = "openssh"; + meta = with pkgs.lib.maintainers; { + maintainers = [ aszlig ]; + }; - nodes = { + nodes = { - server = - { ... }: + server = + { ... }: - { - services.openssh.enable = true; - security.pam.services.sshd.limits = [ - { - domain = "*"; - item = "memlock"; - type = "-"; - value = 1024; - } - ]; - users.users.root.openssh.authorizedKeys.keys = [ - snakeOilPublicKey - ]; - }; - - server-allowed-users = - { ... 
}: - - { - services.openssh = { - enable = true; - settings.AllowUsers = [ - "alice" - "bob" - ]; - }; - users.groups = { - alice = { }; - bob = { }; - carol = { }; - }; - users.users = { - alice = { - isNormalUser = true; - group = "alice"; - openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - }; - bob = { - isNormalUser = true; - group = "bob"; - openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - }; - carol = { - isNormalUser = true; - group = "carol"; - openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - }; - }; - }; - - server-lazy = - { ... }: - - { - services.openssh = { - enable = true; - startWhenNeeded = true; - }; - security.pam.services.sshd.limits = [ - { - domain = "*"; - item = "memlock"; - type = "-"; - value = 1024; - } - ]; - users.users.root.openssh.authorizedKeys.keys = [ - snakeOilPublicKey - ]; - }; - - server-lazy-socket = { - virtualisation.vlans = [ - 1 - 2 + { + services.openssh.enable = true; + security.pam.services.sshd.limits = [ + { + domain = "*"; + item = "memlock"; + type = "-"; + value = 1024; + } ]; + users.users.root.openssh.authorizedKeys.keys = [ + snakeOilPublicKey + ]; + }; + + server-allowed-users = + { ... }: + + { + services.openssh = { + enable = true; + settings.AllowUsers = [ + "alice" + "bob" + ]; + }; + users.groups = { + alice = { }; + bob = { }; + carol = { }; + }; + users.users = { + alice = { + isNormalUser = true; + group = "alice"; + openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; + }; + bob = { + isNormalUser = true; + group = "bob"; + openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; + }; + carol = { + isNormalUser = true; + group = "carol"; + openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; + }; + }; + }; + + server-lazy = + { ... }: + + { services.openssh = { enable = true; startWhenNeeded = true; - ports = [ 2222 ]; - listenAddresses = [ { addr = "0.0.0.0"; } ]; + }; + security.pam.services.sshd.limits = [ + { + domain = "*"; + item = "memlock"; + type = "-"; + value = 1024; + } + ]; + users.users.root.openssh.authorizedKeys.keys = [ + snakeOilPublicKey + ]; + }; + + server-lazy-socket = { + virtualisation.vlans = [ + 1 + 2 + ]; + services.openssh = { + enable = true; + startWhenNeeded = true; + ports = [ 2222 ]; + listenAddresses = [ { addr = "0.0.0.0"; } ]; + }; + users.users.root.openssh.authorizedKeys.keys = [ + snakeOilPublicKey + ]; + }; + + server-localhost-only = + { ... }: + + { + services.openssh = { + enable = true; + listenAddresses = [ + { + addr = "127.0.0.1"; + port = 22; + } + ]; + }; + }; + + server-localhost-only-lazy = + { ... }: + + { + services.openssh = { + enable = true; + startWhenNeeded = true; + listenAddresses = [ + { + addr = "127.0.0.1"; + port = 22; + } + ]; + }; + }; + + server-match-rule = + { ... }: + + { + services.openssh = { + enable = true; + listenAddresses = [ + { + addr = "127.0.0.1"; + port = 22; + } + { + addr = "[::]"; + port = 22; + } + ]; + extraConfig = '' + # Combined test for two (predictable) Match criterias + Match LocalAddress 127.0.0.1 LocalPort 22 + PermitRootLogin yes + + # Separate tests for Match criterias + Match User root + PermitRootLogin yes + Match Group root + PermitRootLogin yes + Match Host nohost.example + PermitRootLogin yes + Match LocalAddress 127.0.0.1 + PermitRootLogin yes + Match LocalPort 22 + PermitRootLogin yes + Match RDomain nohost.example + PermitRootLogin yes + Match Address 127.0.0.1 + PermitRootLogin yes + ''; + }; + }; + + server-no-openssl = + { ... 
}: + { + services.openssh = { + enable = true; + package = pkgs.opensshPackages.openssh.override { + linkOpenssl = false; + }; + hostKeys = [ + { + type = "ed25519"; + path = "/etc/ssh/ssh_host_ed25519_key"; + } + ]; + settings = { + # Since this test is against an OpenSSH-without-OpenSSL, + # we have to override NixOS's defaults ciphers (which require OpenSSL) + # and instead set these to null, which will mean OpenSSH uses its defaults. + # Expectedly, OpenSSH's defaults don't require OpenSSL when it's compiled + # without OpenSSL. + Ciphers = null; + KexAlgorithms = null; + Macs = null; + }; + }; + users.users.root.openssh.authorizedKeys.keys = [ + snakeOilEd25519PublicKey + ]; + }; + + server-no-pam = + { pkgs, ... }: + { + services.openssh = { + enable = true; + package = pkgs.opensshPackages.openssh.override { + withPAM = false; + }; + settings = { + UsePAM = false; + }; }; users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; }; - server-localhost-only = - { ... }: + client = + { ... }: + { + virtualisation.vlans = [ + 1 + 2 + ]; + }; - { - services.openssh = { - enable = true; - listenAddresses = [ - { - addr = "127.0.0.1"; - port = 22; - } - ]; - }; - }; + }; - server-localhost-only-lazy = - { ... }: + testScript = '' + start_all() - { - services.openssh = { - enable = true; - startWhenNeeded = true; - listenAddresses = [ - { - addr = "127.0.0.1"; - port = 22; - } - ]; - }; - }; + server.wait_for_unit("sshd", timeout=30) + server_allowed_users.wait_for_unit("sshd", timeout=30) + server_localhost_only.wait_for_unit("sshd", timeout=30) + server_match_rule.wait_for_unit("sshd", timeout=30) + server_no_openssl.wait_for_unit("sshd", timeout=30) + server_no_pam.wait_for_unit("sshd", timeout=30) - server-match-rule = - { ... }: + server_lazy.wait_for_unit("sshd.socket", timeout=30) + server_localhost_only_lazy.wait_for_unit("sshd.socket", timeout=30) + server_lazy_socket.wait_for_unit("sshd.socket", timeout=30) - { - services.openssh = { - enable = true; - listenAddresses = [ - { - addr = "127.0.0.1"; - port = 22; - } - { - addr = "[::]"; - port = 22; - } - ]; - extraConfig = '' - # Combined test for two (predictable) Match criterias - Match LocalAddress 127.0.0.1 LocalPort 22 - PermitRootLogin yes + with subtest("manual-authkey"): + client.succeed( + '${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ""' + ) + public_key = client.succeed( + "${pkgs.openssh}/bin/ssh-keygen -y -f /root/.ssh/id_ed25519" + ) + public_key = public_key.strip() + client.succeed("chmod 600 /root/.ssh/id_ed25519") - # Separate tests for Match criterias - Match User root - PermitRootLogin yes - Match Group root - PermitRootLogin yes - Match Host nohost.example - PermitRootLogin yes - Match LocalAddress 127.0.0.1 - PermitRootLogin yes - Match LocalPort 22 - PermitRootLogin yes - Match RDomain nohost.example - PermitRootLogin yes - Match Address 127.0.0.1 - PermitRootLogin yes - ''; - }; - }; + server.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key)) + server_lazy.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key)) - server-no-openssl = - { ... 
}: - { - services.openssh = { - enable = true; - package = pkgs.opensshPackages.openssh.override { - linkOpenssl = false; - }; - hostKeys = [ - { - type = "ed25519"; - path = "/etc/ssh/ssh_host_ed25519_key"; - } - ]; - settings = { - # Since this test is against an OpenSSH-without-OpenSSL, - # we have to override NixOS's defaults ciphers (which require OpenSSL) - # and instead set these to null, which will mean OpenSSH uses its defaults. - # Expectedly, OpenSSH's defaults don't require OpenSSL when it's compiled - # without OpenSSL. - Ciphers = null; - KexAlgorithms = null; - Macs = null; - }; - }; - users.users.root.openssh.authorizedKeys.keys = [ - snakeOilEd25519PublicKey - ]; - }; + client.wait_for_unit("network.target") + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'echo hello world' >&2", + timeout=30 + ) + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'ulimit -l' | grep 1024", + timeout=30 + ) - server-no-pam = - { pkgs, ... }: - { - services.openssh = { - enable = true; - package = pkgs.opensshPackages.openssh.override { - withPAM = false; - }; - settings = { - UsePAM = false; - }; - }; - users.users.root.openssh.authorizedKeys.keys = [ - snakeOilPublicKey - ]; - }; + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'echo hello world' >&2", + timeout=30 + ) + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'ulimit -l' | grep 1024", + timeout=30 + ) - client = - { ... }: - { - virtualisation.vlans = [ - 1 - 2 - ]; - }; + with subtest("socket activation on a non-standard port"): + client.succeed( + "cat ${snakeOilPrivateKey} > privkey.snakeoil" + ) + client.succeed("chmod 600 privkey.snakeoil") + # The final segment in this IP is allocated according to the alphabetical order of machines in this test. 
+ client.succeed( + "ssh -p 2222 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil root@192.168.2.5 true", + timeout=30 + ) - }; + with subtest("configured-authkey"): + client.succeed( + "cat ${snakeOilPrivateKey} > privkey.snakeoil" + ) + client.succeed("chmod 600 privkey.snakeoil") + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server true", + timeout=30 + ) + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-lazy true", + timeout=30 + ) - testScript = '' - start_all() + with subtest("localhost-only"): + server_localhost_only.succeed("ss -nlt | grep '127.0.0.1:22'") + server_localhost_only_lazy.succeed("ss -nlt | grep '127.0.0.1:22'") - server.wait_for_unit("sshd", timeout=30) - server_allowed_users.wait_for_unit("sshd", timeout=30) - server_localhost_only.wait_for_unit("sshd", timeout=30) - server_match_rule.wait_for_unit("sshd", timeout=30) - server_no_openssl.wait_for_unit("sshd", timeout=30) - server_no_pam.wait_for_unit("sshd", timeout=30) + with subtest("match-rules"): + server_match_rule.succeed("ss -nlt | grep '127.0.0.1:22'") - server_lazy.wait_for_unit("sshd.socket", timeout=30) - server_localhost_only_lazy.wait_for_unit("sshd.socket", timeout=30) - server_lazy_socket.wait_for_unit("sshd.socket", timeout=30) + with subtest("allowed-users"): + client.succeed( + "cat ${snakeOilPrivateKey} > privkey.snakeoil" + ) + client.succeed("chmod 600 privkey.snakeoil") + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil alice@server-allowed-users true", + timeout=30 + ) + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil bob@server-allowed-users true", + timeout=30 + ) + client.fail( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil carol@server-allowed-users true", + timeout=30 + ) - with subtest("manual-authkey"): - client.succeed( - '${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ""' - ) - public_key = client.succeed( - "${pkgs.openssh}/bin/ssh-keygen -y -f /root/.ssh/id_ed25519" - ) - public_key = public_key.strip() - client.succeed("chmod 600 /root/.ssh/id_ed25519") + with subtest("no-openssl"): + client.succeed( + "cat ${snakeOilEd25519PrivateKey} > privkey.snakeoil" + ) + client.succeed("chmod 600 privkey.snakeoil") + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-no-openssl true", + timeout=30 + ) - server.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key)) - server_lazy.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key)) + with subtest("no-pam"): + client.succeed( + "cat ${snakeOilPrivateKey} > privkey.snakeoil" + ) + client.succeed("chmod 600 privkey.snakeoil") + client.succeed( + "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-no-pam true", + timeout=30 + ) - client.wait_for_unit("network.target") - client.succeed( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'echo hello world' >&2", - timeout=30 - ) - client.succeed( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'ulimit -l' | grep 1024", - timeout=30 - ) - - client.succeed( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'echo hello world' >&2", - timeout=30 - ) - client.succeed( - "ssh -o 
UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'ulimit -l' | grep 1024", - timeout=30 - ) - - with subtest("socket activation on a non-standard port"): - client.succeed( - "cat ${snakeOilPrivateKey} > privkey.snakeoil" - ) - client.succeed("chmod 600 privkey.snakeoil") - # The final segment in this IP is allocated according to the alphabetical order of machines in this test. - client.succeed( - "ssh -p 2222 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil root@192.168.2.5 true", - timeout=30 - ) - - with subtest("configured-authkey"): - client.succeed( - "cat ${snakeOilPrivateKey} > privkey.snakeoil" - ) - client.succeed("chmod 600 privkey.snakeoil") - client.succeed( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server true", - timeout=30 - ) - client.succeed( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-lazy true", - timeout=30 - ) - - with subtest("localhost-only"): - server_localhost_only.succeed("ss -nlt | grep '127.0.0.1:22'") - server_localhost_only_lazy.succeed("ss -nlt | grep '127.0.0.1:22'") - - with subtest("match-rules"): - server_match_rule.succeed("ss -nlt | grep '127.0.0.1:22'") - - with subtest("allowed-users"): - client.succeed( - "cat ${snakeOilPrivateKey} > privkey.snakeoil" - ) - client.succeed("chmod 600 privkey.snakeoil") - client.succeed( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil alice@server-allowed-users true", - timeout=30 - ) - client.succeed( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil bob@server-allowed-users true", - timeout=30 - ) - client.fail( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil carol@server-allowed-users true", - timeout=30 - ) - - with subtest("no-openssl"): - client.succeed( - "cat ${snakeOilEd25519PrivateKey} > privkey.snakeoil" - ) - client.succeed("chmod 600 privkey.snakeoil") - client.succeed( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-no-openssl true", - timeout=30 - ) - - with subtest("no-pam"): - client.succeed( - "cat ${snakeOilPrivateKey} > privkey.snakeoil" - ) - client.succeed("chmod 600 privkey.snakeoil") - client.succeed( - "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-no-pam true", - timeout=30 - ) - - # None of the per-connection units should have failed. - server_lazy.fail("systemctl is-failed 'sshd@*.service'") - ''; - } -) + # None of the per-connection units should have failed. + server_lazy.fail("systemctl is-failed 'sshd@*.service'") + ''; +} diff --git a/nixos/tests/opentabletdriver.nix b/nixos/tests/opentabletdriver.nix index b7bdb16bf005..e97e3a5fe123 100644 --- a/nixos/tests/opentabletdriver.nix +++ b/nixos/tests/opentabletdriver.nix @@ -1,36 +1,34 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - testUser = "alice"; - in - { - name = "opentabletdriver"; - meta = { - maintainers = with pkgs.lib.maintainers; [ thiagokokada ]; +{ pkgs, ... }: +let + testUser = "alice"; +in +{ + name = "opentabletdriver"; + meta = { + maintainers = with pkgs.lib.maintainers; [ thiagokokada ]; + }; + + nodes.machine = + { pkgs, ... }: + { + imports = [ + ./common/user-account.nix + ./common/x11.nix + ]; + test-support.displayManager.auto.user = testUser; + hardware.opentabletdriver.enable = true; }; - nodes.machine = - { pkgs, ... 
}: - { - imports = [ - ./common/user-account.nix - ./common/x11.nix - ]; - test-support.displayManager.auto.user = testUser; - hardware.opentabletdriver.enable = true; - }; + testScript = '' + machine.start() + machine.wait_for_x() - testScript = '' - machine.start() - machine.wait_for_x() + machine.wait_for_unit('graphical.target') + machine.wait_for_unit("opentabletdriver.service", "${testUser}") - machine.wait_for_unit('graphical.target') - machine.wait_for_unit("opentabletdriver.service", "${testUser}") - - machine.succeed("cat /etc/udev/rules.d/70-opentabletdriver.rules") - # Will fail if service is not running - # Needs to run as the same user that started the service - machine.succeed("su - ${testUser} -c 'otd detect'") - ''; - } -) + machine.succeed("cat /etc/udev/rules.d/70-opentabletdriver.rules") + # Will fail if service is not running + # Needs to run as the same user that started the service + machine.succeed("su - ${testUser} -c 'otd detect'") + ''; +} diff --git a/nixos/tests/opentelemetry-collector.nix b/nixos/tests/opentelemetry-collector.nix index 1d8d4ea815c4..df319e5c7df9 100644 --- a/nixos/tests/opentelemetry-collector.nix +++ b/nixos/tests/opentelemetry-collector.nix @@ -1,87 +1,85 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - port = 4318; - in - { - name = "opentelemetry-collector"; - meta = with pkgs.lib.maintainers; { - maintainers = [ tylerjl ]; - }; +{ pkgs, ... }: +let + port = 4318; +in +{ + name = "opentelemetry-collector"; + meta = with pkgs.lib.maintainers; { + maintainers = [ tylerjl ]; + }; - nodes.machine = - { ... }: - { - networking.firewall.allowedTCPPorts = [ port ]; - services.opentelemetry-collector = { - enable = true; - settings = { - exporters.logging.verbosity = "detailed"; - receivers.otlp.protocols = { - http.endpoint = "0.0.0.0:${toString port}"; - }; - service = { - pipelines.logs = { - receivers = [ "otlp" ]; - exporters = [ "logging" ]; - }; + nodes.machine = + { ... }: + { + networking.firewall.allowedTCPPorts = [ port ]; + services.opentelemetry-collector = { + enable = true; + settings = { + exporters.logging.verbosity = "detailed"; + receivers.otlp.protocols = { + http.endpoint = "0.0.0.0:${toString port}"; + }; + service = { + pipelines.logs = { + receivers = [ "otlp" ]; + exporters = [ "logging" ]; }; }; }; - virtualisation.forwardPorts = [ - { - host.port = port; - guest.port = port; - } - ]; }; - - extraPythonPackages = p: [ - p.requests - p.types-requests - ]; - - # Send a log event through the OTLP pipeline and check for its - # presence in the collector logs. 
- testScript = # python - '' - import requests - import time - - from uuid import uuid4 - - flag = str(uuid4()) - - machine.wait_for_unit("opentelemetry-collector.service") - machine.wait_for_open_port(${toString port}) - - event = { - "resourceLogs": [ - { - "resource": {"attributes": []}, - "scopeLogs": [ - { - "logRecords": [ - { - "timeUnixNano": str(time.time_ns()), - "severityNumber": 9, - "severityText": "Info", - "name": "logTest", - "body": { - "stringValue": flag - }, - "attributes": [] - }, - ] - } - ] - } - ] + virtualisation.forwardPorts = [ + { + host.port = port; + guest.port = port; } + ]; + }; - response = requests.post("http://localhost:${toString port}/v1/logs", json=event) - assert response.status_code == 200 - assert flag in machine.execute("journalctl -u opentelemetry-collector")[-1] - ''; - } -) + extraPythonPackages = p: [ + p.requests + p.types-requests + ]; + + # Send a log event through the OTLP pipeline and check for its + # presence in the collector logs. + testScript = # python + '' + import requests + import time + + from uuid import uuid4 + + flag = str(uuid4()) + + machine.wait_for_unit("opentelemetry-collector.service") + machine.wait_for_open_port(${toString port}) + + event = { + "resourceLogs": [ + { + "resource": {"attributes": []}, + "scopeLogs": [ + { + "logRecords": [ + { + "timeUnixNano": str(time.time_ns()), + "severityNumber": 9, + "severityText": "Info", + "name": "logTest", + "body": { + "stringValue": flag + }, + "attributes": [] + }, + ] + } + ] + } + ] + } + + response = requests.post("http://localhost:${toString port}/v1/logs", json=event) + assert response.status_code == 200 + assert flag in machine.execute("journalctl -u opentelemetry-collector")[-1] + ''; +} diff --git a/nixos/tests/openvscode-server.nix b/nixos/tests/openvscode-server.nix index eaef459530ba..89d3817b2cf3 100644 --- a/nixos/tests/openvscode-server.nix +++ b/nixos/tests/openvscode-server.nix @@ -1,26 +1,24 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "openvscode-server"; +{ pkgs, lib, ... }: +{ + name = "openvscode-server"; - nodes = { - machine = - { pkgs, ... }: - { - services.openvscode-server = { - enable = true; - withoutConnectionToken = true; - }; + nodes = { + machine = + { pkgs, ... }: + { + services.openvscode-server = { + enable = true; + withoutConnectionToken = true; }; - }; + }; + }; - testScript = '' - start_all() - machine.wait_for_unit("openvscode-server.service") - machine.wait_for_open_port(3000) - machine.succeed("curl -k --fail http://localhost:3000", timeout=10) - ''; + testScript = '' + start_all() + machine.wait_for_unit("openvscode-server.service") + machine.wait_for_open_port(3000) + machine.succeed("curl -k --fail http://localhost:3000", timeout=10) + ''; - meta.maintainers = [ lib.maintainers.drupol ]; - } -) + meta.maintainers = [ lib.maintainers.drupol ]; +} diff --git a/nixos/tests/orangefs.nix b/nixos/tests/orangefs.nix index d388826dbf62..fe9335f74981 100644 --- a/nixos/tests/orangefs.nix +++ b/nixos/tests/orangefs.nix @@ -1,91 +1,89 @@ -import ./make-test-python.nix ( - { ... }: +{ ... }: - let - server = - { pkgs, ... }: - { - networking.firewall.allowedTCPPorts = [ 3334 ]; - boot.initrd.postDeviceCommands = '' - ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb - ''; +let + server = + { pkgs, ... 
}: + { + networking.firewall.allowedTCPPorts = [ 3334 ]; + boot.initrd.postDeviceCommands = '' + ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb + ''; - virtualisation.emptyDiskImages = [ 4096 ]; + virtualisation.emptyDiskImages = [ 4096 ]; - virtualisation.fileSystems = { - "/data" = { - device = "/dev/disk/by-label/data"; - fsType = "ext4"; - }; - }; - - services.orangefs.server = { - enable = true; - dataStorageSpace = "/data/storage"; - metadataStorageSpace = "/data/meta"; - servers = { - server1 = "tcp://server1:3334"; - server2 = "tcp://server2:3334"; - }; + virtualisation.fileSystems = { + "/data" = { + device = "/dev/disk/by-label/data"; + fsType = "ext4"; }; }; - client = - { lib, ... }: - { - networking.firewall.enable = true; - - services.orangefs.client = { - enable = true; - fileSystems = [ - { - target = "tcp://server1:3334/orangefs"; - mountPoint = "/orangefs"; - } - ]; + services.orangefs.server = { + enable = true; + dataStorageSpace = "/data/storage"; + metadataStorageSpace = "/data/meta"; + servers = { + server1 = "tcp://server1:3334"; + server2 = "tcp://server2:3334"; }; }; - - in - { - name = "orangefs"; - - nodes = { - server1 = server; - server2 = server; - - client1 = client; - client2 = client; }; - testScript = '' - # format storage - for server in server1, server2: - server.start() - server.wait_for_unit("multi-user.target") - server.succeed("mkdir -p /data/storage /data/meta") - server.succeed("chown orangefs:orangefs /data/storage /data/meta") - server.succeed("chmod 0770 /data/storage /data/meta") - server.succeed( - "sudo -g orangefs -u orangefs pvfs2-server -f /etc/orangefs/server.conf" - ) + client = + { lib, ... }: + { + networking.firewall.enable = true; - # start services after storage is formatted on all machines - for server in server1, server2: - server.succeed("systemctl start orangefs-server.service") + services.orangefs.client = { + enable = true; + fileSystems = [ + { + target = "tcp://server1:3334/orangefs"; + mountPoint = "/orangefs"; + } + ]; + }; + }; - with subtest("clients can reach and mount the FS"): - for client in client1, client2: - client.start() - client.wait_for_unit("orangefs-client.service") - # Both servers need to be reachable - client.succeed("pvfs2-check-server -h server1 -f orangefs -n tcp -p 3334") - client.succeed("pvfs2-check-server -h server2 -f orangefs -n tcp -p 3334") - client.wait_for_unit("orangefs.mount") +in +{ + name = "orangefs"; - with subtest("R/W test between clients"): - client1.succeed("echo test > /orangefs/file1") - client2.succeed("grep test /orangefs/file1") - ''; - } -) + nodes = { + server1 = server; + server2 = server; + + client1 = client; + client2 = client; + }; + + testScript = '' + # format storage + for server in server1, server2: + server.start() + server.wait_for_unit("multi-user.target") + server.succeed("mkdir -p /data/storage /data/meta") + server.succeed("chown orangefs:orangefs /data/storage /data/meta") + server.succeed("chmod 0770 /data/storage /data/meta") + server.succeed( + "sudo -g orangefs -u orangefs pvfs2-server -f /etc/orangefs/server.conf" + ) + + # start services after storage is formatted on all machines + for server in server1, server2: + server.succeed("systemctl start orangefs-server.service") + + with subtest("clients can reach and mount the FS"): + for client in client1, client2: + client.start() + client.wait_for_unit("orangefs-client.service") + # Both servers need to be reachable + client.succeed("pvfs2-check-server -h server1 -f orangefs -n tcp -p 3334") + 
client.succeed("pvfs2-check-server -h server2 -f orangefs -n tcp -p 3334") + client.wait_for_unit("orangefs.mount") + + with subtest("R/W test between clients"): + client1.succeed("echo test > /orangefs/file1") + client2.succeed("grep test /orangefs/file1") + ''; +} diff --git a/nixos/tests/osrm-backend.nix b/nixos/tests/osrm-backend.nix index 304450430888..ad9485d6a944 100644 --- a/nixos/tests/osrm-backend.nix +++ b/nixos/tests/osrm-backend.nix @@ -1,66 +1,64 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - port = 5000; - in - { - name = "osrm-backend"; - meta.maintainers = [ lib.maintainers.erictapen ]; +{ pkgs, lib, ... }: +let + port = 5000; +in +{ + name = "osrm-backend"; + meta.maintainers = [ lib.maintainers.erictapen ]; - nodes.machine = - { config, pkgs, ... }: - { + nodes.machine = + { config, pkgs, ... }: + { - services.osrm = { - enable = true; - inherit port; - dataFile = - let - filename = "monaco"; - osrm-data = pkgs.stdenv.mkDerivation { - name = "osrm-data"; + services.osrm = { + enable = true; + inherit port; + dataFile = + let + filename = "monaco"; + osrm-data = pkgs.stdenv.mkDerivation { + name = "osrm-data"; - buildInputs = [ pkgs.osrm-backend ]; + buildInputs = [ pkgs.osrm-backend ]; - # This is a pbf file of monaco, downloaded at 2019-01-04 from - # http://download.geofabrik.de/europe/monaco-latest.osm.pbf - # as apparently no provider of OSM files guarantees immutability, - # this is hosted as a gist on GitHub. - src = pkgs.fetchgit { - url = "https://gist.github.com/erictapen/01e39f73a6c856eac53ba809a94cdb83"; - rev = "9b1ff0f24deb40e5cf7df51f843dbe860637b8ce"; - sha256 = "1scqhmrfnpwsy5i2a9jpggqnvfgj4hv9p4qyvc79321pzkbv59nx"; - }; - - buildCommand = '' - cp $src/${filename}.osm.pbf . - ${pkgs.osrm-backend}/bin/osrm-extract -p ${pkgs.osrm-backend}/share/osrm/profiles/car.lua ${filename}.osm.pbf - ${pkgs.osrm-backend}/bin/osrm-partition ${filename}.osrm - ${pkgs.osrm-backend}/bin/osrm-customize ${filename}.osrm - mkdir -p $out - cp ${filename}* $out/ - ''; + # This is a pbf file of monaco, downloaded at 2019-01-04 from + # http://download.geofabrik.de/europe/monaco-latest.osm.pbf + # as apparently no provider of OSM files guarantees immutability, + # this is hosted as a gist on GitHub. + src = pkgs.fetchgit { + url = "https://gist.github.com/erictapen/01e39f73a6c856eac53ba809a94cdb83"; + rev = "9b1ff0f24deb40e5cf7df51f843dbe860637b8ce"; + sha256 = "1scqhmrfnpwsy5i2a9jpggqnvfgj4hv9p4qyvc79321pzkbv59nx"; }; - in - "${osrm-data}/${filename}.osrm"; - }; - environment.systemPackages = [ pkgs.jq ]; + buildCommand = '' + cp $src/${filename}.osm.pbf . 
+ ${pkgs.osrm-backend}/bin/osrm-extract -p ${pkgs.osrm-backend}/share/osrm/profiles/car.lua ${filename}.osm.pbf + ${pkgs.osrm-backend}/bin/osrm-partition ${filename}.osrm + ${pkgs.osrm-backend}/bin/osrm-customize ${filename}.osrm + mkdir -p $out + cp ${filename}* $out/ + ''; + }; + in + "${osrm-data}/${filename}.osrm"; }; - testScript = - let - query = "http://localhost:${toString port}/route/v1/driving/7.41720,43.73304;7.42463,43.73886?steps=true"; - in - '' - machine.wait_for_unit("osrm.service") - machine.wait_for_open_port(${toString port}) - assert "Boulevard Rainier III" in machine.succeed( - "curl --fail --silent '${query}' | jq .waypoints[0].name" - ) - assert "Avenue de la Costa" in machine.succeed( - "curl --fail --silent '${query}' | jq .waypoints[1].name" - ) - ''; - } -) + environment.systemPackages = [ pkgs.jq ]; + }; + + testScript = + let + query = "http://localhost:${toString port}/route/v1/driving/7.41720,43.73304;7.42463,43.73886?steps=true"; + in + '' + machine.wait_for_unit("osrm.service") + machine.wait_for_open_port(${toString port}) + assert "Boulevard Rainier III" in machine.succeed( + "curl --fail --silent '${query}' | jq .waypoints[0].name" + ) + assert "Avenue de la Costa" in machine.succeed( + "curl --fail --silent '${query}' | jq .waypoints[1].name" + ) + ''; +} diff --git a/nixos/tests/outline.nix b/nixos/tests/outline.nix index 52f46f533878..4beb58a3c408 100644 --- a/nixos/tests/outline.nix +++ b/nixos/tests/outline.nix @@ -1,57 +1,55 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - accessKey = "BKIKJAA5BMMU2RHO6IBB"; - secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; - secretKeyFile = pkgs.writeText "outline-secret-key" '' - ${secretKey} - ''; - rootCredentialsFile = pkgs.writeText "minio-credentials-full" '' - MINIO_ROOT_USER=${accessKey} - MINIO_ROOT_PASSWORD=${secretKey} - ''; - in - { - name = "outline"; +{ pkgs, lib, ... }: +let + accessKey = "BKIKJAA5BMMU2RHO6IBB"; + secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; + secretKeyFile = pkgs.writeText "outline-secret-key" '' + ${secretKey} + ''; + rootCredentialsFile = pkgs.writeText "minio-credentials-full" '' + MINIO_ROOT_USER=${accessKey} + MINIO_ROOT_PASSWORD=${secretKey} + ''; +in +{ + name = "outline"; - meta.maintainers = lib.teams.cyberus.members; + meta.maintainers = lib.teams.cyberus.members; - nodes = { - outline = - { pkgs, config, ... }: - { - nixpkgs.config.allowUnfree = true; - environment.systemPackages = [ pkgs.minio-client ]; - services.outline = { - enable = true; - forceHttps = false; - storage = { - inherit accessKey secretKeyFile; - uploadBucketUrl = "http://localhost:9000"; - uploadBucketName = "outline"; - region = config.services.minio.region; - }; - }; - services.minio = { - enable = true; - inherit rootCredentialsFile; + nodes = { + outline = + { pkgs, config, ... 
}: + { + nixpkgs.config.allowUnfree = true; + environment.systemPackages = [ pkgs.minio-client ]; + services.outline = { + enable = true; + forceHttps = false; + storage = { + inherit accessKey secretKeyFile; + uploadBucketUrl = "http://localhost:9000"; + uploadBucketName = "outline"; + region = config.services.minio.region; }; }; - }; + services.minio = { + enable = true; + inherit rootCredentialsFile; + }; + }; + }; - testScript = '' - machine.wait_for_unit("minio.service") - machine.wait_for_open_port(9000) + testScript = '' + machine.wait_for_unit("minio.service") + machine.wait_for_open_port(9000) - # Create a test bucket on the server - machine.succeed( - "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4" - ) - machine.succeed("mc mb minio/outline") + # Create a test bucket on the server + machine.succeed( + "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4" + ) + machine.succeed("mc mb minio/outline") - outline.wait_for_unit("outline.service") - outline.wait_for_open_port(3000) - outline.succeed("curl --fail http://localhost:3000/") - ''; - } -) + outline.wait_for_unit("outline.service") + outline.wait_for_open_port(3000) + outline.succeed("curl --fail http://localhost:3000/") + ''; +} diff --git a/nixos/tests/overlayfs.nix b/nixos/tests/overlayfs.nix index cd4843229943..93300ed8e812 100644 --- a/nixos/tests/overlayfs.nix +++ b/nixos/tests/overlayfs.nix @@ -1,52 +1,50 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "overlayfs"; - meta.maintainers = with pkgs.lib.maintainers; [ bachp ]; +{ pkgs, ... }: +{ + name = "overlayfs"; + meta.maintainers = with pkgs.lib.maintainers; [ bachp ]; - nodes.machine = - { pkgs, ... }: - { - virtualisation.emptyDiskImages = [ 512 ]; - networking.hostId = "deadbeef"; - environment.systemPackages = with pkgs; [ parted ]; - }; + nodes.machine = + { pkgs, ... 
}: + { + virtualisation.emptyDiskImages = [ 512 ]; + networking.hostId = "deadbeef"; + environment.systemPackages = with pkgs; [ parted ]; + }; - testScript = '' - machine.succeed("ls /dev") + testScript = '' + machine.succeed("ls /dev") - machine.succeed("mkdir -p /tmp/mnt") + machine.succeed("mkdir -p /tmp/mnt") - # Test ext4 + overlayfs - machine.succeed( - 'mkfs.ext4 -F -L overlay-ext4 /dev/vdb', - 'mount -t ext4 /dev/vdb /tmp/mnt', - 'mkdir -p /tmp/mnt/upper /tmp/mnt/lower /tmp/mnt/work /tmp/mnt/merged', - # Setup some existing files - 'echo Replace > /tmp/mnt/lower/replace.txt', - 'echo Append > /tmp/mnt/lower/append.txt', - 'echo Overwrite > /tmp/mnt/lower/overwrite.txt', - 'mount -t overlay overlay -o lowerdir=/tmp/mnt/lower,upperdir=/tmp/mnt/upper,workdir=/tmp/mnt/work /tmp/mnt/merged', - # Test new - 'echo New > /tmp/mnt/merged/new.txt', - '[[ "$(cat /tmp/mnt/merged/new.txt)" == New ]]', - # Test replace - '[[ "$(cat /tmp/mnt/merged/replace.txt)" == Replace ]]', - 'echo Replaced > /tmp/mnt/merged/replace-tmp.txt', - 'mv /tmp/mnt/merged/replace-tmp.txt /tmp/mnt/merged/replace.txt', - '[[ "$(cat /tmp/mnt/merged/replace.txt)" == Replaced ]]', - # Overwrite - '[[ "$(cat /tmp/mnt/merged/overwrite.txt)" == Overwrite ]]', - 'echo Overwritten > /tmp/mnt/merged/overwrite.txt', - '[[ "$(cat /tmp/mnt/merged/overwrite.txt)" == Overwritten ]]', - # Test append - '[[ "$(cat /tmp/mnt/merged/append.txt)" == Append ]]', - 'echo ed >> /tmp/mnt/merged/append.txt', - '[[ "$(cat /tmp/mnt/merged/append.txt)" == "Append\ned" ]]', - 'umount /tmp/mnt/merged', - 'umount /tmp/mnt', - 'udevadm settle', - ) - ''; - } -) + # Test ext4 + overlayfs + machine.succeed( + 'mkfs.ext4 -F -L overlay-ext4 /dev/vdb', + 'mount -t ext4 /dev/vdb /tmp/mnt', + 'mkdir -p /tmp/mnt/upper /tmp/mnt/lower /tmp/mnt/work /tmp/mnt/merged', + # Setup some existing files + 'echo Replace > /tmp/mnt/lower/replace.txt', + 'echo Append > /tmp/mnt/lower/append.txt', + 'echo Overwrite > /tmp/mnt/lower/overwrite.txt', + 'mount -t overlay overlay -o lowerdir=/tmp/mnt/lower,upperdir=/tmp/mnt/upper,workdir=/tmp/mnt/work /tmp/mnt/merged', + # Test new + 'echo New > /tmp/mnt/merged/new.txt', + '[[ "$(cat /tmp/mnt/merged/new.txt)" == New ]]', + # Test replace + '[[ "$(cat /tmp/mnt/merged/replace.txt)" == Replace ]]', + 'echo Replaced > /tmp/mnt/merged/replace-tmp.txt', + 'mv /tmp/mnt/merged/replace-tmp.txt /tmp/mnt/merged/replace.txt', + '[[ "$(cat /tmp/mnt/merged/replace.txt)" == Replaced ]]', + # Overwrite + '[[ "$(cat /tmp/mnt/merged/overwrite.txt)" == Overwrite ]]', + 'echo Overwritten > /tmp/mnt/merged/overwrite.txt', + '[[ "$(cat /tmp/mnt/merged/overwrite.txt)" == Overwritten ]]', + # Test append + '[[ "$(cat /tmp/mnt/merged/append.txt)" == Append ]]', + 'echo ed >> /tmp/mnt/merged/append.txt', + '[[ "$(cat /tmp/mnt/merged/append.txt)" == "Append\ned" ]]', + 'umount /tmp/mnt/merged', + 'umount /tmp/mnt', + 'udevadm settle', + ) + ''; +} diff --git a/nixos/tests/owncast.nix b/nixos/tests/owncast.nix index a081cd5b544d..14fa2a737a71 100644 --- a/nixos/tests/owncast.nix +++ b/nixos/tests/owncast.nix @@ -1,75 +1,73 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "owncast"; - meta = with pkgs.lib.maintainers; { - maintainers = [ MayNiklas ]; - }; +{ pkgs, ... }: +{ + name = "owncast"; + meta = with pkgs.lib.maintainers; { + maintainers = [ MayNiklas ]; + }; - nodes = { - client = - { pkgs, ... 
}: - with pkgs.lib; - { - networking = { - dhcpcd.enable = false; - interfaces.eth1.ipv6.addresses = mkOverride 0 [ - { - address = "fd00::2"; - prefixLength = 64; - } - ]; - interfaces.eth1.ipv4.addresses = mkOverride 0 [ - { - address = "192.168.1.2"; - prefixLength = 24; - } - ]; - }; + nodes = { + client = + { pkgs, ... }: + with pkgs.lib; + { + networking = { + dhcpcd.enable = false; + interfaces.eth1.ipv6.addresses = mkOverride 0 [ + { + address = "fd00::2"; + prefixLength = 64; + } + ]; + interfaces.eth1.ipv4.addresses = mkOverride 0 [ + { + address = "192.168.1.2"; + prefixLength = 24; + } + ]; }; - server = - { pkgs, ... }: - with pkgs.lib; - { - networking = { - dhcpcd.enable = false; - useNetworkd = true; - useDHCP = false; - interfaces.eth1.ipv6.addresses = mkOverride 0 [ - { - address = "fd00::1"; - prefixLength = 64; - } - ]; - interfaces.eth1.ipv4.addresses = mkOverride 0 [ - { - address = "192.168.1.1"; - prefixLength = 24; - } - ]; + }; + server = + { pkgs, ... }: + with pkgs.lib; + { + networking = { + dhcpcd.enable = false; + useNetworkd = true; + useDHCP = false; + interfaces.eth1.ipv6.addresses = mkOverride 0 [ + { + address = "fd00::1"; + prefixLength = 64; + } + ]; + interfaces.eth1.ipv4.addresses = mkOverride 0 [ + { + address = "192.168.1.1"; + prefixLength = 24; + } + ]; - firewall.allowedTCPPorts = [ 8080 ]; - }; - - services.owncast = { - enable = true; - listen = "0.0.0.0"; - }; + firewall.allowedTCPPorts = [ 8080 ]; }; - }; - testScript = '' - start_all() + services.owncast = { + enable = true; + listen = "0.0.0.0"; + }; + }; + }; - client.systemctl("start network-online.target") - server.systemctl("start network-online.target") - client.wait_for_unit("network-online.target") - server.wait_for_unit("network-online.target") - server.wait_for_unit("owncast.service") - server.wait_until_succeeds("ss -ntl | grep -q 8080") + testScript = '' + start_all() - client.succeed("curl http://192.168.1.1:8080/api/status") - client.succeed("curl http://[fd00::1]:8080/api/status") - ''; - } -) + client.systemctl("start network-online.target") + server.systemctl("start network-online.target") + client.wait_for_unit("network-online.target") + server.wait_for_unit("network-online.target") + server.wait_for_unit("owncast.service") + server.wait_until_succeeds("ss -ntl | grep -q 8080") + + client.succeed("curl http://192.168.1.1:8080/api/status") + client.succeed("curl http://[fd00::1]:8080/api/status") + ''; +} diff --git a/nixos/tests/pacemaker.nix b/nixos/tests/pacemaker.nix index 6b711ad94552..826c4be7df14 100644 --- a/nixos/tests/pacemaker.nix +++ b/nixos/tests/pacemaker.nix @@ -1,118 +1,116 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - rec { - name = "pacemaker"; - meta = with pkgs.lib.maintainers; { - maintainers = [ astro ]; +{ pkgs, lib, ... 
}: +rec { + name = "pacemaker"; + meta = with pkgs.lib.maintainers; { + maintainers = [ astro ]; + }; + + nodes = + let + node = i: { + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "192.168.0.${toString i}"; + prefixLength = 24; + } + ]; + + services.corosync = { + enable = true; + clusterName = "zentralwerk-network"; + nodelist = lib.imap (i: name: { + nodeid = i; + inherit name; + ring_addrs = [ + (builtins.head nodes.${name}.networking.interfaces.eth1.ipv4.addresses).address + ]; + }) (builtins.attrNames nodes); + }; + environment.etc."corosync/authkey" = { + source = + builtins.toFile "authkey" + # minimum length: 128 bytes + "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest"; + mode = "0400"; + }; + + services.pacemaker.enable = true; + + # used for pacemaker resource + systemd.services.ha-cat = { + description = "Highly available netcat"; + serviceConfig.ExecStart = "${pkgs.netcat}/bin/nc -l discard"; + }; + }; + in + { + node1 = node 1; + node2 = node 2; + node3 = node 3; }; - nodes = - let - node = i: { - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "192.168.0.${toString i}"; - prefixLength = 24; - } - ]; - - services.corosync = { - enable = true; - clusterName = "zentralwerk-network"; - nodelist = lib.imap (i: name: { - nodeid = i; - inherit name; - ring_addrs = [ - (builtins.head nodes.${name}.networking.interfaces.eth1.ipv4.addresses).address - ]; - }) (builtins.attrNames nodes); - }; - environment.etc."corosync/authkey" = { - source = - builtins.toFile "authkey" - # minimum length: 128 bytes - "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest"; - mode = "0400"; - }; - - services.pacemaker.enable = true; - - # used for pacemaker resource - systemd.services.ha-cat = { - description = "Highly available netcat"; - serviceConfig.ExecStart = "${pkgs.netcat}/bin/nc -l discard"; - }; - }; - in - { - node1 = node 1; - node2 = node 2; - node3 = node 3; - }; - - # sets up pacemaker with resources configuration, then crashes a - # node and waits for service restart on another node - testScript = - let - resources = builtins.toFile "cib-resources.xml" '' - - - - - - - - - - ''; - in - '' - import re - import time - - start_all() - - ${lib.concatMapStrings (node: '' - ${node}.wait_until_succeeds("corosync-quorumtool") - ${node}.wait_for_unit("pacemaker.service") - '') (builtins.attrNames nodes)} - - # No STONITH device - node1.succeed("crm_attribute -t crm_config -n stonith-enabled -v false") - # Configure the cat resource - node1.succeed("cibadmin --replace --scope resources --xml-file ${resources}") - - # wait until the service is started - while True: - output = node1.succeed("crm_resource -r cat --locate") - match = re.search("is running on: (.+)", output) - if match: - for machine in machines: - if machine.name == match.group(1): - current_node = machine - break - time.sleep(1) - - current_node.log("Service running here!") - current_node.crash() - - # pick another node that's still up - for machine in machines: - if machine.booted: - check_node = machine - # find where the service has been started next - while True: - output = check_node.succeed("crm_resource -r cat --locate") - match = re.search("is running on: (.+)", output) - # output will remain the old current_node until the crash is detected by pacemaker - if match and match.group(1) != current_node.name: - for machine in machines: - if 
machine.name == match.group(1): - next_node = machine - break - time.sleep(1) - - next_node.log("Service migrated here!") + # sets up pacemaker with resources configuration, then crashes a + # node and waits for service restart on another node + testScript = + let + resources = builtins.toFile "cib-resources.xml" '' + + + + + + + + + ''; - } -) + in + '' + import re + import time + + start_all() + + ${lib.concatMapStrings (node: '' + ${node}.wait_until_succeeds("corosync-quorumtool") + ${node}.wait_for_unit("pacemaker.service") + '') (builtins.attrNames nodes)} + + # No STONITH device + node1.succeed("crm_attribute -t crm_config -n stonith-enabled -v false") + # Configure the cat resource + node1.succeed("cibadmin --replace --scope resources --xml-file ${resources}") + + # wait until the service is started + while True: + output = node1.succeed("crm_resource -r cat --locate") + match = re.search("is running on: (.+)", output) + if match: + for machine in machines: + if machine.name == match.group(1): + current_node = machine + break + time.sleep(1) + + current_node.log("Service running here!") + current_node.crash() + + # pick another node that's still up + for machine in machines: + if machine.booted: + check_node = machine + # find where the service has been started next + while True: + output = check_node.succeed("crm_resource -r cat --locate") + match = re.search("is running on: (.+)", output) + # output will remain the old current_node until the crash is detected by pacemaker + if match and match.group(1) != current_node.name: + for machine in machines: + if machine.name == match.group(1): + next_node = machine + break + time.sleep(1) + + next_node.log("Service migrated here!") + ''; +} diff --git a/nixos/tests/packagekit.nix b/nixos/tests/packagekit.nix index 1dc248217b11..edeaabc5463f 100644 --- a/nixos/tests/packagekit.nix +++ b/nixos/tests/packagekit.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "packagekit"; - meta = with pkgs.lib.maintainers; { - maintainers = [ peterhoeg ]; +{ pkgs, ... }: +{ + name = "packagekit"; + meta = with pkgs.lib.maintainers; { + maintainers = [ peterhoeg ]; + }; + + nodes.machine = + { ... }: + { + environment.systemPackages = with pkgs; [ dbus ]; + services.packagekit = { + enable = true; + }; }; - nodes.machine = - { ... }: - { - environment.systemPackages = with pkgs; [ dbus ]; - services.packagekit = { - enable = true; - }; - }; + testScript = '' + start_all() - testScript = '' - start_all() + # send a dbus message to activate the service + machine.succeed( + "dbus-send --system --type=method_call --print-reply --dest=org.freedesktop.PackageKit /org/freedesktop/PackageKit org.freedesktop.DBus.Introspectable.Introspect" + ) - # send a dbus message to activate the service - machine.succeed( - "dbus-send --system --type=method_call --print-reply --dest=org.freedesktop.PackageKit /org/freedesktop/PackageKit org.freedesktop.DBus.Introspectable.Introspect" - ) - - # so now it should be running - machine.wait_for_unit("packagekit.service") - ''; - } -) + # so now it should be running + machine.wait_for_unit("packagekit.service") + ''; +} diff --git a/nixos/tests/pam/pam-file-contents.nix b/nixos/tests/pam/pam-file-contents.nix index 50551d8fd340..c0342ebaaf7b 100644 --- a/nixos/tests/pam/pam-file-contents.nix +++ b/nixos/tests/pam/pam-file-contents.nix @@ -1,32 +1,30 @@ let name = "pam"; in -import ../make-test-python.nix ( - { pkgs, ... }: - { - name = "pam-file-contents"; +{ pkgs, ... 
}: +{ + name = "pam-file-contents"; - nodes.machine = - { ... }: - { - imports = [ ../../modules/profiles/minimal.nix ]; + nodes.machine = + { ... }: + { + imports = [ ../../modules/profiles/minimal.nix ]; - security.krb5.enable = true; + security.krb5.enable = true; + users = { + mutableUsers = false; users = { - mutableUsers = false; - users = { - user = { - isNormalUser = true; - }; + user = { + isNormalUser = true; }; }; }; + }; - testScript = - builtins.replaceStrings - [ "@@pam@@" "@@pam_ccreds@@" "@@pam_krb5@@" ] - [ pkgs.pam.outPath pkgs.pam_ccreds.outPath pkgs.pam_krb5.outPath ] - (builtins.readFile ./test_chfn.py); - } -) + testScript = + builtins.replaceStrings + [ "@@pam@@" "@@pam_ccreds@@" "@@pam_krb5@@" ] + [ pkgs.pam.outPath pkgs.pam_ccreds.outPath pkgs.pam_krb5.outPath ] + (builtins.readFile ./test_chfn.py); +} diff --git a/nixos/tests/pam/pam-oath-login.nix b/nixos/tests/pam/pam-oath-login.nix index 25602b72d35c..f22d5607db4f 100644 --- a/nixos/tests/pam/pam-oath-login.nix +++ b/nixos/tests/pam/pam-oath-login.nix @@ -1,109 +1,107 @@ -import ../make-test-python.nix ( - { ... }: +{ ... }: - let - oathSnakeoilSecret = "cdd4083ef8ff1fa9178c6d46bfb1a3"; +let + oathSnakeoilSecret = "cdd4083ef8ff1fa9178c6d46bfb1a3"; - # With HOTP mode the password is calculated based on a counter of - # how many passwords have been made. In this env, we'll always be on - # the 0th counter, so the password is static. - # - # Generated in nix-shell -p oath-toolkit - # via: oathtool -v -d6 -w10 cdd4083ef8ff1fa9178c6d46bfb1a3 - # and picking a the first 4: - oathSnakeOilPassword1 = "143349"; - oathSnakeOilPassword2 = "801753"; + # With HOTP mode the password is calculated based on a counter of + # how many passwords have been made. In this env, we'll always be on + # the 0th counter, so the password is static. + # + # Generated in nix-shell -p oath-toolkit + # via: oathtool -v -d6 -w10 cdd4083ef8ff1fa9178c6d46bfb1a3 + # and picking a the first 4: + oathSnakeOilPassword1 = "143349"; + oathSnakeOilPassword2 = "801753"; - alicePassword = "foobar"; - # Generated via: mkpasswd -m sha-512 and passing in "foobar" - hashedAlicePassword = "$6$MsMrE1q.1HrCgTS$Vq2e/uILzYjSN836TobAyN9xh9oi7EmCmucnZID25qgPoibkw8qTCugiAPnn4eCGvn1A.7oEBFJaaGUaJsQQY."; + alicePassword = "foobar"; + # Generated via: mkpasswd -m sha-512 and passing in "foobar" + hashedAlicePassword = "$6$MsMrE1q.1HrCgTS$Vq2e/uILzYjSN836TobAyN9xh9oi7EmCmucnZID25qgPoibkw8qTCugiAPnn4eCGvn1A.7oEBFJaaGUaJsQQY."; - in - { - name = "pam-oath-login"; +in +{ + name = "pam-oath-login"; - nodes.machine = - { ... }: - { - security.pam.oath = { - enable = true; - }; - - users.users.alice = { - isNormalUser = true; - name = "alice"; - uid = 1000; - hashedPassword = hashedAlicePassword; - extraGroups = [ "wheel" ]; - createHome = true; - home = "/home/alice"; - }; - - systemd.services.setupOathSnakeoilFile = { - wantedBy = [ "default.target" ]; - before = [ "default.target" ]; - unitConfig = { - type = "oneshot"; - RemainAfterExit = true; - }; - script = '' - touch /etc/users.oath - chmod 600 /etc/users.oath - chown root /etc/users.oath - echo "HOTP/E/6 alice - ${oathSnakeoilSecret}" > /etc/users.oath - ''; - }; + nodes.machine = + { ... 
}: + { + security.pam.oath = { + enable = true; }; - testScript = '' - def switch_to_tty(tty_number): - machine.fail(f"pgrep -f 'agetty.*tty{tty_number}'") - machine.send_key(f"alt-f{tty_number}") - machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]") - machine.wait_for_unit(f"getty@tty{tty_number}.service") - machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'") + users.users.alice = { + isNormalUser = true; + name = "alice"; + uid = 1000; + hashedPassword = hashedAlicePassword; + extraGroups = [ "wheel" ]; + createHome = true; + home = "/home/alice"; + }; + + systemd.services.setupOathSnakeoilFile = { + wantedBy = [ "default.target" ]; + before = [ "default.target" ]; + unitConfig = { + type = "oneshot"; + RemainAfterExit = true; + }; + script = '' + touch /etc/users.oath + chmod 600 /etc/users.oath + chown root /etc/users.oath + echo "HOTP/E/6 alice - ${oathSnakeoilSecret}" > /etc/users.oath + ''; + }; + }; + + testScript = '' + def switch_to_tty(tty_number): + machine.fail(f"pgrep -f 'agetty.*tty{tty_number}'") + machine.send_key(f"alt-f{tty_number}") + machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]") + machine.wait_for_unit(f"getty@tty{tty_number}.service") + machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'") - def enter_user_alice(tty_number): - machine.wait_until_tty_matches(tty_number, "login: ") - machine.send_chars("alice\n") - machine.wait_until_tty_matches(tty_number, "login: alice") - machine.wait_until_succeeds("pgrep login") - machine.wait_until_tty_matches(tty_number, "One-time password") + def enter_user_alice(tty_number): + machine.wait_until_tty_matches(tty_number, "login: ") + machine.send_chars("alice\n") + machine.wait_until_tty_matches(tty_number, "login: alice") + machine.wait_until_succeeds("pgrep login") + machine.wait_until_tty_matches(tty_number, "One-time password") - machine.wait_for_unit("multi-user.target") - machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") - machine.screenshot("postboot") + machine.wait_for_unit("multi-user.target") + machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + machine.screenshot("postboot") - with subtest("Invalid password"): - switch_to_tty("2") - enter_user_alice("2") + with subtest("Invalid password"): + switch_to_tty("2") + enter_user_alice("2") - machine.send_chars("${oathSnakeOilPassword1}\n") - machine.wait_until_tty_matches("2", "Password: ") - machine.send_chars("blorg\n") - machine.wait_until_tty_matches("2", "Login incorrect") + machine.send_chars("${oathSnakeOilPassword1}\n") + machine.wait_until_tty_matches("2", "Password: ") + machine.send_chars("blorg\n") + machine.wait_until_tty_matches("2", "Login incorrect") - with subtest("Invalid oath token"): - switch_to_tty("3") - enter_user_alice("3") + with subtest("Invalid oath token"): + switch_to_tty("3") + enter_user_alice("3") - machine.send_chars("000000\n") - machine.wait_until_tty_matches("3", "Login incorrect") - machine.wait_until_tty_matches("3", "login:") + machine.send_chars("000000\n") + machine.wait_until_tty_matches("3", "Login incorrect") + machine.wait_until_tty_matches("3", "login:") - with subtest("Happy path: Both passwords are mandatory to get us in"): - switch_to_tty("4") - enter_user_alice("4") + with subtest("Happy path: Both passwords are mandatory to get us in"): + switch_to_tty("4") + enter_user_alice("4") - machine.send_chars("${oathSnakeOilPassword2}\n") - machine.wait_until_tty_matches("4", "Password: ") - machine.send_chars("${alicePassword}\n") + 
machine.send_chars("${oathSnakeOilPassword2}\n") + machine.wait_until_tty_matches("4", "Password: ") + machine.send_chars("${alicePassword}\n") - machine.wait_until_succeeds("pgrep -u alice bash") - machine.send_chars("touch done4\n") - machine.wait_for_file("/home/alice/done4") - ''; - } -) + machine.wait_until_succeeds("pgrep -u alice bash") + machine.send_chars("touch done4\n") + machine.wait_for_file("/home/alice/done4") + ''; +} diff --git a/nixos/tests/pam/pam-u2f.nix b/nixos/tests/pam/pam-u2f.nix index c889cb9ee2fa..17b9e19e4cc5 100644 --- a/nixos/tests/pam/pam-u2f.nix +++ b/nixos/tests/pam/pam-u2f.nix @@ -1,31 +1,29 @@ -import ../make-test-python.nix ( - { ... }: +{ ... }: - { - name = "pam-u2f"; +{ + name = "pam-u2f"; - nodes.machine = - { ... }: - { - security.pam.u2f = { - enable = true; - control = "required"; - settings = { - cue = true; - debug = true; - interactive = true; - origin = "nixos-test"; - # Freeform option - userpresence = 1; - }; + nodes.machine = + { ... }: + { + security.pam.u2f = { + enable = true; + control = "required"; + settings = { + cue = true; + debug = true; + interactive = true; + origin = "nixos-test"; + # Freeform option + userpresence = 1; }; }; + }; - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.succeed( - 'egrep "auth required .*/lib/security/pam_u2f.so.*cue.*debug.*interactive.*origin=nixos-test.*userpresence=1" /etc/pam.d/ -R' - ) - ''; - } -) + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.succeed( + 'egrep "auth required .*/lib/security/pam_u2f.so.*cue.*debug.*interactive.*origin=nixos-test.*userpresence=1" /etc/pam.d/ -R' + ) + ''; +} diff --git a/nixos/tests/pam/pam-ussh.nix b/nixos/tests/pam/pam-ussh.nix index c5eefd12aa3b..bdbbfe16b0fa 100644 --- a/nixos/tests/pam/pam-ussh.nix +++ b/nixos/tests/pam/pam-ussh.nix @@ -1,83 +1,81 @@ -import ../make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - testOnlySSHCredentials = - pkgs.runCommand "pam-ussh-test-ca" - { - nativeBuildInputs = [ pkgs.openssh ]; - } - '' - mkdir $out - ssh-keygen -t ed25519 -N "" -f $out/ca - - ssh-keygen -t ed25519 -N "" -f $out/alice - ssh-keygen -s $out/ca -I "alice user key" -n "alice,root" -V 19700101:forever $out/alice.pub - - ssh-keygen -t ed25519 -N "" -f $out/bob - ssh-keygen -s $out/ca -I "bob user key" -n "bob" -V 19700101:forever $out/bob.pub - ''; - makeTestScript = - user: - pkgs.writeShellScript "pam-ussh-${user}-test-script" '' - set -euo pipefail - - eval $(${pkgs.openssh}/bin/ssh-agent) - - mkdir -p $HOME/.ssh - chmod 700 $HOME/.ssh - cp ${testOnlySSHCredentials}/${user}{,.pub,-cert.pub} $HOME/.ssh - chmod 600 $HOME/.ssh/${user} - chmod 644 $HOME/.ssh/${user}{,-cert}.pub - - set -x - - ${pkgs.openssh}/bin/ssh-add $HOME/.ssh/${user} - ${pkgs.openssh}/bin/ssh-add -l &>2 - - exec sudo id -u -n - ''; - in - { - name = "pam-ussh"; - meta.maintainers = with lib.maintainers; [ lukegb ]; - - machine = - { ... 
}: +let + testOnlySSHCredentials = + pkgs.runCommand "pam-ussh-test-ca" { - users.users.alice = { - isNormalUser = true; - extraGroups = [ "wheel" ]; - }; - users.users.bob = { - isNormalUser = true; - extraGroups = [ "wheel" ]; - }; + nativeBuildInputs = [ pkgs.openssh ]; + } + '' + mkdir $out + ssh-keygen -t ed25519 -N "" -f $out/ca - security.pam.ussh = { - enable = true; - authorizedPrincipals = "root"; - caFile = "${testOnlySSHCredentials}/ca.pub"; - }; + ssh-keygen -t ed25519 -N "" -f $out/alice + ssh-keygen -s $out/ca -I "alice user key" -n "alice,root" -V 19700101:forever $out/alice.pub - security.sudo = { - enable = true; - extraConfig = '' - Defaults lecture="never" - ''; - }; + ssh-keygen -t ed25519 -N "" -f $out/bob + ssh-keygen -s $out/ca -I "bob user key" -n "bob" -V 19700101:forever $out/bob.pub + ''; + makeTestScript = + user: + pkgs.writeShellScript "pam-ussh-${user}-test-script" '' + set -euo pipefail + + eval $(${pkgs.openssh}/bin/ssh-agent) + + mkdir -p $HOME/.ssh + chmod 700 $HOME/.ssh + cp ${testOnlySSHCredentials}/${user}{,.pub,-cert.pub} $HOME/.ssh + chmod 600 $HOME/.ssh/${user} + chmod 644 $HOME/.ssh/${user}{,-cert}.pub + + set -x + + ${pkgs.openssh}/bin/ssh-add $HOME/.ssh/${user} + ${pkgs.openssh}/bin/ssh-add -l &>2 + + exec sudo id -u -n + ''; +in +{ + name = "pam-ussh"; + meta.maintainers = with lib.maintainers; [ lukegb ]; + + machine = + { ... }: + { + users.users.alice = { + isNormalUser = true; + extraGroups = [ "wheel" ]; + }; + users.users.bob = { + isNormalUser = true; + extraGroups = [ "wheel" ]; }; - testScript = '' - with subtest("alice should be allowed to escalate to root"): - machine.succeed( - 'su -c "${makeTestScript "alice"}" -l alice | grep root' - ) + security.pam.ussh = { + enable = true; + authorizedPrincipals = "root"; + caFile = "${testOnlySSHCredentials}/ca.pub"; + }; - with subtest("bob should not be allowed to escalate to root"): - machine.fail( - 'su -c "${makeTestScript "bob"}" -l bob | grep root' - ) - ''; - } -) + security.sudo = { + enable = true; + extraConfig = '' + Defaults lecture="never" + ''; + }; + }; + + testScript = '' + with subtest("alice should be allowed to escalate to root"): + machine.succeed( + 'su -c "${makeTestScript "alice"}" -l alice | grep root' + ) + + with subtest("bob should not be allowed to escalate to root"): + machine.fail( + 'su -c "${makeTestScript "bob"}" -l bob | grep root' + ) + ''; +} diff --git a/nixos/tests/pam/zfs-key.nix b/nixos/tests/pam/zfs-key.nix index 2a494eaa5702..9251a3a9173c 100644 --- a/nixos/tests/pam/zfs-key.nix +++ b/nixos/tests/pam/zfs-key.nix @@ -1,86 +1,84 @@ -import ../make-test-python.nix ( - { ... }: +{ ... }: - let - userPassword = "password"; - mismatchPass = "mismatch"; - in - { - name = "pam-zfs-key"; +let + userPassword = "password"; + mismatchPass = "mismatch"; +in +{ + name = "pam-zfs-key"; - nodes.machine = - { ... }: - { - boot.supportedFilesystems = [ "zfs" ]; + nodes.machine = + { ... }: + { + boot.supportedFilesystems = [ "zfs" ]; - networking.hostId = "12345678"; + networking.hostId = "12345678"; - security.pam.zfs.enable = true; + security.pam.zfs.enable = true; - users.users = { - alice = { - isNormalUser = true; - password = userPassword; - }; - bob = { - isNormalUser = true; - password = userPassword; - }; + users.users = { + alice = { + isNormalUser = true; + password = userPassword; + }; + bob = { + isNormalUser = true; + password = userPassword; }; }; + }; - testScript = - { nodes, ... 
}: - let - homes = nodes.machine.security.pam.zfs.homes; - pool = builtins.head (builtins.split "/" homes); - in - '' - machine.wait_for_unit("multi-user.target") - machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + testScript = + { nodes, ... }: + let + homes = nodes.machine.security.pam.zfs.homes; + pool = builtins.head (builtins.split "/" homes); + in + '' + machine.wait_for_unit("multi-user.target") + machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") - with subtest("Create encrypted ZFS datasets"): - machine.succeed("truncate -s 64M /testpool.img") - machine.succeed("zpool create -O canmount=off '${pool}' /testpool.img") - machine.succeed("zfs create -o canmount=off -p '${homes}'") - machine.succeed("echo ${userPassword} | zfs create -o canmount=noauto -o encryption=on -o keyformat=passphrase '${homes}/alice'") - machine.succeed("zfs unload-key '${homes}/alice'") - machine.succeed("echo ${mismatchPass} | zfs create -o canmount=noauto -o encryption=on -o keyformat=passphrase '${homes}/bob'") - machine.succeed("zfs unload-key '${homes}/bob'") + with subtest("Create encrypted ZFS datasets"): + machine.succeed("truncate -s 64M /testpool.img") + machine.succeed("zpool create -O canmount=off '${pool}' /testpool.img") + machine.succeed("zfs create -o canmount=off -p '${homes}'") + machine.succeed("echo ${userPassword} | zfs create -o canmount=noauto -o encryption=on -o keyformat=passphrase '${homes}/alice'") + machine.succeed("zfs unload-key '${homes}/alice'") + machine.succeed("echo ${mismatchPass} | zfs create -o canmount=noauto -o encryption=on -o keyformat=passphrase '${homes}/bob'") + machine.succeed("zfs unload-key '${homes}/bob'") - with subtest("Switch to tty2"): - machine.fail("pgrep -f 'agetty.*tty2'") - machine.send_key("alt-f2") - machine.wait_until_succeeds("[ $(fgconsole) = 2 ]") - machine.wait_for_unit("getty@tty2.service") - machine.wait_until_succeeds("pgrep -f 'agetty.*tty2'") + with subtest("Switch to tty2"): + machine.fail("pgrep -f 'agetty.*tty2'") + machine.send_key("alt-f2") + machine.wait_until_succeeds("[ $(fgconsole) = 2 ]") + machine.wait_for_unit("getty@tty2.service") + machine.wait_until_succeeds("pgrep -f 'agetty.*tty2'") - with subtest("Log in as user with home locked by login password"): - machine.wait_until_tty_matches("2", "login: ") - machine.send_chars("alice\n") - machine.wait_until_tty_matches("2", "login: alice") - machine.wait_until_succeeds("pgrep login") - machine.wait_until_tty_matches("2", "Password: ") - machine.send_chars("${userPassword}\n") - machine.wait_until_succeeds("pgrep -u alice bash") - machine.succeed("mount | grep ${homes}/alice") + with subtest("Log in as user with home locked by login password"): + machine.wait_until_tty_matches("2", "login: ") + machine.send_chars("alice\n") + machine.wait_until_tty_matches("2", "login: alice") + machine.wait_until_succeeds("pgrep login") + machine.wait_until_tty_matches("2", "Password: ") + machine.send_chars("${userPassword}\n") + machine.wait_until_succeeds("pgrep -u alice bash") + machine.succeed("mount | grep ${homes}/alice") - with subtest("Switch to tty3"): - machine.fail("pgrep -f 'agetty.*tty3'") - machine.send_key("alt-f3") - machine.wait_until_succeeds("[ $(fgconsole) = 3 ]") - machine.wait_for_unit("getty@tty3.service") - machine.wait_until_succeeds("pgrep -f 'agetty.*tty3'") + with subtest("Switch to tty3"): + machine.fail("pgrep -f 'agetty.*tty3'") + machine.send_key("alt-f3") + machine.wait_until_succeeds("[ $(fgconsole) = 3 ]") + machine.wait_for_unit("getty@tty3.service") 
+ machine.wait_until_succeeds("pgrep -f 'agetty.*tty3'") - with subtest("Log in as user with home locked by password different from login"): - machine.wait_until_tty_matches("3", "login: ") - machine.send_chars("bob\n") - machine.wait_until_tty_matches("3", "login: bob") - machine.wait_until_succeeds("pgrep login") - machine.wait_until_tty_matches("3", "Password: ") - machine.send_chars("${userPassword}\n") - machine.wait_until_succeeds("pgrep -u bob bash") - machine.fail("mount | grep ${homes}/bob") - ''; - } -) + with subtest("Log in as user with home locked by password different from login"): + machine.wait_until_tty_matches("3", "login: ") + machine.send_chars("bob\n") + machine.wait_until_tty_matches("3", "login: bob") + machine.wait_until_succeeds("pgrep login") + machine.wait_until_tty_matches("3", "Password: ") + machine.send_chars("${userPassword}\n") + machine.wait_until_succeeds("pgrep -u bob bash") + machine.fail("mount | grep ${homes}/bob") + ''; +} diff --git a/nixos/tests/pantheon-wayland.nix b/nixos/tests/pantheon-wayland.nix index 74bc57eae78c..6bda9e508ef2 100644 --- a/nixos/tests/pantheon-wayland.nix +++ b/nixos/tests/pantheon-wayland.nix @@ -1,104 +1,102 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - { - name = "pantheon-wayland"; +{ + name = "pantheon-wayland"; - meta.maintainers = lib.teams.pantheon.members; + meta.maintainers = lib.teams.pantheon.members; - nodes.machine = - { nodes, ... }: + nodes.machine = + { nodes, ... }: - let - videosAutostart = pkgs.writeTextFile { - name = "autostart-elementary-videos"; - destination = "/etc/xdg/autostart/io.elementary.videos.desktop"; - text = '' - [Desktop Entry] - Version=1.0 - Name=Videos - Type=Application - Terminal=false - Exec=io.elementary.videos %U - ''; - }; - in - { - imports = [ ./common/user-account.nix ]; + let + videosAutostart = pkgs.writeTextFile { + name = "autostart-elementary-videos"; + destination = "/etc/xdg/autostart/io.elementary.videos.desktop"; + text = '' + [Desktop Entry] + Version=1.0 + Name=Videos + Type=Application + Terminal=false + Exec=io.elementary.videos %U + ''; + }; + in + { + imports = [ ./common/user-account.nix ]; - # Workaround ".gala-wrapped invoked oom-killer" - virtualisation.memorySize = 2047; + # Workaround ".gala-wrapped invoked oom-killer" + virtualisation.memorySize = 2047; - services.xserver.enable = true; - services.xserver.desktopManager.pantheon.enable = true; - services.displayManager = { - autoLogin.enable = true; - autoLogin.user = nodes.machine.users.users.alice.name; - defaultSession = "pantheon-wayland"; - }; - - # We ship pantheon.appcenter by default when this is enabled. - services.flatpak.enable = true; - - # For basic OCR tests. - environment.systemPackages = [ videosAutostart ]; - - # We don't ship gnome-text-editor in Pantheon module, we add this line mainly - # to catch eval issues related to this option. - environment.pantheon.excludePackages = [ pkgs.gnome-text-editor ]; + services.xserver.enable = true; + services.xserver.desktopManager.pantheon.enable = true; + services.displayManager = { + autoLogin.enable = true; + autoLogin.user = nodes.machine.users.users.alice.name; + defaultSession = "pantheon-wayland"; }; - enableOCR = true; + # We ship pantheon.appcenter by default when this is enabled. + services.flatpak.enable = true; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - in - '' - machine.wait_for_unit("display-manager.service") + # For basic OCR tests. 
+ environment.systemPackages = [ videosAutostart ]; - with subtest("Wait for wayland server"): - machine.wait_for_file("/run/user/${toString user.uid}/wayland-0") + # We don't ship gnome-text-editor in Pantheon module, we add this line mainly + # to catch eval issues related to this option. + environment.pantheon.excludePackages = [ pkgs.gnome-text-editor ]; + }; - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + enableOCR = true; - with subtest("Check if Pantheon components actually start"): - # We specifically check gsd-xsettings here since it is manually pulled up by gala. - # https://github.com/elementary/gala/pull/2140 - for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock", "gsd-media-keys", "gsd-xsettings", "io.elementary.desktop.agent-polkit"]: - machine.wait_until_succeeds(f"pgrep -f {i}") - machine.wait_until_succeeds("pgrep -xf ${pkgs.pantheon.elementary-files}/libexec/io.elementary.files.xdg-desktop-portal") + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + in + '' + machine.wait_for_unit("display-manager.service") - with subtest("Check if various environment variables are set"): - cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf ${pkgs.pantheon.gala}/bin/gala)/environ" - machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Pantheon'") - machine.succeed(f"{cmd} | grep 'XDG_SESSION_TYPE' | grep 'wayland'") - # Hopefully from the sessionPath option. - machine.succeed(f"{cmd} | grep 'XDG_DATA_DIRS' | grep 'gsettings-schemas/pantheon-agent-geoclue2'") - # Hopefully from login shell. - machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE' | grep '1'") + with subtest("Wait for wayland server"): + machine.wait_for_file("/run/user/${toString user.uid}/wayland-0") - with subtest("Wait for elementary videos autostart"): - machine.wait_until_succeeds("pgrep -f io.elementary.videos") - machine.wait_for_text("No Videos Open") - machine.screenshot("videos") + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - with subtest("Trigger multitasking view"): - cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1" - env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus" - machine.succeed(f"su - ${user.name} -c '{env} {cmd}'") - machine.sleep(5) - machine.screenshot("multitasking") - machine.succeed(f"su - ${user.name} -c '{env} {cmd}'") + with subtest("Check if Pantheon components actually start"): + # We specifically check gsd-xsettings here since it is manually pulled up by gala. + # https://github.com/elementary/gala/pull/2140 + for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock", "gsd-media-keys", "gsd-xsettings", "io.elementary.desktop.agent-polkit"]: + machine.wait_until_succeeds(f"pgrep -f {i}") + machine.wait_until_succeeds("pgrep -xf ${pkgs.pantheon.elementary-files}/libexec/io.elementary.files.xdg-desktop-portal") - with subtest("Check if gala has ever coredumped"): - machine.fail("coredumpctl --json=short | grep gala") - # So we can see the dock. 
- machine.execute("pkill -f -9 io.elementary.videos") - machine.sleep(10) - machine.screenshot("screen") - ''; - } -) + with subtest("Check if various environment variables are set"): + cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf ${pkgs.pantheon.gala}/bin/gala)/environ" + machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Pantheon'") + machine.succeed(f"{cmd} | grep 'XDG_SESSION_TYPE' | grep 'wayland'") + # Hopefully from the sessionPath option. + machine.succeed(f"{cmd} | grep 'XDG_DATA_DIRS' | grep 'gsettings-schemas/pantheon-agent-geoclue2'") + # Hopefully from login shell. + machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE' | grep '1'") + + with subtest("Wait for elementary videos autostart"): + machine.wait_until_succeeds("pgrep -f io.elementary.videos") + machine.wait_for_text("No Videos Open") + machine.screenshot("videos") + + with subtest("Trigger multitasking view"): + cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1" + env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus" + machine.succeed(f"su - ${user.name} -c '{env} {cmd}'") + machine.sleep(5) + machine.screenshot("multitasking") + machine.succeed(f"su - ${user.name} -c '{env} {cmd}'") + + with subtest("Check if gala has ever coredumped"): + machine.fail("coredumpctl --json=short | grep gala") + # So we can see the dock. + machine.execute("pkill -f -9 io.elementary.videos") + machine.sleep(10) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/pantheon.nix b/nixos/tests/pantheon.nix index 6392d193a34c..b8cb0512c542 100644 --- a/nixos/tests/pantheon.nix +++ b/nixos/tests/pantheon.nix @@ -1,116 +1,114 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - { - name = "pantheon"; +{ + name = "pantheon"; - meta.maintainers = lib.teams.pantheon.members; + meta.maintainers = lib.teams.pantheon.members; - nodes.machine = - { ... }: + nodes.machine = + { ... }: - { - imports = [ ./common/user-account.nix ]; + { + imports = [ ./common/user-account.nix ]; - # Workaround ".gala-wrapped invoked oom-killer" - virtualisation.memorySize = 2047; + # Workaround ".gala-wrapped invoked oom-killer" + virtualisation.memorySize = 2047; - services.xserver.enable = true; - services.xserver.desktopManager.pantheon.enable = true; + services.xserver.enable = true; + services.xserver.desktopManager.pantheon.enable = true; - # We ship pantheon.appcenter by default when this is enabled. - services.flatpak.enable = true; + # We ship pantheon.appcenter by default when this is enabled. + services.flatpak.enable = true; - # We don't ship gnome-text-editor in Pantheon module, we add this line mainly - # to catch eval issues related to this option. - environment.pantheon.excludePackages = [ pkgs.gnome-text-editor ]; + # We don't ship gnome-text-editor in Pantheon module, we add this line mainly + # to catch eval issues related to this option. + environment.pantheon.excludePackages = [ pkgs.gnome-text-editor ]; - environment.systemPackages = [ pkgs.xdotool ]; - }; + environment.systemPackages = [ pkgs.xdotool ]; + }; - enableOCR = true; + enableOCR = true; - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - bob = nodes.machine.users.users.bob; - in - '' - machine.wait_for_unit("display-manager.service") + testScript = + { nodes, ... 
}: + let + user = nodes.machine.users.users.alice; + bob = nodes.machine.users.users.bob; + in + '' + machine.wait_for_unit("display-manager.service") - with subtest("Test we can see usernames in elementary-greeter"): - machine.wait_for_text("${user.description}") - machine.wait_until_succeeds("pgrep -f io.elementary.greeter-compositor") - # OCR was struggling with this one. - # machine.wait_for_text("${bob.description}") - # Ensure the password box is focused by clicking it. - # Workaround for https://github.com/NixOS/nixpkgs/issues/211366. - machine.succeed("XAUTHORITY=/var/lib/lightdm/.Xauthority DISPLAY=:0 xdotool mousemove 512 505 click 1") - machine.sleep(2) - machine.screenshot("elementary_greeter_lightdm") + with subtest("Test we can see usernames in elementary-greeter"): + machine.wait_for_text("${user.description}") + machine.wait_until_succeeds("pgrep -f io.elementary.greeter-compositor") + # OCR was struggling with this one. + # machine.wait_for_text("${bob.description}") + # Ensure the password box is focused by clicking it. + # Workaround for https://github.com/NixOS/nixpkgs/issues/211366. + machine.succeed("XAUTHORITY=/var/lib/lightdm/.Xauthority DISPLAY=:0 xdotool mousemove 512 505 click 1") + machine.sleep(2) + machine.screenshot("elementary_greeter_lightdm") - with subtest("Login with elementary-greeter"): - machine.send_chars("${user.password}\n") - machine.wait_for_x() - machine.wait_for_file("${user.home}/.Xauthority") - machine.succeed("xauth merge ${user.home}/.Xauthority") - machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"') + with subtest("Login with elementary-greeter"): + machine.send_chars("${user.password}\n") + machine.wait_for_x() + machine.wait_for_file("${user.home}/.Xauthority") + machine.succeed("xauth merge ${user.home}/.Xauthority") + machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"') - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - with subtest("Check if Pantheon components actually start"): - for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock", "gsd-media-keys", "io.elementary.desktop.agent-polkit"]: - machine.wait_until_succeeds(f"pgrep -f {i}") - for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock"]: - machine.wait_for_window(i) - machine.wait_until_succeeds("pgrep -xf ${pkgs.pantheon.elementary-files}/libexec/io.elementary.files.xdg-desktop-portal") + with subtest("Check if Pantheon components actually start"): + for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock", "gsd-media-keys", "io.elementary.desktop.agent-polkit"]: + machine.wait_until_succeeds(f"pgrep -f {i}") + for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock"]: + machine.wait_for_window(i) + machine.wait_until_succeeds("pgrep -xf ${pkgs.pantheon.elementary-files}/libexec/io.elementary.files.xdg-desktop-portal") - with subtest("Check if various environment variables are set"): - cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf ${pkgs.pantheon.gala}/bin/gala)/environ" - machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Pantheon'") - # Hopefully from the sessionPath option. 
- machine.succeed(f"{cmd} | grep 'XDG_DATA_DIRS' | grep 'gsettings-schemas/pantheon-agent-geoclue2'") - # Hopefully from login shell. - machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE' | grep '1'") + with subtest("Check if various environment variables are set"): + cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf ${pkgs.pantheon.gala}/bin/gala)/environ" + machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Pantheon'") + # Hopefully from the sessionPath option. + machine.succeed(f"{cmd} | grep 'XDG_DATA_DIRS' | grep 'gsettings-schemas/pantheon-agent-geoclue2'") + # Hopefully from login shell. + machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE' | grep '1'") - with subtest("Open elementary videos"): - machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.videos >&2 &'") - machine.sleep(2) - machine.wait_for_window("io.elementary.videos") - machine.wait_for_text("No Videos Open") + with subtest("Open elementary videos"): + machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.videos >&2 &'") + machine.sleep(2) + machine.wait_for_window("io.elementary.videos") + machine.wait_for_text("No Videos Open") - with subtest("Open elementary calendar"): - machine.wait_until_succeeds("pgrep -f evolution-calendar-factory") - machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.calendar >&2 &'") - machine.sleep(2) - machine.wait_for_window("io.elementary.calendar") + with subtest("Open elementary calendar"): + machine.wait_until_succeeds("pgrep -f evolution-calendar-factory") + machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.calendar >&2 &'") + machine.sleep(2) + machine.wait_for_window("io.elementary.calendar") - with subtest("Open system settings"): - machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.settings >&2 &'") - # Wait for all plugins to be loaded before we check if the window is still there. - machine.sleep(5) - machine.wait_for_window("io.elementary.settings") + with subtest("Open system settings"): + machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.settings >&2 &'") + # Wait for all plugins to be loaded before we check if the window is still there. 
+ machine.sleep(5) + machine.wait_for_window("io.elementary.settings") - with subtest("Open elementary terminal"): - machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.terminal >&2 &'") - machine.wait_for_window("io.elementary.terminal") + with subtest("Open elementary terminal"): + machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.terminal >&2 &'") + machine.wait_for_window("io.elementary.terminal") - with subtest("Trigger multitasking view"): - cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1" - env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0" - machine.succeed(f"su - ${user.name} -c '{env} {cmd}'") - machine.sleep(5) - machine.screenshot("multitasking") - machine.succeed(f"su - ${user.name} -c '{env} {cmd}'") + with subtest("Trigger multitasking view"): + cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1" + env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0" + machine.succeed(f"su - ${user.name} -c '{env} {cmd}'") + machine.sleep(5) + machine.screenshot("multitasking") + machine.succeed(f"su - ${user.name} -c '{env} {cmd}'") - with subtest("Check if gala has ever coredumped"): - machine.fail("coredumpctl --json=short | grep gala") - # So you can see the dock in the below screenshot. - machine.succeed("su - ${user.name} -c 'DISPLAY=:0 xdotool mousemove 450 1000 >&2 &'") - machine.sleep(10) - machine.screenshot("screen") - ''; - } -) + with subtest("Check if gala has ever coredumped"): + machine.fail("coredumpctl --json=short | grep gala") + # So you can see the dock in the below screenshot. + machine.succeed("su - ${user.name} -c 'DISPLAY=:0 xdotool mousemove 450 1000 >&2 &'") + machine.sleep(10) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/paperless.nix b/nixos/tests/paperless.nix index 28ec877f2435..d35d909e690f 100644 --- a/nixos/tests/paperless.nix +++ b/nixos/tests/paperless.nix @@ -1,127 +1,125 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "paperless"; - meta.maintainers = with lib.maintainers; [ - leona - SuperSandro2000 - erikarvstedt - ]; +{ lib, ... }: +{ + name = "paperless"; + meta.maintainers = with lib.maintainers; [ + leona + SuperSandro2000 + erikarvstedt + ]; - nodes = - let - self = { - simple = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ - imagemagick - jq - ]; - services.paperless = { + nodes = + let + self = { + simple = + { pkgs, ... }: + { + environment.systemPackages = with pkgs; [ + imagemagick + jq + ]; + services.paperless = { + enable = true; + passwordFile = builtins.toFile "password" "admin"; + + exporter = { enable = true; - passwordFile = builtins.toFile "password" "admin"; - exporter = { - enable = true; - - settings = { - "no-color" = lib.mkForce false; # override a default option - "no-thumbnail" = true; # add a new option - }; + settings = { + "no-color" = lib.mkForce false; # override a default option + "no-thumbnail" = true; # add a new option }; }; }; - postgres = - { config, pkgs, ... }: - { - imports = [ self.simple ]; - services.paperless.database.createLocally = true; - services.paperless.settings = { - PAPERLESS_OCR_LANGUAGE = "deu"; - }; + }; + postgres = + { config, pkgs, ... 
}: + { + imports = [ self.simple ]; + services.paperless.database.createLocally = true; + services.paperless.settings = { + PAPERLESS_OCR_LANGUAGE = "deu"; }; - }; - in - self; + }; + }; + in + self; - testScript = '' - import json + testScript = '' + import json - def test_paperless(node): - node.wait_for_unit("paperless-consumer.service") + def test_paperless(node): + node.wait_for_unit("paperless-consumer.service") - with subtest("Add a document via the file system"): - node.succeed( - "convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black " - "-annotate +5+20 'hello world 16-10-2005' /var/lib/paperless/consume/doc.png" - ) + with subtest("Add a document via the file system"): + node.succeed( + "convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black " + "-annotate +5+20 'hello world 16-10-2005' /var/lib/paperless/consume/doc.png" + ) - with subtest("Web interface gets ready"): + with subtest("Web interface gets ready"): + node.wait_for_unit("paperless-web.service") + # Wait until server accepts connections + node.wait_until_succeeds("curl -fs localhost:28981") + + # Required for consuming documents via the web interface + with subtest("Task-queue gets ready"): + node.wait_for_unit("paperless-task-queue.service") + + with subtest("Add a png document via the web interface"): + node.succeed( + "convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black " + "-annotate +5+20 'hello web 16-10-2005' /tmp/webdoc.png" + ) + node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.png -fs localhost:28981/api/documents/post_document/") + + with subtest("Add a txt document via the web interface"): + node.succeed( + "echo 'hello web 16-10-2005' > /tmp/webdoc.txt" + ) + node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.txt -fs localhost:28981/api/documents/post_document/") + + with subtest("Documents are consumed"): + node.wait_until_succeeds( + "(($(curl -u admin:admin -fs localhost:28981/api/documents/ | jq .count) == 3))" + ) + docs = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/"))['results'] + assert "2005-10-16" in docs[0]['created'] + assert "2005-10-16" in docs[1]['created'] + assert "2005-10-16" in docs[2]['created'] + + # Detects gunicorn issues, see PR #190888 + with subtest("Document metadata can be accessed"): + metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/1/metadata/")) + assert "original_checksum" in metadata + + metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/2/metadata/")) + assert "original_checksum" in metadata + + metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/3/metadata/")) + assert "original_checksum" in metadata + + with subtest("Exporter"): + node.succeed("systemctl start --wait paperless-exporter") node.wait_for_unit("paperless-web.service") - # Wait until server accepts connections - node.wait_until_succeeds("curl -fs localhost:28981") - - # Required for consuming documents via the web interface - with subtest("Task-queue gets ready"): + node.wait_for_unit("paperless-consumer.service") + node.wait_for_unit("paperless-scheduler.service") node.wait_for_unit("paperless-task-queue.service") - with subtest("Add a png document via the web interface"): - node.succeed( - "convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black " - "-annotate +5+20 'hello web 16-10-2005' /tmp/webdoc.png" - ) - 
node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.png -fs localhost:28981/api/documents/post_document/") + node.succeed("ls -lah /var/lib/paperless/export/manifest.json") - with subtest("Add a txt document via the web interface"): - node.succeed( - "echo 'hello web 16-10-2005' > /tmp/webdoc.txt" - ) - node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.txt -fs localhost:28981/api/documents/post_document/") + timers = node.succeed("systemctl list-timers paperless-exporter") + print(timers) + assert "paperless-exporter.timer paperless-exporter.service" in timers, "missing timer" + assert "1 timers listed." in timers, "incorrect number of timers" - with subtest("Documents are consumed"): - node.wait_until_succeeds( - "(($(curl -u admin:admin -fs localhost:28981/api/documents/ | jq .count) == 3))" - ) - docs = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/"))['results'] - assert "2005-10-16" in docs[0]['created'] - assert "2005-10-16" in docs[1]['created'] - assert "2005-10-16" in docs[2]['created'] + # Double check that our attrset option override works as expected + cmdline = node.succeed("grep 'paperless-manage' $(systemctl cat paperless-exporter | grep ExecStart | cut -f 2 -d=)") + print(f"Exporter command line {cmdline!r}") + assert cmdline.strip() == "paperless-manage document_exporter /var/lib/paperless/export --compare-checksums --delete --no-progress-bar --no-thumbnail", "Unexpected exporter command line" - # Detects gunicorn issues, see PR #190888 - with subtest("Document metadata can be accessed"): - metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/1/metadata/")) - assert "original_checksum" in metadata - - metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/2/metadata/")) - assert "original_checksum" in metadata - - metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/3/metadata/")) - assert "original_checksum" in metadata - - with subtest("Exporter"): - node.succeed("systemctl start --wait paperless-exporter") - node.wait_for_unit("paperless-web.service") - node.wait_for_unit("paperless-consumer.service") - node.wait_for_unit("paperless-scheduler.service") - node.wait_for_unit("paperless-task-queue.service") - - node.succeed("ls -lah /var/lib/paperless/export/manifest.json") - - timers = node.succeed("systemctl list-timers paperless-exporter") - print(timers) - assert "paperless-exporter.timer paperless-exporter.service" in timers, "missing timer" - assert "1 timers listed." 
in timers, "incorrect number of timers" - - # Double check that our attrset option override works as expected - cmdline = node.succeed("grep 'paperless-manage' $(systemctl cat paperless-exporter | grep ExecStart | cut -f 2 -d=)") - print(f"Exporter command line {cmdline!r}") - assert cmdline.strip() == "paperless-manage document_exporter /var/lib/paperless/export --compare-checksums --delete --no-progress-bar --no-thumbnail", "Unexpected exporter command line" - - test_paperless(simple) - simple.send_monitor_command("quit") - simple.wait_for_shutdown() - test_paperless(postgres) - ''; - } -) + test_paperless(simple) + simple.send_monitor_command("quit") + simple.wait_for_shutdown() + test_paperless(postgres) + ''; +} diff --git a/nixos/tests/pass-secret-service.nix b/nixos/tests/pass-secret-service.nix index 9e85dd30610b..de07cfd47a49 100644 --- a/nixos/tests/pass-secret-service.nix +++ b/nixos/tests/pass-secret-service.nix @@ -1,75 +1,73 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "pass-secret-service"; - meta.maintainers = [ lib.maintainers.aidalgol ]; +{ pkgs, lib, ... }: +{ + name = "pass-secret-service"; + meta.maintainers = [ lib.maintainers.aidalgol ]; - nodes.machine = - { nodes, pkgs, ... }: - { - imports = [ ./common/user-account.nix ]; + nodes.machine = + { nodes, pkgs, ... }: + { + imports = [ ./common/user-account.nix ]; - services.passSecretService.enable = true; + services.passSecretService.enable = true; - environment.systemPackages = [ - # Create a script that tries to make a request to the D-Bus secrets API. - (pkgs.writers.writePython3Bin "secrets-dbus-init" - { - libraries = [ pkgs.python3Packages.secretstorage ]; - } - '' - import secretstorage - print("Initializing dbus connection...") - connection = secretstorage.dbus_init() - print("Requesting default collection...") - collection = secretstorage.get_default_collection(connection) - print("Done! dbus-org.freedesktop.secrets should now be active.") - '' - ) - pkgs.pass - ]; + environment.systemPackages = [ + # Create a script that tries to make a request to the D-Bus secrets API. + (pkgs.writers.writePython3Bin "secrets-dbus-init" + { + libraries = [ pkgs.python3Packages.secretstorage ]; + } + '' + import secretstorage + print("Initializing dbus connection...") + connection = secretstorage.dbus_init() + print("Requesting default collection...") + collection = secretstorage.get_default_collection(connection) + print("Done! dbus-org.freedesktop.secrets should now be active.") + '' + ) + pkgs.pass + ]; - programs.gnupg = { - agent.enable = true; - dirmngr.enable = true; - }; + programs.gnupg = { + agent.enable = true; + dirmngr.enable = true; }; + }; - # Some of the commands are run via a virtual console because they need to be - # run under a real login session, with D-Bus running in the environment. - testScript = - { nodes, ... }: - let - user = nodes.machine.config.users.users.alice; - gpg-uid = "alice@example.net"; - gpg-pw = "foobar9000"; - ready-file = "/tmp/secrets-dbus-init.done"; - in - '' - # Initialise the pass(1) storage. - machine.succeed(""" - sudo -u alice gpg --pinentry-mode loopback --batch --passphrase ${gpg-pw} \ - --quick-gen-key ${gpg-uid} \ - """) - machine.succeed("sudo -u alice pass init ${gpg-uid}") + # Some of the commands are run via a virtual console because they need to be + # run under a real login session, with D-Bus running in the environment. + testScript = + { nodes, ... 
}: + let + user = nodes.machine.config.users.users.alice; + gpg-uid = "alice@example.net"; + gpg-pw = "foobar9000"; + ready-file = "/tmp/secrets-dbus-init.done"; + in + '' + # Initialise the pass(1) storage. + machine.succeed(""" + sudo -u alice gpg --pinentry-mode loopback --batch --passphrase ${gpg-pw} \ + --quick-gen-key ${gpg-uid} \ + """) + machine.succeed("sudo -u alice pass init ${gpg-uid}") - with subtest("Service is not running on login"): - machine.wait_until_tty_matches("1", "login: ") - machine.send_chars("alice\n") - machine.wait_until_tty_matches("1", "login: alice") - machine.wait_until_succeeds("pgrep login") - machine.wait_until_tty_matches("1", "Password: ") - machine.send_chars("${user.password}\n") - machine.wait_until_succeeds("pgrep -u alice bash") + with subtest("Service is not running on login"): + machine.wait_until_tty_matches("1", "login: ") + machine.send_chars("alice\n") + machine.wait_until_tty_matches("1", "login: alice") + machine.wait_until_succeeds("pgrep login") + machine.wait_until_tty_matches("1", "Password: ") + machine.send_chars("${user.password}\n") + machine.wait_until_succeeds("pgrep -u alice bash") - _, output = machine.systemctl("status dbus-org.freedesktop.secrets --no-pager", "alice") - assert "Active: inactive (dead)" in output + _, output = machine.systemctl("status dbus-org.freedesktop.secrets --no-pager", "alice") + assert "Active: inactive (dead)" in output - with subtest("Service starts after a client tries to talk to the D-Bus API"): - machine.send_chars("secrets-dbus-init; touch ${ready-file}\n") - machine.wait_for_file("${ready-file}") - _, output = machine.systemctl("status dbus-org.freedesktop.secrets --no-pager", "alice") - assert "Active: active (running)" in output - ''; - } -) + with subtest("Service starts after a client tries to talk to the D-Bus API"): + machine.send_chars("secrets-dbus-init; touch ${ready-file}\n") + machine.wait_for_file("${ready-file}") + _, output = machine.systemctl("status dbus-org.freedesktop.secrets --no-pager", "alice") + assert "Active: active (running)" in output + ''; +} diff --git a/nixos/tests/password-option-override-ordering.nix b/nixos/tests/password-option-override-ordering.nix index 5b06ab0bdbff..c2e4b1768108 100644 --- a/nixos/tests/password-option-override-ordering.nix +++ b/nixos/tests/password-option-override-ordering.nix @@ -6,166 +6,164 @@ let hashed_sha512crypt = "$6$ymzs8WINZ5wGwQcV$VC2S0cQiX8NVukOLymysTPn4v1zJoJp3NGyhnqyv/dAf4NWZsBWYveQcj6gEJr4ZUjRBRjM0Pj1L8TCQ8hUUp0"; # meow in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "password-option-override-ordering"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fidgetingbits ]; - }; +{ pkgs, ... }: +{ + name = "password-option-override-ordering"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fidgetingbits ]; + }; - nodes = - let - # The following users are expected to have the same behavior between immutable and mutable systems - # NOTE: Below given A -> B it implies B overrides A . 
Each entry below builds off the next - users = { - # mutable true/false: initialHashedPassword -> hashedPassword - fran = { - isNormalUser = true; - initialHashedPassword = hashed_yeshash; - hashedPassword = hashed_sha512crypt; - }; - - # mutable false: initialHashedPassword -> hashedPassword -> initialPassword - # mutable true: initialHashedPassword -> initialPassword -> hashedPassword - greg = { - isNormalUser = true; - hashedPassword = hashed_sha512crypt; - initialPassword = password1; - }; - - # mutable false: initialHashedPassword -> hashedPassword -> initialPassword -> password - # mutable true: initialHashedPassword -> initialPassword -> hashedPassword -> password - egon = { - isNormalUser = true; - initialPassword = password2; - password = password1; - }; - - # mutable true/false: hashedPassword -> password - # NOTE: minor duplication of test above, but to verify no initialXXX use is consistent - alice = { - isNormalUser = true; - hashedPassword = hashed_sha512crypt; - password = password1; - }; - - # mutable false: initialHashedPassword -> hashedPassword -> initialPassword -> password -> hashedPasswordFile - # mutable true: initialHashedPassword -> initialPassword -> hashedPassword -> password -> hashedPasswordFile - bob = { - isNormalUser = true; - hashedPassword = hashed_sha512crypt; - password = password1; - hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath; # Expect override of everything above - }; - - # Show hashedPassword -> password -> hashedPasswordFile -> initialPassword is false - # to explicitly show the following lib.trace warning in users-groups.nix (which was - # the wording prior to PR 310484) is in fact wrong: - # ``` - # The user 'root' has multiple of the options - # `hashedPassword`, `password`, `hashedPasswordFile`, `initialPassword` - # & `initialHashedPassword` set to a non-null value. - # The options silently discard others by the order of precedence - # given above which can lead to surprising results. To resolve this warning, - # set at most one of the options above to a non-`null` value. - # ``` - cat = { - isNormalUser = true; - hashedPassword = hashed_sha512crypt; - password = password1; - hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath; - initialPassword = password2; # lib.trace message implies this overrides everything above - }; - - # Show hashedPassword -> password -> hashedPasswordFile -> initialHashedPassword is false - # to also explicitly show the lib.trace explained above (see cat user) is wrong - dan = { - isNormalUser = true; - hashedPassword = hashed_sha512crypt; - initialPassword = password2; - password = password1; - hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath; - initialHashedPassword = hashed_yeshash; # lib.trace message implies this overrides everything above - }; + nodes = + let + # The following users are expected to have the same behavior between immutable and mutable systems + # NOTE: Below given A -> B it implies B overrides A . 
Each entry below builds off the next + users = { + # mutable true/false: initialHashedPassword -> hashedPassword + fran = { + isNormalUser = true; + initialHashedPassword = hashed_yeshash; + hashedPassword = hashed_sha512crypt; }; - mkTestMachine = mutable: { - environment.systemPackages = [ pkgs.shadow ]; - users = { - mutableUsers = mutable; - inherit users; - }; + # mutable false: initialHashedPassword -> hashedPassword -> initialPassword + # mutable true: initialHashedPassword -> initialPassword -> hashedPassword + greg = { + isNormalUser = true; + hashedPassword = hashed_sha512crypt; + initialPassword = password1; + }; + + # mutable false: initialHashedPassword -> hashedPassword -> initialPassword -> password + # mutable true: initialHashedPassword -> initialPassword -> hashedPassword -> password + egon = { + isNormalUser = true; + initialPassword = password2; + password = password1; + }; + + # mutable true/false: hashedPassword -> password + # NOTE: minor duplication of test above, but to verify no initialXXX use is consistent + alice = { + isNormalUser = true; + hashedPassword = hashed_sha512crypt; + password = password1; + }; + + # mutable false: initialHashedPassword -> hashedPassword -> initialPassword -> password -> hashedPasswordFile + # mutable true: initialHashedPassword -> initialPassword -> hashedPassword -> password -> hashedPasswordFile + bob = { + isNormalUser = true; + hashedPassword = hashed_sha512crypt; + password = password1; + hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath; # Expect override of everything above + }; + + # Show hashedPassword -> password -> hashedPasswordFile -> initialPassword is false + # to explicitly show the following lib.trace warning in users-groups.nix (which was + # the wording prior to PR 310484) is in fact wrong: + # ``` + # The user 'root' has multiple of the options + # `hashedPassword`, `password`, `hashedPasswordFile`, `initialPassword` + # & `initialHashedPassword` set to a non-null value. + # The options silently discard others by the order of precedence + # given above which can lead to surprising results. To resolve this warning, + # set at most one of the options above to a non-`null` value. 
+ # ``` + cat = { + isNormalUser = true; + hashedPassword = hashed_sha512crypt; + password = password1; + hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath; + initialPassword = password2; # lib.trace message implies this overrides everything above + }; + + # Show hashedPassword -> password -> hashedPasswordFile -> initialHashedPassword is false + # to also explicitly show the lib.trace explained above (see cat user) is wrong + dan = { + isNormalUser = true; + hashedPassword = hashed_sha512crypt; + initialPassword = password2; + password = password1; + hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath; + initialHashedPassword = hashed_yeshash; # lib.trace message implies this overrides everything above }; - in - { - immutable = mkTestMachine false; - mutable = mkTestMachine true; }; - testScript = '' - import crypt + mkTestMachine = mutable: { + environment.systemPackages = [ pkgs.shadow ]; + users = { + mutableUsers = mutable; + inherit users; + }; + }; + in + { + immutable = mkTestMachine false; + mutable = mkTestMachine true; + }; - def assert_password_match(machine, username, password): - shadow_entry = machine.succeed(f"getent shadow {username}") - print(shadow_entry) - hash = shadow_entry.split(":")[1] - seed = "$".join(hash.split("$")[:-1]) - assert crypt.crypt(password, seed) == hash, f"{username} user password does not match" + testScript = '' + import crypt - with subtest("alice user has correct password"): - for machine in machines: - assert_password_match(machine, "alice", "${password1}") - assert "${hashed_sha512crypt}" not in machine.succeed("getent shadow alice"), f"{machine}: alice user password is not correct" - - with subtest("bob user has correct password"): - for machine in machines: - print(machine.succeed("getent shadow bob")) - assert "${hashed_bcrypt}" in machine.succeed("getent shadow bob"), f"{machine}: bob user password is not correct" - - with subtest("cat user has correct password"): - for machine in machines: - print(machine.succeed("getent shadow cat")) - assert "${hashed_bcrypt}" in machine.succeed("getent shadow cat"), f"{machine}: cat user password is not correct" - - with subtest("dan user has correct password"): - for machine in machines: - print(machine.succeed("getent shadow dan")) - assert "${hashed_bcrypt}" in machine.succeed("getent shadow dan"), f"{machine}: dan user password is not correct" - - with subtest("greg user has correct password"): - print(mutable.succeed("getent shadow greg")) - assert "${hashed_sha512crypt}" in mutable.succeed("getent shadow greg"), "greg user password is not correct" - - assert_password_match(immutable, "greg", "${password1}") - assert "${hashed_sha512crypt}" not in immutable.succeed("getent shadow greg"), "greg user password is not correct" + def assert_password_match(machine, username, password): + shadow_entry = machine.succeed(f"getent shadow {username}") + print(shadow_entry) + hash = shadow_entry.split(":")[1] + seed = "$".join(hash.split("$")[:-1]) + assert crypt.crypt(password, seed) == hash, f"{username} user password does not match" + with subtest("alice user has correct password"): for machine in machines: - machine.wait_for_unit("multi-user.target") - machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + assert_password_match(machine, "alice", "${password1}") + assert "${hashed_sha512crypt}" not in machine.succeed("getent shadow alice"), f"{machine}: alice user password is not correct" - def check_login(machine: Machine, tty_number: str, username: str, 
password: str): - machine.send_key(f"alt-f{tty_number}") - machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]") - machine.wait_for_unit(f"getty@tty{tty_number}.service") - machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'") - machine.wait_until_tty_matches(tty_number, "login: ") - machine.send_chars(f"{username}\n") - machine.wait_until_tty_matches(tty_number, f"login: {username}") - machine.wait_until_succeeds("pgrep login") - machine.wait_until_tty_matches(tty_number, "Password: ") - machine.send_chars(f"{password}\n") - machine.send_chars(f"whoami > /tmp/{tty_number}\n") - machine.wait_for_file(f"/tmp/{tty_number}") - assert username in machine.succeed(f"cat /tmp/{tty_number}"), f"{machine}: {username} password is not correct" + with subtest("bob user has correct password"): + for machine in machines: + print(machine.succeed("getent shadow bob")) + assert "${hashed_bcrypt}" in machine.succeed("getent shadow bob"), f"{machine}: bob user password is not correct" - with subtest("Test initialPassword override"): - for machine in machines: - check_login(machine, "2", "egon", "${password1}") + with subtest("cat user has correct password"): + for machine in machines: + print(machine.succeed("getent shadow cat")) + assert "${hashed_bcrypt}" in machine.succeed("getent shadow cat"), f"{machine}: cat user password is not correct" - with subtest("Test initialHashedPassword override"): - for machine in machines: - check_login(machine, "3", "fran", "meow") - ''; - } -) + with subtest("dan user has correct password"): + for machine in machines: + print(machine.succeed("getent shadow dan")) + assert "${hashed_bcrypt}" in machine.succeed("getent shadow dan"), f"{machine}: dan user password is not correct" + + with subtest("greg user has correct password"): + print(mutable.succeed("getent shadow greg")) + assert "${hashed_sha512crypt}" in mutable.succeed("getent shadow greg"), "greg user password is not correct" + + assert_password_match(immutable, "greg", "${password1}") + assert "${hashed_sha512crypt}" not in immutable.succeed("getent shadow greg"), "greg user password is not correct" + + for machine in machines: + machine.wait_for_unit("multi-user.target") + machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + + def check_login(machine: Machine, tty_number: str, username: str, password: str): + machine.send_key(f"alt-f{tty_number}") + machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]") + machine.wait_for_unit(f"getty@tty{tty_number}.service") + machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'") + machine.wait_until_tty_matches(tty_number, "login: ") + machine.send_chars(f"{username}\n") + machine.wait_until_tty_matches(tty_number, f"login: {username}") + machine.wait_until_succeeds("pgrep login") + machine.wait_until_tty_matches(tty_number, "Password: ") + machine.send_chars(f"{password}\n") + machine.send_chars(f"whoami > /tmp/{tty_number}\n") + machine.wait_for_file(f"/tmp/{tty_number}") + assert username in machine.succeed(f"cat /tmp/{tty_number}"), f"{machine}: {username} password is not correct" + + with subtest("Test initialPassword override"): + for machine in machines: + check_login(machine, "2", "egon", "${password1}") + + with subtest("Test initialHashedPassword override"): + for machine in machines: + check_login(machine, "3", "fran", "meow") + ''; +} diff --git a/nixos/tests/pds.nix b/nixos/tests/pds.nix index 4c4ab2a64326..a07a2c1fcf72 100644 --- a/nixos/tests/pds.nix +++ b/nixos/tests/pds.nix @@ -1,29 +1,27 @@ -import 
./make-test-python.nix ( - { lib, ... }: - { - name = "PDS"; +{ lib, ... }: +{ + name = "PDS"; - nodes.machine = { - services.pds = { - enable = true; - settings = { - PDS_PORT = 3000; - PDS_HOSTNAME = "example.com"; + nodes.machine = { + services.pds = { + enable = true; + settings = { + PDS_PORT = 3000; + PDS_HOSTNAME = "example.com"; - # Snake oil testing credentials - PDS_JWT_SECRET = "7b93fee53be046bf59c27a32a0fb2069"; - PDS_ADMIN_PASSWORD = "3a4077bc0d5f04eca945ef0509f7e809"; - PDS_PLC_ROTATION_KEY_K256_PRIVATE_KEY_HEX = "ae4f5028d04c833ba630f29debd5ff80b7700e43e9f4bf70f729a88cd6a6ce35"; - }; + # Snake oil testing credentials + PDS_JWT_SECRET = "7b93fee53be046bf59c27a32a0fb2069"; + PDS_ADMIN_PASSWORD = "3a4077bc0d5f04eca945ef0509f7e809"; + PDS_PLC_ROTATION_KEY_K256_PRIVATE_KEY_HEX = "ae4f5028d04c833ba630f29debd5ff80b7700e43e9f4bf70f729a88cd6a6ce35"; }; }; + }; - testScript = '' - machine.wait_for_unit("pds.service") - machine.wait_for_open_port(3000) - machine.succeed("curl --fail http://localhost:3000") - ''; + testScript = '' + machine.wait_for_unit("pds.service") + machine.wait_for_open_port(3000) + machine.succeed("curl --fail http://localhost:3000") + ''; - meta.maintainers = with lib.maintainers; [ t4ccer ]; - } -) + meta.maintainers = with lib.maintainers; [ t4ccer ]; +} diff --git a/nixos/tests/peerflix.nix b/nixos/tests/peerflix.nix index b53f1b114abe..92a03157f568 100644 --- a/nixos/tests/peerflix.nix +++ b/nixos/tests/peerflix.nix @@ -1,26 +1,24 @@ # This test runs peerflix and checks if peerflix starts -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "peerflix"; - meta = with pkgs.lib.maintainers; { - maintainers = [ offline ]; - }; +{ pkgs, ... }: +{ + name = "peerflix"; + meta = with pkgs.lib.maintainers; { + maintainers = [ offline ]; + }; - nodes = { - peerflix = - { ... }: - { - services.peerflix.enable = true; - }; - }; + nodes = { + peerflix = + { ... }: + { + services.peerflix.enable = true; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - peerflix.wait_for_unit("peerflix.service") - peerflix.wait_until_succeeds("curl -f localhost:9000") - ''; - } -) + peerflix.wait_for_unit("peerflix.service") + peerflix.wait_until_succeeds("curl -f localhost:9000") + ''; +} diff --git a/nixos/tests/peroxide.nix b/nixos/tests/peroxide.nix index 3508942d2970..d5902a64ba2d 100644 --- a/nixos/tests/peroxide.nix +++ b/nixos/tests/peroxide.nix @@ -1,20 +1,18 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "peroxide"; - meta.maintainers = with lib.maintainers; [ aidalgol ]; +{ pkgs, lib, ... }: +{ + name = "peroxide"; + meta.maintainers = with lib.maintainers; [ aidalgol ]; - nodes.machine = - { config, pkgs, ... }: - { - networking.hostName = "nixos"; - services.peroxide.enable = true; - }; + nodes.machine = + { config, pkgs, ... }: + { + networking.hostName = "nixos"; + services.peroxide.enable = true; + }; - testScript = '' - machine.wait_for_unit("peroxide.service") - machine.wait_for_open_port(1143) # IMAP - machine.wait_for_open_port(1025) # SMTP - ''; - } -) + testScript = '' + machine.wait_for_unit("peroxide.service") + machine.wait_for_open_port(1143) # IMAP + machine.wait_for_open_port(1025) # SMTP + ''; +} diff --git a/nixos/tests/pgbouncer.nix b/nixos/tests/pgbouncer.nix index 7951ad0fb084..ffe2c3c137bf 100644 --- a/nixos/tests/pgbouncer.nix +++ b/nixos/tests/pgbouncer.nix @@ -1,62 +1,60 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "pgbouncer"; +{ lib, pkgs, ... 
}: +{ + name = "pgbouncer"; - meta = with lib.maintainers; { - maintainers = [ _1000101 ]; - }; + meta = with lib.maintainers; { + maintainers = [ _1000101 ]; + }; - nodes = { - one = - { pkgs, ... }: - { - systemd.services.postgresql = { - postStart = '' - ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER ROLE testuser WITH LOGIN PASSWORD 'testpass'"; - ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER DATABASE testdb OWNER TO testuser;"; + nodes = { + one = + { pkgs, ... }: + { + systemd.services.postgresql = { + postStart = '' + ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER ROLE testuser WITH LOGIN PASSWORD 'testpass'"; + ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER DATABASE testdb OWNER TO testuser;"; + ''; + }; + + services = { + postgresql = { + enable = true; + ensureDatabases = [ "testdb" ]; + ensureUsers = [ { name = "testuser"; } ]; + authentication = '' + local testdb testuser scram-sha-256 ''; }; - services = { - postgresql = { - enable = true; - ensureDatabases = [ "testdb" ]; - ensureUsers = [ { name = "testuser"; } ]; - authentication = '' - local testdb testuser scram-sha-256 - ''; - }; - - pgbouncer = { - enable = true; - openFirewall = true; - settings = { - pgbouncer = { - listen_addr = "localhost"; - auth_type = "scram-sha-256"; - auth_file = builtins.toFile "pgbouncer-users.txt" '' - "testuser" "testpass" - ''; - }; - databases = { - test = "host=/run/postgresql port=5432 auth_user=testuser dbname=testdb"; - }; + pgbouncer = { + enable = true; + openFirewall = true; + settings = { + pgbouncer = { + listen_addr = "localhost"; + auth_type = "scram-sha-256"; + auth_file = builtins.toFile "pgbouncer-users.txt" '' + "testuser" "testpass" + ''; + }; + databases = { + test = "host=/run/postgresql port=5432 auth_user=testuser dbname=testdb"; }; }; }; }; - }; + }; + }; - testScript = '' - start_all() - one.wait_for_unit("default.target") - one.require_unit_state("pgbouncer.service", "active") + testScript = '' + start_all() + one.wait_for_unit("default.target") + one.require_unit_state("pgbouncer.service", "active") - # Test if we can make a query through PgBouncer - one.wait_until_succeeds( - "psql 'postgres://testuser:testpass@localhost:6432/test' -c 'SELECT 1;'" - ) - ''; - } -) + # Test if we can make a query through PgBouncer + one.wait_until_succeeds( + "psql 'postgres://testuser:testpass@localhost:6432/test' -c 'SELECT 1;'" + ) + ''; +} diff --git a/nixos/tests/pgmanage.nix b/nixos/tests/pgmanage.nix index a28a3ce44766..157f46faec9f 100644 --- a/nixos/tests/pgmanage.nix +++ b/nixos/tests/pgmanage.nix @@ -1,46 +1,44 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - role = "test"; - password = "secret"; - conn = "local"; - in - { - name = "pgmanage"; - meta = with pkgs.lib.maintainers; { - maintainers = [ basvandijk ]; - }; - nodes = { - one = - { config, pkgs, ... }: - { - services = { - postgresql = { - enable = true; - initialScript = pkgs.writeText "pg-init-script" '' - CREATE ROLE ${role} SUPERUSER LOGIN PASSWORD '${password}'; - ''; - }; - pgmanage = { - enable = true; - connections = { - ${conn} = - "hostaddr=127.0.0.1 port=${toString config.services.postgresql.settings.port} dbname=postgres"; - }; +{ pkgs, ... }: +let + role = "test"; + password = "secret"; + conn = "local"; +in +{ + name = "pgmanage"; + meta = with pkgs.lib.maintainers; { + maintainers = [ basvandijk ]; + }; + nodes = { + one = + { config, pkgs, ... 
}: + { + services = { + postgresql = { + enable = true; + initialScript = pkgs.writeText "pg-init-script" '' + CREATE ROLE ${role} SUPERUSER LOGIN PASSWORD '${password}'; + ''; + }; + pgmanage = { + enable = true; + connections = { + ${conn} = + "hostaddr=127.0.0.1 port=${toString config.services.postgresql.settings.port} dbname=postgres"; }; }; }; - }; + }; + }; - testScript = '' - start_all() - one.wait_for_unit("default.target") - one.require_unit_state("pgmanage.service", "active") + testScript = '' + start_all() + one.wait_for_unit("default.target") + one.require_unit_state("pgmanage.service", "active") - # Test if we can log in. - one.wait_until_succeeds( - "curl 'http://localhost:8080/pgmanage/auth' --data 'action=login&connname=${conn}&username=${role}&password=${password}' --fail" - ) - ''; - } -) + # Test if we can log in. + one.wait_until_succeeds( + "curl 'http://localhost:8080/pgmanage/auth' --data 'action=login&connname=${conn}&username=${role}&password=${password}' --fail" + ) + ''; +} diff --git a/nixos/tests/phosh.nix b/nixos/tests/phosh.nix index 9a93f64b0ac4..a276e1f57653 100644 --- a/nixos/tests/phosh.nix +++ b/nixos/tests/phosh.nix @@ -1,88 +1,86 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - pin = "1234"; - in - { - name = "phosh"; - meta = with pkgs.lib.maintainers; { - maintainers = [ zhaofengli ]; - }; +{ pkgs, ... }: +let + pin = "1234"; +in +{ + name = "phosh"; + meta = with pkgs.lib.maintainers; { + maintainers = [ zhaofengli ]; + }; - nodes = { - phone = - { config, pkgs, ... }: - { - users.users.nixos = { - isNormalUser = true; - password = pin; - }; - - services.xserver.desktopManager.phosh = { - enable = true; - user = "nixos"; - group = "users"; - - phocConfig = { - outputs.Virtual-1 = { - scale = 2; - }; - }; - }; - - environment.systemPackages = [ - pkgs.phosh-mobile-settings - ]; - - systemd.services.phosh = { - environment = { - # Accelerated graphics fail on phoc 0.20 (wlroots 0.15) - "WLR_RENDERER" = "pixman"; - }; - }; - - virtualisation.resolution = { - x = 720; - y = 1440; - }; - virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci,xres=720,yres=1440" ]; + nodes = { + phone = + { config, pkgs, ... 
}: + { + users.users.nixos = { + isNormalUser = true; + password = pin; }; - }; - enableOCR = true; + services.xserver.desktopManager.phosh = { + enable = true; + user = "nixos"; + group = "users"; - testScript = '' - import time + phocConfig = { + outputs.Virtual-1 = { + scale = 2; + }; + }; + }; - start_all() - phone.wait_for_unit("phosh.service") + environment.systemPackages = [ + pkgs.phosh-mobile-settings + ]; - with subtest("Check that we can see the lock screen info page"): - # Saturday, January 1 - phone.succeed("timedatectl set-time '2022-01-01 07:00'") + systemd.services.phosh = { + environment = { + # Accelerated graphics fail on phoc 0.20 (wlroots 0.15) + "WLR_RENDERER" = "pixman"; + }; + }; - phone.wait_for_text("Saturday") - phone.screenshot("01lockinfo") + virtualisation.resolution = { + x = 720; + y = 1440; + }; + virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci,xres=720,yres=1440" ]; + }; + }; - with subtest("Check that we can unlock the screen"): - phone.send_chars("${pin}", delay=0.2) - time.sleep(1) - phone.screenshot("02unlock") + enableOCR = true; - phone.send_chars("\n") + testScript = '' + import time - phone.wait_for_text("All Apps") - phone.screenshot("03launcher") + start_all() + phone.wait_for_unit("phosh.service") - with subtest("Check the on-screen keyboard shows"): - phone.send_chars("mobile setting", delay=0.2) - phone.wait_for_text("123") # A button on the OSK - phone.screenshot("04osk") + with subtest("Check that we can see the lock screen info page"): + # Saturday, January 1 + phone.succeed("timedatectl set-time '2022-01-01 07:00'") - with subtest("Check mobile-phosh-settings starts"): - phone.send_chars("\n") - phone.wait_for_text("Tweak advanced mobile settings"); - phone.screenshot("05settings") - ''; - } -) + phone.wait_for_text("Saturday") + phone.screenshot("01lockinfo") + + with subtest("Check that we can unlock the screen"): + phone.send_chars("${pin}", delay=0.2) + time.sleep(1) + phone.screenshot("02unlock") + + phone.send_chars("\n") + + phone.wait_for_text("All Apps") + phone.screenshot("03launcher") + + with subtest("Check the on-screen keyboard shows"): + phone.send_chars("mobile setting", delay=0.2) + phone.wait_for_text("123") # A button on the OSK + phone.screenshot("04osk") + + with subtest("Check mobile-phosh-settings starts"): + phone.send_chars("\n") + phone.wait_for_text("Tweak advanced mobile settings"); + phone.screenshot("05settings") + ''; +} diff --git a/nixos/tests/photonvision.nix b/nixos/tests/photonvision.nix index 6893e77d1554..6ad5ec8915ea 100644 --- a/nixos/tests/photonvision.nix +++ b/nixos/tests/photonvision.nix @@ -1,24 +1,22 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "photonvision"; +{ pkgs, lib, ... }: +{ + name = "photonvision"; - nodes = { - machine = - { pkgs, ... }: - { - services.photonvision = { - enable = true; - }; + nodes = { + machine = + { pkgs, ... 
}: + { + services.photonvision = { + enable = true; }; - }; + }; + }; - testScript = '' - start_all() - machine.wait_for_unit("photonvision.service") - machine.wait_for_open_port(5800) - ''; + testScript = '' + start_all() + machine.wait_for_unit("photonvision.service") + machine.wait_for_open_port(5800) + ''; - meta.maintainers = with lib.maintainers; [ max-niederman ]; - } -) + meta.maintainers = with lib.maintainers; [ max-niederman ]; +} diff --git a/nixos/tests/photoprism.nix b/nixos/tests/photoprism.nix index b8c86c9ac20a..e4cdfeb43331 100644 --- a/nixos/tests/photoprism.nix +++ b/nixos/tests/photoprism.nix @@ -1,28 +1,26 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "photoprism"; - meta.maintainers = with lib.maintainers; [ stunkymonkey ]; +{ lib, pkgs, ... }: +{ + name = "photoprism"; + meta.maintainers = with lib.maintainers; [ stunkymonkey ]; - nodes.machine = - { pkgs, ... }: - { - services.photoprism = { - enable = true; - port = 8080; - originalsPath = "/media/photos/"; - passwordFile = pkgs.writeText "password" "secret"; - }; - environment.extraInit = '' - mkdir -p /media/photos - ''; + nodes.machine = + { pkgs, ... }: + { + services.photoprism = { + enable = true; + port = 8080; + originalsPath = "/media/photos/"; + passwordFile = pkgs.writeText "password" "secret"; }; + environment.extraInit = '' + mkdir -p /media/photos + ''; + }; - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.wait_for_open_port(8080) - response = machine.succeed("curl -vvv -s -H 'Host: photoprism' http://127.0.0.1:8080/library/login") - assert 'PhotoPrism' in response, "Login page didn't load successfully" - ''; - } -) + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.wait_for_open_port(8080) + response = machine.succeed("curl -vvv -s -H 'Host: photoprism' http://127.0.0.1:8080/library/login") + assert 'PhotoPrism' in response, "Login page didn't load successfully" + ''; +} diff --git a/nixos/tests/pict-rs.nix b/nixos/tests/pict-rs.nix index 12109c74138d..7bd2c601f8a6 100644 --- a/nixos/tests/pict-rs.nix +++ b/nixos/tests/pict-rs.nix @@ -1,24 +1,22 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "pict-rs"; - meta.maintainers = with lib.maintainers; [ happysalada ]; +{ pkgs, lib, ... }: +{ + name = "pict-rs"; + meta.maintainers = with lib.maintainers; [ happysalada ]; - nodes.machine = - { ... }: - { - environment.systemPackages = with pkgs; [ - curl - jq - ]; - services.pict-rs.enable = true; - }; + nodes.machine = + { ... }: + { + environment.systemPackages = with pkgs; [ + curl + jq + ]; + services.pict-rs.enable = true; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("pict-rs") - machine.wait_for_open_port(8080) - ''; - } -) + machine.wait_for_unit("pict-rs") + machine.wait_for_open_port(8080) + ''; +} diff --git a/nixos/tests/pingvin-share.nix b/nixos/tests/pingvin-share.nix index 5b1ec55add20..5cb5dcd2a3fc 100644 --- a/nixos/tests/pingvin-share.nix +++ b/nixos/tests/pingvin-share.nix @@ -1,26 +1,24 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "pingvin-share"; - meta.maintainers = with lib.maintainers; [ ratcornu ]; +{ lib, ... }: +{ + name = "pingvin-share"; + meta.maintainers = with lib.maintainers; [ ratcornu ]; - nodes.machine = - { ... }: - { - services.pingvin-share = { - enable = true; + nodes.machine = + { ... 
}: + { + services.pingvin-share = { + enable = true; - backend.port = 9010; - frontend.port = 9011; - }; + backend.port = 9010; + frontend.port = 9011; }; + }; - testScript = '' - machine.wait_for_unit("pingvin-share-frontend.service") - machine.wait_for_open_port(9010) - machine.wait_for_open_port(9011) - machine.succeed("curl --fail http://127.0.0.1:9010/api/configs") - machine.succeed("curl --fail http://127.0.0.1:9011/") - ''; - } -) + testScript = '' + machine.wait_for_unit("pingvin-share-frontend.service") + machine.wait_for_open_port(9010) + machine.wait_for_open_port(9011) + machine.succeed("curl --fail http://127.0.0.1:9010/api/configs") + machine.succeed("curl --fail http://127.0.0.1:9011/") + ''; +} diff --git a/nixos/tests/plantuml-server.nix b/nixos/tests/plantuml-server.nix index b0885ea2630d..e46461bc55ac 100644 --- a/nixos/tests/plantuml-server.nix +++ b/nixos/tests/plantuml-server.nix @@ -1,25 +1,23 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "plantuml-server"; - meta.maintainers = with lib.maintainers; [ anthonyroussel ]; +{ pkgs, lib, ... }: +{ + name = "plantuml-server"; + meta.maintainers = with lib.maintainers; [ anthonyroussel ]; - nodes.machine = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.curl ]; - services.plantuml-server.enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.curl ]; + services.plantuml-server.enable = true; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("plantuml-server.service") - machine.wait_for_open_port(8080) + machine.wait_for_unit("plantuml-server.service") + machine.wait_for_open_port(8080) - with subtest("Generate chart"): - chart_id = machine.succeed("curl -sSf http://localhost:8080/plantuml/coder -d 'Alice -> Bob'") - machine.succeed("curl -sSf http://localhost:8080/plantuml/txt/{}".format(chart_id)) - ''; - } -) + with subtest("Generate chart"): + chart_id = machine.succeed("curl -sSf http://localhost:8080/plantuml/coder -d 'Alice -> Bob'") + machine.succeed("curl -sSf http://localhost:8080/plantuml/txt/{}".format(chart_id)) + ''; +} diff --git a/nixos/tests/plasma-bigscreen.nix b/nixos/tests/plasma-bigscreen.nix index 4a14564364ac..b429117b3a44 100644 --- a/nixos/tests/plasma-bigscreen.nix +++ b/nixos/tests/plasma-bigscreen.nix @@ -1,43 +1,41 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "plasma-bigscreen"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - ttuegel - k900 - ]; - }; +{ + name = "plasma-bigscreen"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + ttuegel + k900 + ]; + }; - nodes.machine = - { ... }: + nodes.machine = + { ... }: - { - imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - services.displayManager.sddm.enable = true; - services.displayManager.defaultSession = "plasma-bigscreen-x11"; - services.xserver.desktopManager.plasma5.bigscreen.enable = true; - services.displayManager.autoLogin = { - enable = true; - user = "alice"; - }; - - users.users.alice.extraGroups = [ "uinput" ]; + { + imports = [ ./common/user-account.nix ]; + services.xserver.enable = true; + services.displayManager.sddm.enable = true; + services.displayManager.defaultSession = "plasma-bigscreen-x11"; + services.xserver.desktopManager.plasma5.bigscreen.enable = true; + services.displayManager.autoLogin = { + enable = true; + user = "alice"; }; - testScript = - { nodes, ... 
}: - '' - with subtest("Wait for login"): - start_all() - machine.wait_for_file("/tmp/xauth_*") - machine.succeed("xauth merge /tmp/xauth_*") + users.users.alice.extraGroups = [ "uinput" ]; + }; - with subtest("Check plasmashell started"): - machine.wait_until_succeeds("pgrep plasmashell") - machine.wait_for_window("Plasma Big Screen") - ''; - } -) + testScript = + { nodes, ... }: + '' + with subtest("Wait for login"): + start_all() + machine.wait_for_file("/tmp/xauth_*") + machine.succeed("xauth merge /tmp/xauth_*") + + with subtest("Check plasmashell started"): + machine.wait_until_succeeds("pgrep plasmashell") + machine.wait_for_window("Plasma Big Screen") + ''; +} diff --git a/nixos/tests/plasma5-systemd-start.nix b/nixos/tests/plasma5-systemd-start.nix index 9f21c21714b0..6a62f356f839 100644 --- a/nixos/tests/plasma5-systemd-start.nix +++ b/nixos/tests/plasma5-systemd-start.nix @@ -1,48 +1,46 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "plasma5-systemd-start"; - meta = with pkgs.lib.maintainers; { - maintainers = [ oxalica ]; - }; +{ + name = "plasma5-systemd-start"; + meta = with pkgs.lib.maintainers; { + maintainers = [ oxalica ]; + }; - nodes.machine = - { ... }: + nodes.machine = + { ... }: - { - imports = [ ./common/user-account.nix ]; - services.xserver = { - enable = true; - desktopManager.plasma5.enable = true; - desktopManager.plasma5.runUsingSystemd = true; - }; - - services.displayManager = { - sddm.enable = true; - defaultSession = "plasma"; - autoLogin = { - enable = true; - user = "alice"; - }; - }; + { + imports = [ ./common/user-account.nix ]; + services.xserver = { + enable = true; + desktopManager.plasma5.enable = true; + desktopManager.plasma5.runUsingSystemd = true; }; - testScript = - { nodes, ... }: - '' - with subtest("Wait for login"): - start_all() - machine.wait_for_file("/tmp/xauth_*") - machine.succeed("xauth merge /tmp/xauth_*") + services.displayManager = { + sddm.enable = true; + defaultSession = "plasma"; + autoLogin = { + enable = true; + user = "alice"; + }; + }; + }; - with subtest("Check plasmashell started"): - machine.wait_until_succeeds("pgrep plasmashell") - machine.wait_for_window("^Desktop ") + testScript = + { nodes, ... }: + '' + with subtest("Wait for login"): + start_all() + machine.wait_for_file("/tmp/xauth_*") + machine.succeed("xauth merge /tmp/xauth_*") - status, result = machine.systemctl('--no-pager show plasma-plasmashell.service', user='alice') - assert status == 0, 'Service not found' - assert 'ActiveState=active' in result.split('\n'), 'Systemd service not active' - ''; - } -) + with subtest("Check plasmashell started"): + machine.wait_until_succeeds("pgrep plasmashell") + machine.wait_for_window("^Desktop ") + + status, result = machine.systemctl('--no-pager show plasma-plasmashell.service', user='alice') + assert status == 0, 'Service not found' + assert 'ActiveState=active' in result.split('\n'), 'Systemd service not active' + ''; +} diff --git a/nixos/tests/plasma5.nix b/nixos/tests/plasma5.nix index d333df24eb8d..7fe677feae92 100644 --- a/nixos/tests/plasma5.nix +++ b/nixos/tests/plasma5.nix @@ -1,72 +1,70 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "plasma5"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ttuegel ]; +{ + name = "plasma5"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ttuegel ]; + }; + + nodes.machine = + { ... 
}: + + { + imports = [ ./common/user-account.nix ]; + services.xserver.enable = true; + services.displayManager.sddm.enable = true; + services.displayManager.defaultSession = "plasma"; + services.xserver.desktopManager.plasma5.enable = true; + environment.plasma5.excludePackages = [ pkgs.plasma5Packages.elisa ]; + services.displayManager.autoLogin = { + enable = true; + user = "alice"; + }; }; - nodes.machine = - { ... }: + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + xdo = "${pkgs.xdotool}/bin/xdotool"; + in + '' + with subtest("Wait for login"): + start_all() + machine.wait_for_file("/tmp/xauth_*") + machine.succeed("xauth merge /tmp/xauth_*") - { - imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - services.displayManager.sddm.enable = true; - services.displayManager.defaultSession = "plasma"; - services.xserver.desktopManager.plasma5.enable = true; - environment.plasma5.excludePackages = [ pkgs.plasma5Packages.elisa ]; - services.displayManager.autoLogin = { - enable = true; - user = "alice"; - }; - }; + with subtest("Check plasmashell started"): + machine.wait_until_succeeds("pgrep plasmashell") + machine.wait_for_window("^Desktop ") - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - xdo = "${pkgs.xdotool}/bin/xdotool"; - in - '' - with subtest("Wait for login"): - start_all() - machine.wait_for_file("/tmp/xauth_*") - machine.succeed("xauth merge /tmp/xauth_*") + with subtest("Check that KDED is running"): + machine.succeed("pgrep kded5") - with subtest("Check plasmashell started"): - machine.wait_until_succeeds("pgrep plasmashell") - machine.wait_for_window("^Desktop ") + with subtest("Check that logging in has given the user ownership of devices"): + machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") - with subtest("Check that KDED is running"): - machine.succeed("pgrep kded5") + with subtest("Ensure Elisa is not installed"): + machine.fail("which elisa") - with subtest("Check that logging in has given the user ownership of devices"): - machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}") + machine.succeed("su - ${user.name} -c 'xauth merge /tmp/xauth_*'") - with subtest("Ensure Elisa is not installed"): - machine.fail("which elisa") + with subtest("Run Dolphin"): + machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 dolphin >&2 &'") + machine.wait_for_window(" Dolphin") - machine.succeed("su - ${user.name} -c 'xauth merge /tmp/xauth_*'") + with subtest("Run Konsole"): + machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 konsole >&2 &'") + machine.wait_for_window("Konsole") - with subtest("Run Dolphin"): - machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 dolphin >&2 &'") - machine.wait_for_window(" Dolphin") + with subtest("Run systemsettings"): + machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 systemsettings5 >&2 &'") + machine.wait_for_window("Settings") - with subtest("Run Konsole"): - machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 konsole >&2 &'") - machine.wait_for_window("Konsole") - - with subtest("Run systemsettings"): - machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 systemsettings5 >&2 &'") - machine.wait_for_window("Settings") - - with subtest("Wait to get a screenshot"): - machine.execute( - "${xdo} key Alt+F1 sleep 10" - ) - machine.screenshot("screen") - ''; - } -) + with subtest("Wait to get a screenshot"): + machine.execute( + "${xdo} key Alt+F1 sleep 10" + ) + machine.screenshot("screen") + ''; +} diff --git 
a/nixos/tests/plasma6.nix b/nixos/tests/plasma6.nix index afdb965d5d89..b10d829102ad 100644 --- a/nixos/tests/plasma6.nix +++ b/nixos/tests/plasma6.nix @@ -1,70 +1,68 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "plasma6"; - meta = with pkgs.lib.maintainers; { - maintainers = [ k900 ]; +{ + name = "plasma6"; + meta = with pkgs.lib.maintainers; { + maintainers = [ k900 ]; + }; + + nodes.machine = + { ... }: + + { + imports = [ ./common/user-account.nix ]; + services.xserver.enable = true; + services.displayManager.sddm.enable = true; + # FIXME: this should be testing Wayland + services.displayManager.defaultSession = "plasmax11"; + services.desktopManager.plasma6.enable = true; + environment.plasma6.excludePackages = [ pkgs.kdePackages.elisa ]; + services.displayManager.autoLogin = { + enable = true; + user = "alice"; + }; }; - nodes.machine = - { ... }: + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + xdo = "${pkgs.xdotool}/bin/xdotool"; + in + '' + with subtest("Wait for login"): + start_all() + machine.wait_for_file("/tmp/xauth_*") + machine.succeed("xauth merge /tmp/xauth_*") - { - imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - services.displayManager.sddm.enable = true; - # FIXME: this should be testing Wayland - services.displayManager.defaultSession = "plasmax11"; - services.desktopManager.plasma6.enable = true; - environment.plasma6.excludePackages = [ pkgs.kdePackages.elisa ]; - services.displayManager.autoLogin = { - enable = true; - user = "alice"; - }; - }; + with subtest("Check plasmashell started"): + machine.wait_until_succeeds("pgrep plasmashell") + machine.wait_for_window("^Desktop ") - testScript = - { nodes, ... }: - let - user = nodes.machine.users.users.alice; - xdo = "${pkgs.xdotool}/bin/xdotool"; - in - '' - with subtest("Wait for login"): - start_all() - machine.wait_for_file("/tmp/xauth_*") - machine.succeed("xauth merge /tmp/xauth_*") + with subtest("Check that KDED is running"): + machine.succeed("pgrep kded6") - with subtest("Check plasmashell started"): - machine.wait_until_succeeds("pgrep plasmashell") - machine.wait_for_window("^Desktop ") + with subtest("Ensure Elisa is not installed"): + machine.fail("which elisa") - with subtest("Check that KDED is running"): - machine.succeed("pgrep kded6") + machine.succeed("su - ${user.name} -c 'xauth merge /tmp/xauth_*'") - with subtest("Ensure Elisa is not installed"): - machine.fail("which elisa") + with subtest("Run Dolphin"): + machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 dolphin >&2 &'") + machine.wait_for_window(" Dolphin") - machine.succeed("su - ${user.name} -c 'xauth merge /tmp/xauth_*'") + with subtest("Run Konsole"): + machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 konsole >&2 &'") + machine.wait_for_window("Konsole") - with subtest("Run Dolphin"): - machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 dolphin >&2 &'") - machine.wait_for_window(" Dolphin") + with subtest("Run systemsettings"): + machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 systemsettings >&2 &'") + machine.wait_for_window("Settings") - with subtest("Run Konsole"): - machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 konsole >&2 &'") - machine.wait_for_window("Konsole") - - with subtest("Run systemsettings"): - machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 systemsettings >&2 &'") - machine.wait_for_window("Settings") - - with subtest("Wait to get a screenshot"): - machine.execute( - "${xdo} key Alt+F1 sleep 10" - ) - 
machine.screenshot("screen") - ''; - } -) + with subtest("Wait to get a screenshot"): + machine.execute( + "${xdo} key Alt+F1 sleep 10" + ) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/plausible.nix b/nixos/tests/plausible.nix index 2076667797db..5253b521c99e 100644 --- a/nixos/tests/plausible.nix +++ b/nixos/tests/plausible.nix @@ -1,36 +1,34 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "plausible"; - meta = { - maintainers = lib.teams.cyberus.members; - }; +{ lib, ... }: +{ + name = "plausible"; + meta = { + maintainers = lib.teams.cyberus.members; + }; - nodes.machine = - { pkgs, ... }: - { - virtualisation.memorySize = 4096; - services.plausible = { - enable = true; - server = { - baseUrl = "http://localhost:8000"; - secretKeybaseFile = "${pkgs.writeText "dont-try-this-at-home" "nannannannannannannannannannannannannannannannannannannan_batman!"}"; - }; + nodes.machine = + { pkgs, ... }: + { + virtualisation.memorySize = 4096; + services.plausible = { + enable = true; + server = { + baseUrl = "http://localhost:8000"; + secretKeybaseFile = "${pkgs.writeText "dont-try-this-at-home" "nannannannannannannannannannannannannannannannannannannan_batman!"}"; }; }; + }; - testScript = '' - start_all() - machine.wait_for_unit("plausible.service") - machine.wait_for_open_port(8000) + testScript = '' + start_all() + machine.wait_for_unit("plausible.service") + machine.wait_for_open_port(8000) - # Ensure that the software does not make not make the machine - # listen on any public interfaces by default. - machine.fail("ss -tlpn 'src = 0.0.0.0 or src = [::]' | grep LISTEN") + # Ensure that the software does not make not make the machine + # listen on any public interfaces by default. + machine.fail("ss -tlpn 'src = 0.0.0.0 or src = [::]' | grep LISTEN") - machine.succeed("curl -f localhost:8000 >&2") + machine.succeed("curl -f localhost:8000 >&2") - machine.succeed("curl -f localhost:8000/js/script.js >&2") - ''; - } -) + machine.succeed("curl -f localhost:8000/js/script.js >&2") + ''; +} diff --git a/nixos/tests/playwright-python.nix b/nixos/tests/playwright-python.nix index e479f3a91517..7911dac1b428 100644 --- a/nixos/tests/playwright-python.nix +++ b/nixos/tests/playwright-python.nix @@ -1,61 +1,59 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "playwright-python"; +{ pkgs, ... }: +{ + name = "playwright-python"; - meta = with pkgs.lib.maintainers; { - maintainers = [ phaer ]; + meta = with pkgs.lib.maintainers; { + maintainers = [ phaer ]; + }; + + nodes.machine = + { pkgs, ... 
}: + { + environment.variables = { + NIX_MANUAL_DOCROOT = "file://${pkgs.nix.doc}/share/doc/nix/manual/index.html"; + PLAYWRIGHT_BROWSERS_PATH = pkgs.playwright-driver.browsers; + }; + environment.systemPackages = [ + (pkgs.writers.writePython3Bin "test_playwright" + { + libraries = [ pkgs.python3Packages.playwright ]; + } + '' + import sys + import re + from playwright.sync_api import sync_playwright + from playwright.sync_api import expect + + browsers = { + "chromium": {'args': ["--headless", "--disable-gpu"], 'channel': 'chromium'}, + "firefox": {}, + "webkit": {} + } + needle = re.compile("Nix.*Reference Manual") + if len(sys.argv) != 3 or sys.argv[1] not in browsers.keys(): + print(f"usage: {sys.argv[0]} [{'|'.join(browsers.keys())}] ") + sys.exit(1) + browser_name = sys.argv[1] + url = sys.argv[2] + browser_kwargs = browsers.get(browser_name) + args = ' '.join(browser_kwargs.get('args', [])) + print(f"Running test on {browser_name} {args}") + with sync_playwright() as p: + browser = getattr(p, browser_name).launch(**browser_kwargs) + context = browser.new_context() + page = context.new_page() + page.goto(url) + expect(page.get_by_text(needle)).to_be_visible() + '' + ) + ]; }; - nodes.machine = - { pkgs, ... }: - { - environment.variables = { - NIX_MANUAL_DOCROOT = "file://${pkgs.nix.doc}/share/doc/nix/manual/index.html"; - PLAYWRIGHT_BROWSERS_PATH = pkgs.playwright-driver.browsers; - }; - environment.systemPackages = [ - (pkgs.writers.writePython3Bin "test_playwright" - { - libraries = [ pkgs.python3Packages.playwright ]; - } - '' - import sys - import re - from playwright.sync_api import sync_playwright - from playwright.sync_api import expect + testScript = '' + # FIXME: Webkit segfaults + for browser in ["firefox", "chromium"]: + with subtest(f"Render Nix Manual in {browser}"): + machine.succeed(f"test_playwright {browser} $NIX_MANUAL_DOCROOT") + ''; - browsers = { - "chromium": {'args': ["--headless", "--disable-gpu"], 'channel': 'chromium'}, - "firefox": {}, - "webkit": {} - } - needle = re.compile("Nix.*Reference Manual") - if len(sys.argv) != 3 or sys.argv[1] not in browsers.keys(): - print(f"usage: {sys.argv[0]} [{'|'.join(browsers.keys())}] ") - sys.exit(1) - browser_name = sys.argv[1] - url = sys.argv[2] - browser_kwargs = browsers.get(browser_name) - args = ' '.join(browser_kwargs.get('args', [])) - print(f"Running test on {browser_name} {args}") - with sync_playwright() as p: - browser = getattr(p, browser_name).launch(**browser_kwargs) - context = browser.new_context() - page = context.new_page() - page.goto(url) - expect(page.get_by_text(needle)).to_be_visible() - '' - ) - ]; - }; - - testScript = '' - # FIXME: Webkit segfaults - for browser in ["firefox", "chromium"]: - with subtest(f"Render Nix Manual in {browser}"): - machine.succeed(f"test_playwright {browser} $NIX_MANUAL_DOCROOT") - ''; - - } -) +} diff --git a/nixos/tests/please.nix b/nixos/tests/please.nix index 6b461bff4938..7671266ac7e4 100644 --- a/nixos/tests/please.nix +++ b/nixos/tests/please.nix @@ -1,68 +1,66 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "please"; - meta.maintainers = [ ]; +{ lib, ... }: +{ + name = "please"; + meta.maintainers = [ ]; - nodes.machine = - { ... }: - { - users.users = lib.mkMerge [ - (lib.listToAttrs ( - map (n: lib.nameValuePair n { isNormalUser = true; }) (lib.genList (x: "user${toString x}") 6) - )) - { - user0.extraGroups = [ "wheel" ]; - } - ]; + nodes.machine = + { ... 
}: + { + users.users = lib.mkMerge [ + (lib.listToAttrs ( + map (n: lib.nameValuePair n { isNormalUser = true; }) (lib.genList (x: "user${toString x}") 6) + )) + { + user0.extraGroups = [ "wheel" ]; + } + ]; - security.please = { - enable = true; - wheelNeedsPassword = false; - settings = { - user2_run_true_as_root = { - name = "user2"; - target = "root"; - rule = "/run/current-system/sw/bin/true"; - require_pass = false; - }; - user4_edit_etc_hosts_as_root = { - name = "user4"; - type = "edit"; - target = "root"; - rule = "/etc/hosts"; - editmode = 644; - require_pass = false; - }; + security.please = { + enable = true; + wheelNeedsPassword = false; + settings = { + user2_run_true_as_root = { + name = "user2"; + target = "root"; + rule = "/run/current-system/sw/bin/true"; + require_pass = false; + }; + user4_edit_etc_hosts_as_root = { + name = "user4"; + type = "edit"; + target = "root"; + rule = "/etc/hosts"; + editmode = 644; + require_pass = false; }; }; }; + }; - testScript = '' - with subtest("root: can run anything by default"): - machine.succeed('please true') - with subtest("root: can edit anything by default"): - machine.succeed('EDITOR=cat pleaseedit /etc/hosts') + testScript = '' + with subtest("root: can run anything by default"): + machine.succeed('please true') + with subtest("root: can edit anything by default"): + machine.succeed('EDITOR=cat pleaseedit /etc/hosts') - with subtest("user0: can run as root because it's in the wheel group"): - machine.succeed('su - user0 -c "please -u root true"') - with subtest("user1: cannot run as root because it's not in the wheel group"): - machine.fail('su - user1 -c "please -u root true"') + with subtest("user0: can run as root because it's in the wheel group"): + machine.succeed('su - user0 -c "please -u root true"') + with subtest("user1: cannot run as root because it's not in the wheel group"): + machine.fail('su - user1 -c "please -u root true"') - with subtest("user0: can edit as root"): - machine.succeed('su - user0 -c "EDITOR=cat pleaseedit /etc/hosts"') - with subtest("user1: cannot edit as root"): - machine.fail('su - user1 -c "EDITOR=cat pleaseedit /etc/hosts"') + with subtest("user0: can edit as root"): + machine.succeed('su - user0 -c "EDITOR=cat pleaseedit /etc/hosts"') + with subtest("user1: cannot edit as root"): + machine.fail('su - user1 -c "EDITOR=cat pleaseedit /etc/hosts"') - with subtest("user2: can run 'true' as root"): - machine.succeed('su - user2 -c "please -u root true"') - with subtest("user3: cannot run 'true' as root"): - machine.fail('su - user3 -c "please -u root true"') + with subtest("user2: can run 'true' as root"): + machine.succeed('su - user2 -c "please -u root true"') + with subtest("user3: cannot run 'true' as root"): + machine.fail('su - user3 -c "please -u root true"') - with subtest("user4: can edit /etc/hosts"): - machine.succeed('su - user4 -c "EDITOR=cat pleaseedit /etc/hosts"') - with subtest("user5: cannot edit /etc/hosts"): - machine.fail('su - user5 -c "EDITOR=cat pleaseedit /etc/hosts"') - ''; - } -) + with subtest("user4: can edit /etc/hosts"): + machine.succeed('su - user4 -c "EDITOR=cat pleaseedit /etc/hosts"') + with subtest("user5: cannot edit /etc/hosts"): + machine.fail('su - user5 -c "EDITOR=cat pleaseedit /etc/hosts"') + ''; +} diff --git a/nixos/tests/plikd.nix b/nixos/tests/plikd.nix index 05a1c6017d2d..b22c26302081 100644 --- a/nixos/tests/plikd.nix +++ b/nixos/tests/plikd.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { lib, ... 
}: - { - name = "plikd"; - meta = with lib.maintainers; { - maintainers = [ freezeboy ]; +{ lib, ... }: +{ + name = "plikd"; + meta = with lib.maintainers; { + maintainers = [ freezeboy ]; + }; + + nodes.machine = + { pkgs, ... }: + let + in + { + services.plikd.enable = true; + environment.systemPackages = [ pkgs.plik ]; }; - nodes.machine = - { pkgs, ... }: - let - in - { - services.plikd.enable = true; - environment.systemPackages = [ pkgs.plik ]; - }; + testScript = '' + # Service basic test + machine.wait_for_unit("plikd") - testScript = '' - # Service basic test - machine.wait_for_unit("plikd") + # Network test + machine.wait_for_open_port(8080) + machine.succeed("curl --fail -v http://localhost:8080") - # Network test - machine.wait_for_open_port(8080) - machine.succeed("curl --fail -v http://localhost:8080") + # Application test + machine.execute("echo test > /tmp/data.txt") + machine.succeed("plik --server http://localhost:8080 /tmp/data.txt | grep curl") - # Application test - machine.execute("echo test > /tmp/data.txt") - machine.succeed("plik --server http://localhost:8080 /tmp/data.txt | grep curl") - - machine.succeed("diff data.txt /tmp/data.txt") - ''; - } -) + machine.succeed("diff data.txt /tmp/data.txt") + ''; +} diff --git a/nixos/tests/plotinus.nix b/nixos/tests/plotinus.nix index 97041012756a..f1ea049ff449 100644 --- a/nixos/tests/plotinus.nix +++ b/nixos/tests/plotinus.nix @@ -1,36 +1,34 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "plotinus"; - meta = { - maintainers = pkgs.plotinus.meta.maintainers; - timeout = 600; +{ pkgs, ... }: +{ + name = "plotinus"; + meta = { + maintainers = pkgs.plotinus.meta.maintainers; + timeout = 600; + }; + + nodes.machine = + { pkgs, ... }: + + { + imports = [ ./common/x11.nix ]; + programs.plotinus.enable = true; + environment.systemPackages = [ + pkgs.gnome-pomodoro + pkgs.xdotool + ]; }; - nodes.machine = - { pkgs, ... }: - - { - imports = [ ./common/x11.nix ]; - programs.plotinus.enable = true; - environment.systemPackages = [ - pkgs.gnome-pomodoro - pkgs.xdotool - ]; - }; - - testScript = '' - machine.wait_for_x() - machine.succeed("gnome-pomodoro >&2 &") - machine.wait_for_window("Pomodoro", timeout=120) - machine.succeed( - "xdotool search --sync --onlyvisible --class gnome-pomodoro " - + "windowfocus --sync key --clearmodifiers --delay 1 'ctrl+shift+p'" - ) - machine.sleep(5) # wait for the popup - machine.screenshot("popup") - machine.succeed("xdotool key --delay 100 p r e f e r e n c e s Return") - machine.wait_for_window("Preferences", timeout=120) - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.succeed("gnome-pomodoro >&2 &") + machine.wait_for_window("Pomodoro", timeout=120) + machine.succeed( + "xdotool search --sync --onlyvisible --class gnome-pomodoro " + + "windowfocus --sync key --clearmodifiers --delay 1 'ctrl+shift+p'" + ) + machine.sleep(5) # wait for the popup + machine.screenshot("popup") + machine.succeed("xdotool key --delay 100 p r e f e r e n c e s Return") + machine.wait_for_window("Preferences", timeout=120) + ''; +} diff --git a/nixos/tests/pocket-id.nix b/nixos/tests/pocket-id.nix index 753fa251473f..830ba3e8c760 100644 --- a/nixos/tests/pocket-id.nix +++ b/nixos/tests/pocket-id.nix @@ -1,47 +1,45 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... 
}: - { - name = "pocket-id"; - meta.maintainers = with lib.maintainers; [ - gepbird - ymstnt - ]; +{ + name = "pocket-id"; + meta.maintainers = with lib.maintainers; [ + gepbird + ymstnt + ]; - nodes = { - machine = - { ... }: - { - services.pocket-id = { - enable = true; - settings = { - PORT = 10001; - INTERNAL_BACKEND_URL = "http://localhost:10002"; - BACKEND_PORT = 10002; - }; + nodes = { + machine = + { ... }: + { + services.pocket-id = { + enable = true; + settings = { + PORT = 10001; + INTERNAL_BACKEND_URL = "http://localhost:10002"; + BACKEND_PORT = 10002; }; }; - }; + }; + }; - testScript = - { nodes, ... }: - let - inherit (nodes.machine.services.pocket-id) settings; - inherit (builtins) toString; - in - '' - machine.wait_for_unit("pocket-id-backend.service") - machine.wait_for_open_port(${toString settings.BACKEND_PORT}) - machine.wait_for_unit("pocket-id-frontend.service") - machine.wait_for_open_port(${toString settings.PORT}) + testScript = + { nodes, ... }: + let + inherit (nodes.machine.services.pocket-id) settings; + inherit (builtins) toString; + in + '' + machine.wait_for_unit("pocket-id-backend.service") + machine.wait_for_open_port(${toString settings.BACKEND_PORT}) + machine.wait_for_unit("pocket-id-frontend.service") + machine.wait_for_open_port(${toString settings.PORT}) - backend_status = machine.succeed("curl -L -o /tmp/backend-output -w '%{http_code}' http://localhost:${toString settings.BACKEND_PORT}/api/users/me") - assert backend_status == "401" - machine.succeed("grep 'You are not signed in' /tmp/backend-output") + backend_status = machine.succeed("curl -L -o /tmp/backend-output -w '%{http_code}' http://localhost:${toString settings.BACKEND_PORT}/api/users/me") + assert backend_status == "401" + machine.succeed("grep 'You are not signed in' /tmp/backend-output") - frontend_status = machine.succeed("curl -L -o /tmp/frontend-output -w '%{http_code}' http://localhost:${toString settings.PORT}") - assert frontend_status == "200" - machine.succeed("grep 'Sign in to Pocket ID' /tmp/frontend-output") - ''; - } -) + frontend_status = machine.succeed("curl -L -o /tmp/frontend-output -w '%{http_code}' http://localhost:${toString settings.PORT}") + assert frontend_status == "200" + machine.succeed("grep 'Sign in to Pocket ID' /tmp/frontend-output") + ''; +} diff --git a/nixos/tests/podgrab.nix b/nixos/tests/podgrab.nix index 05c2fd726016..cefe5d373d99 100644 --- a/nixos/tests/podgrab.nix +++ b/nixos/tests/podgrab.nix @@ -2,40 +2,38 @@ let defaultPort = 8080; customPort = 4242; in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "podgrab"; +{ pkgs, ... }: +{ + name = "podgrab"; - nodes = { - default = - { ... }: - { - services.podgrab.enable = true; + nodes = { + default = + { ... }: + { + services.podgrab.enable = true; + }; + + customized = + { ... }: + { + services.podgrab = { + enable = true; + port = customPort; }; + }; + }; - customized = - { ... 
}: - { - services.podgrab = { - enable = true; - port = customPort; - }; - }; - }; + testScript = '' + start_all() - testScript = '' - start_all() + default.wait_for_unit("podgrab") + default.wait_for_open_port(${toString defaultPort}) + default.succeed("curl --fail http://localhost:${toString defaultPort}") - default.wait_for_unit("podgrab") - default.wait_for_open_port(${toString defaultPort}) - default.succeed("curl --fail http://localhost:${toString defaultPort}") + customized.wait_for_unit("podgrab") + customized.wait_for_open_port(${toString customPort}) + customized.succeed("curl --fail http://localhost:${toString customPort}") + ''; - customized.wait_for_unit("podgrab") - customized.wait_for_open_port(${toString customPort}) - customized.succeed("curl --fail http://localhost:${toString customPort}") - ''; - - meta.maintainers = with pkgs.lib.maintainers; [ ambroisie ]; - } -) + meta.maintainers = with pkgs.lib.maintainers; [ ambroisie ]; +} diff --git a/nixos/tests/polaris.nix b/nixos/tests/polaris.nix index 3a530996d63a..59e4b37337bf 100644 --- a/nixos/tests/polaris.nix +++ b/nixos/tests/polaris.nix @@ -1,32 +1,30 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "polaris"; - meta.maintainers = with lib.maintainers; [ pbsds ]; +{ + name = "polaris"; + meta.maintainers = with lib.maintainers; [ pbsds ]; - nodes.machine = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.jq ]; - services.polaris = { - enable = true; - port = 5050; - settings.users = [ - { - name = "test_user"; - password = "very_secret_password"; - admin = true; - } - ]; - }; + nodes.machine = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.jq ]; + services.polaris = { + enable = true; + port = 5050; + settings.users = [ + { + name = "test_user"; + password = "very_secret_password"; + admin = true; + } + ]; }; + }; - testScript = '' - machine.wait_for_unit("polaris.service") - machine.wait_for_open_port(5050) - machine.succeed("curl http://localhost:5050/api/version") - machine.succeed("curl -X GET http://localhost:5050/api/initial_setup -H 'accept: application/json' | jq -e '.has_any_users == true'") - ''; - } -) + testScript = '' + machine.wait_for_unit("polaris.service") + machine.wait_for_open_port(5050) + machine.succeed("curl http://localhost:5050/api/version") + machine.succeed("curl -X GET http://localhost:5050/api/initial_setup -H 'accept: application/json' | jq -e '.has_any_users == true'") + ''; +} diff --git a/nixos/tests/portunus.nix b/nixos/tests/portunus.nix index 5bfe990943b5..5562c7f5ac8a 100644 --- a/nixos/tests/portunus.nix +++ b/nixos/tests/portunus.nix @@ -1,20 +1,18 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... 
}: - { - name = "portunus"; - meta.maintainers = with lib.maintainers; [ SuperSandro2000 ]; +{ + name = "portunus"; + meta.maintainers = with lib.maintainers; [ SuperSandro2000 ]; - nodes.machine = _: { - services.portunus = { - enable = true; - ldap.suffix = "dc=example,dc=org"; - }; + nodes.machine = _: { + services.portunus = { + enable = true; + ldap.suffix = "dc=example,dc=org"; }; + }; - testScript = '' - machine.wait_for_unit("portunus.service") - machine.succeed("curl --fail -vvv http://localhost:8080/") - ''; - } -) + testScript = '' + machine.wait_for_unit("portunus.service") + machine.succeed("curl --fail -vvv http://localhost:8080/") + ''; +} diff --git a/nixos/tests/postfixadmin.nix b/nixos/tests/postfixadmin.nix index 9162de429482..2994efcfca47 100644 --- a/nixos/tests/postfixadmin.nix +++ b/nixos/tests/postfixadmin.nix @@ -1,36 +1,34 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "postfixadmin"; - meta = with pkgs.lib.maintainers; { - maintainers = [ globin ]; - }; +{ pkgs, ... }: +{ + name = "postfixadmin"; + meta = with pkgs.lib.maintainers; { + maintainers = [ globin ]; + }; - nodes = { - postfixadmin = - { config, pkgs, ... }: - { - services.postfixadmin = { - enable = true; - hostName = "postfixadmin"; - setupPasswordFile = pkgs.writeText "insecure-test-setup-pw-file" "$2y$10$r0p63YCjd9rb9nHrV9UtVuFgGTmPDLKu.0UIJoQTkWCZZze2iuB1m"; - }; - services.nginx.virtualHosts.postfixadmin = { - forceSSL = false; - enableACME = false; - }; + nodes = { + postfixadmin = + { config, pkgs, ... }: + { + services.postfixadmin = { + enable = true; + hostName = "postfixadmin"; + setupPasswordFile = pkgs.writeText "insecure-test-setup-pw-file" "$2y$10$r0p63YCjd9rb9nHrV9UtVuFgGTmPDLKu.0UIJoQTkWCZZze2iuB1m"; }; - }; + services.nginx.virtualHosts.postfixadmin = { + forceSSL = false; + enableACME = false; + }; + }; + }; - testScript = '' - postfixadmin.start - postfixadmin.wait_for_unit("postgresql.service") - postfixadmin.wait_for_unit("phpfpm-postfixadmin.service") - postfixadmin.wait_for_unit("nginx.service") - postfixadmin.succeed( - "curl -sSfL http://postfixadmin/setup.php -X POST -F 'setup_password=not production'" - ) - postfixadmin.succeed("curl -sSfL http://postfixadmin/ | grep 'Mail admins login here'") - ''; - } -) + testScript = '' + postfixadmin.start + postfixadmin.wait_for_unit("postgresql.service") + postfixadmin.wait_for_unit("phpfpm-postfixadmin.service") + postfixadmin.wait_for_unit("nginx.service") + postfixadmin.succeed( + "curl -sSfL http://postfixadmin/setup.php -X POST -F 'setup_password=not production'" + ) + postfixadmin.succeed("curl -sSfL http://postfixadmin/ | grep 'Mail admins login here'") + ''; +} diff --git a/nixos/tests/power-profiles-daemon.nix b/nixos/tests/power-profiles-daemon.nix index a1f10afda9e6..75a45c437f63 100644 --- a/nixos/tests/power-profiles-daemon.nix +++ b/nixos/tests/power-profiles-daemon.nix @@ -1,64 +1,62 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "power-profiles-daemon"; - meta = with pkgs.lib.maintainers; { - maintainers = [ mvnetbiz ]; +{ + name = "power-profiles-daemon"; + meta = with pkgs.lib.maintainers; { + maintainers = [ mvnetbiz ]; + }; + nodes.machine = + { pkgs, ... }: + { + security.polkit.enable = true; + services.power-profiles-daemon.enable = true; + environment.systemPackages = [ + pkgs.glib + pkgs.power-profiles-daemon + ]; }; - nodes.machine = - { pkgs, ... 
}: - { - security.polkit.enable = true; - services.power-profiles-daemon.enable = true; - environment.systemPackages = [ - pkgs.glib - pkgs.power-profiles-daemon - ]; - }; - testScript = '' - def get_profile(): - return machine.succeed( - """gdbus call --system --dest org.freedesktop.UPower.PowerProfiles --object-path /org/freedesktop/UPower/PowerProfiles \ - --method org.freedesktop.DBus.Properties.Get 'org.freedesktop.UPower.PowerProfiles' 'ActiveProfile' - """ - ) + testScript = '' + def get_profile(): + return machine.succeed( + """gdbus call --system --dest org.freedesktop.UPower.PowerProfiles --object-path /org/freedesktop/UPower/PowerProfiles \ + --method org.freedesktop.DBus.Properties.Get 'org.freedesktop.UPower.PowerProfiles' 'ActiveProfile' + """ + ) - def set_profile(profile): - return machine.succeed( - """gdbus call --system --dest org.freedesktop.UPower.PowerProfiles --object-path /org/freedesktop/UPower/PowerProfiles \ - --method org.freedesktop.DBus.Properties.Set 'org.freedesktop.UPower.PowerProfiles' 'ActiveProfile' "<'{profile}'>" - """.format( - profile=profile - ) - ) + def set_profile(profile): + return machine.succeed( + """gdbus call --system --dest org.freedesktop.UPower.PowerProfiles --object-path /org/freedesktop/UPower/PowerProfiles \ + --method org.freedesktop.DBus.Properties.Set 'org.freedesktop.UPower.PowerProfiles' 'ActiveProfile' "<'{profile}'>" + """.format( + profile=profile + ) + ) - machine.wait_for_unit("multi-user.target") + machine.wait_for_unit("multi-user.target") - set_profile("power-saver") - profile = get_profile() - if not "power-saver" in profile: - raise Exception("Unable to set power-saver profile") + set_profile("power-saver") + profile = get_profile() + if not "power-saver" in profile: + raise Exception("Unable to set power-saver profile") - set_profile("balanced") - profile = get_profile() - if not "balanced" in profile: - raise Exception("Unable to set balanced profile") + set_profile("balanced") + profile = get_profile() + if not "balanced" in profile: + raise Exception("Unable to set balanced profile") - # test powerprofilectl CLI - machine.succeed("powerprofilesctl set power-saver") - profile = get_profile() - if not "power-saver" in profile: - raise Exception("Unable to set power-saver profile with powerprofilectl") + # test powerprofilectl CLI + machine.succeed("powerprofilesctl set power-saver") + profile = get_profile() + if not "power-saver" in profile: + raise Exception("Unable to set power-saver profile with powerprofilectl") - machine.succeed("powerprofilesctl set balanced") - profile = get_profile() - if not "balanced" in profile: - raise Exception("Unable to set balanced profile with powerprofilectl") - ''; - } -) + machine.succeed("powerprofilesctl set balanced") + profile = get_profile() + if not "balanced" in profile: + raise Exception("Unable to set balanced profile with powerprofilectl") + ''; +} diff --git a/nixos/tests/powerdns.nix b/nixos/tests/powerdns.nix index 9ca6e597b6be..2813fa89bee5 100644 --- a/nixos/tests/powerdns.nix +++ b/nixos/tests/powerdns.nix @@ -2,71 +2,69 @@ # generic MySQL backend (gmysql) to connect to a # MariaDB server using UNIX sockets authentication. -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "powerdns"; +{ pkgs, lib, ... }: +{ + name = "powerdns"; - nodes.server = - { ... }: - { - services.powerdns.enable = true; - services.powerdns.extraConfig = '' - launch=gmysql - gmysql-user=pdns - zone-cache-refresh-interval=0 - ''; + nodes.server = + { ... 
}: + { + services.powerdns.enable = true; + services.powerdns.extraConfig = '' + launch=gmysql + gmysql-user=pdns + zone-cache-refresh-interval=0 + ''; - services.mysql = { - enable = true; - package = pkgs.mariadb; - ensureDatabases = [ "powerdns" ]; - ensureUsers = lib.singleton { - name = "pdns"; - ensurePermissions = { - "powerdns.*" = "ALL PRIVILEGES"; - }; + services.mysql = { + enable = true; + package = pkgs.mariadb; + ensureDatabases = [ "powerdns" ]; + ensureUsers = lib.singleton { + name = "pdns"; + ensurePermissions = { + "powerdns.*" = "ALL PRIVILEGES"; }; }; - - environment.systemPackages = with pkgs; [ - dnsutils - powerdns - mariadb - ]; }; - testScript = '' - with subtest("PowerDNS database exists"): - server.wait_for_unit("mysql") - server.succeed("echo 'SHOW DATABASES;' | sudo -u pdns mysql -u pdns >&2") + environment.systemPackages = with pkgs; [ + dnsutils + powerdns + mariadb + ]; + }; - with subtest("Loading the MySQL schema works"): - server.succeed( - "sudo -u pdns mysql -u pdns -D powerdns <" - "${pkgs.powerdns}/share/doc/pdns/schema.mysql.sql" - ) + testScript = '' + with subtest("PowerDNS database exists"): + server.wait_for_unit("mysql") + server.succeed("echo 'SHOW DATABASES;' | sudo -u pdns mysql -u pdns >&2") - with subtest("PowerDNS server starts"): - server.wait_for_unit("pdns") - server.succeed("dig version.bind txt chaos @127.0.0.1 >&2") + with subtest("Loading the MySQL schema works"): + server.succeed( + "sudo -u pdns mysql -u pdns -D powerdns <" + "${pkgs.powerdns}/share/doc/pdns/schema.mysql.sql" + ) - with subtest("Adding an example zone works"): - # Extract configuration file needed by pdnsutil - pdnsutil = "sudo -u pdns pdnsutil " - server.succeed(f"{pdnsutil} create-zone example.com ns1.example.com") - server.succeed(f"{pdnsutil} add-record example.com ns1 A 192.168.1.2") + with subtest("PowerDNS server starts"): + server.wait_for_unit("pdns") + server.succeed("dig version.bind txt chaos @127.0.0.1 >&2") - with subtest("Querying the example zone works"): - reply = server.succeed("dig +noall +answer ns1.example.com @127.0.0.1") - assert ( - "192.168.1.2" in reply - ), f"""" - The reply does not contain the expected IP address: - Expected: - ns1.example.com. 3600 IN A 192.168.1.2 - Reply: - {reply}""" - ''; - } -) + with subtest("Adding an example zone works"): + # Extract configuration file needed by pdnsutil + pdnsutil = "sudo -u pdns pdnsutil " + server.succeed(f"{pdnsutil} create-zone example.com ns1.example.com") + server.succeed(f"{pdnsutil} add-record example.com ns1 A 192.168.1.2") + + with subtest("Querying the example zone works"): + reply = server.succeed("dig +noall +answer ns1.example.com @127.0.0.1") + assert ( + "192.168.1.2" in reply + ), f"""" + The reply does not contain the expected IP address: + Expected: + ns1.example.com. 3600 IN A 192.168.1.2 + Reply: + {reply}""" + ''; +} diff --git a/nixos/tests/pppd.nix b/nixos/tests/pppd.nix index ad4927e9e67b..20cc03bfc7e7 100644 --- a/nixos/tests/pppd.nix +++ b/nixos/tests/pppd.nix @@ -1,75 +1,73 @@ -import ./make-test-python.nix ( - let - chap-secrets = { - text = ''"flynn" * "reindeerflotilla" *''; - mode = "0640"; - }; - in - { pkgs, ... }: - { - name = "pppd"; +let + chap-secrets = { + text = ''"flynn" * "reindeerflotilla" *''; + mode = "0640"; + }; +in +{ pkgs, ... }: +{ + name = "pppd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ stv0g ]; - }; + meta = with pkgs.lib.maintainers; { + maintainers = [ stv0g ]; + }; - nodes = { - server = - { config, pkgs, ... 
}: - { - config = { - # Run a PPPoE access concentrator server. It will spawn an - # appropriate PPP server process when a PPPoE client sets up a - # PPPoE session. - systemd.services.pppoe-server = { - restartTriggers = [ - config.environment.etc."ppp/pppoe-server-options".source - config.environment.etc."ppp/chap-secrets".source - ]; - after = [ "network.target" ]; - serviceConfig = { - ExecStart = "${pkgs.rpPPPoE}/sbin/pppoe-server -F -O /etc/ppp/pppoe-server-options -q ${pkgs.ppp}/sbin/pppd -I eth1 -L 192.0.2.1 -R 192.0.2.2"; - }; - wantedBy = [ "multi-user.target" ]; - }; - environment.etc = { - "ppp/pppoe-server-options".text = '' - lcp-echo-interval 10 - lcp-echo-failure 2 - plugin pppoe.so - require-chap - nobsdcomp - noccp - novj - ''; - "ppp/chap-secrets" = chap-secrets; + nodes = { + server = + { config, pkgs, ... }: + { + config = { + # Run a PPPoE access concentrator server. It will spawn an + # appropriate PPP server process when a PPPoE client sets up a + # PPPoE session. + systemd.services.pppoe-server = { + restartTriggers = [ + config.environment.etc."ppp/pppoe-server-options".source + config.environment.etc."ppp/chap-secrets".source + ]; + after = [ "network.target" ]; + serviceConfig = { + ExecStart = "${pkgs.rpPPPoE}/sbin/pppoe-server -F -O /etc/ppp/pppoe-server-options -q ${pkgs.ppp}/sbin/pppd -I eth1 -L 192.0.2.1 -R 192.0.2.2"; }; + wantedBy = [ "multi-user.target" ]; + }; + environment.etc = { + "ppp/pppoe-server-options".text = '' + lcp-echo-interval 10 + lcp-echo-failure 2 + plugin pppoe.so + require-chap + nobsdcomp + noccp + novj + ''; + "ppp/chap-secrets" = chap-secrets; }; }; - client = - { config, pkgs, ... }: - { - services.pppd = { - enable = true; - peers.test = { - config = '' - plugin pppoe.so eth1 - name "flynn" - noipdefault - persist - noauth - debug - ''; - }; + }; + client = + { config, pkgs, ... }: + { + services.pppd = { + enable = true; + peers.test = { + config = '' + plugin pppoe.so eth1 + name "flynn" + noipdefault + persist + noauth + debug + ''; }; - environment.etc."ppp/chap-secrets" = chap-secrets; }; - }; + environment.etc."ppp/chap-secrets" = chap-secrets; + }; + }; - testScript = '' - start_all() - client.wait_until_succeeds("ping -c1 -W1 192.0.2.1") - server.wait_until_succeeds("ping -c1 -W1 192.0.2.2") - ''; - } -) + testScript = '' + start_all() + client.wait_until_succeeds("ping -c1 -W1 192.0.2.1") + server.wait_until_succeeds("ping -c1 -W1 192.0.2.2") + ''; +} diff --git a/nixos/tests/private-gpt.nix b/nixos/tests/private-gpt.nix index f6bfda38aabf..f5c005165ab1 100644 --- a/nixos/tests/private-gpt.nix +++ b/nixos/tests/private-gpt.nix @@ -1,31 +1,29 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - mainPort = "8001"; - in - { - name = "private-gpt"; - meta = with lib.maintainers; { - maintainers = [ ]; - }; +{ pkgs, lib, ... }: +let + mainPort = "8001"; +in +{ + name = "private-gpt"; + meta = with lib.maintainers; { + maintainers = [ ]; + }; - nodes = { - machine = - { ... }: - { - services.private-gpt = { - enable = true; - }; + nodes = { + machine = + { ... 
}: + { + services.private-gpt = { + enable = true; }; - }; + }; + }; - testScript = '' - machine.start() + testScript = '' + machine.start() - machine.wait_for_unit("private-gpt.service") - machine.wait_for_open_port(${mainPort}) + machine.wait_for_unit("private-gpt.service") + machine.wait_for_open_port(${mainPort}) - machine.succeed("curl http://127.0.0.1:${mainPort}") - ''; - } -) + machine.succeed("curl http://127.0.0.1:${mainPort}") + ''; +} diff --git a/nixos/tests/privoxy.nix b/nixos/tests/privoxy.nix index 0c0cb6ab2e31..acbd1f81e878 100644 --- a/nixos/tests/privoxy.nix +++ b/nixos/tests/privoxy.nix @@ -1,155 +1,153 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - let - # Note: For some reason Privoxy can't issue valid - # certificates if the CA is generated using gnutls :( - certs = pkgs.runCommand "example-certs" { buildInputs = [ pkgs.openssl ]; } '' - mkdir $out +let + # Note: For some reason Privoxy can't issue valid + # certificates if the CA is generated using gnutls :( + certs = pkgs.runCommand "example-certs" { buildInputs = [ pkgs.openssl ]; } '' + mkdir $out - # generate CA keypair - openssl req -new -nodes -x509 \ - -extensions v3_ca -keyout $out/ca.key \ - -out $out/ca.crt -days 365 \ - -subj "/O=Privoxy CA/CN=Privoxy CA" + # generate CA keypair + openssl req -new -nodes -x509 \ + -extensions v3_ca -keyout $out/ca.key \ + -out $out/ca.crt -days 365 \ + -subj "/O=Privoxy CA/CN=Privoxy CA" - # generate server key/signing request - openssl genrsa -out $out/server.key 3072 - openssl req -new -key $out/server.key \ - -out server.csr -sha256 \ - -subj "/O=An unhappy server./CN=example.com" + # generate server key/signing request + openssl genrsa -out $out/server.key 3072 + openssl req -new -key $out/server.key \ + -out server.csr -sha256 \ + -subj "/O=An unhappy server./CN=example.com" - # sign the request/generate the certificate - openssl x509 -req -in server.csr -CA $out/ca.crt \ - -CAkey $out/ca.key -CAcreateserial -out $out/server.crt \ - -days 500 -sha256 - ''; - in + # sign the request/generate the certificate + openssl x509 -req -in server.csr -CA $out/ca.crt \ + -CAkey $out/ca.key -CAcreateserial -out $out/server.crt \ + -days 500 -sha256 + ''; +in - { - name = "privoxy"; - meta = with lib.maintainers; { - maintainers = [ rnhmjoj ]; +{ + name = "privoxy"; + meta = with lib.maintainers; { + maintainers = [ rnhmjoj ]; + }; + + nodes.machine = + { ... }: + { + services.nginx.enable = true; + services.nginx.virtualHosts."example.com" = { + addSSL = true; + sslCertificate = "${certs}/server.crt"; + sslCertificateKey = "${certs}/server.key"; + locations."/".root = pkgs.writeTextFile { + name = "bad-day"; + destination = "/how-are-you/index.html"; + text = "I've had a bad day!\n"; + }; + locations."/ads".extraConfig = '' + return 200 "Hot Nixpkgs PRs in your area. Click here!\n"; + ''; + }; + + services.privoxy = { + enable = true; + inspectHttps = true; + settings = { + ca-cert-file = "${certs}/ca.crt"; + ca-key-file = "${certs}/ca.key"; + debug = 65536; + }; + userActions = '' + {+filter{positive}} + example.com + + {+block{Fake ads}} + example.com/ads + ''; + userFilters = '' + FILTER: positive This is a filter example. + s/bad/great/ig + ''; + }; + + security.pki.certificateFiles = [ "${certs}/ca.crt" ]; + + networking.hosts."::1" = [ "example.com" ]; + networking.proxy.httpProxy = "http://localhost:8118"; + networking.proxy.httpsProxy = "http://localhost:8118"; }; - nodes.machine = - { ... 
}: - { - services.nginx.enable = true; - services.nginx.virtualHosts."example.com" = { - addSSL = true; - sslCertificate = "${certs}/server.crt"; - sslCertificateKey = "${certs}/server.key"; - locations."/".root = pkgs.writeTextFile { - name = "bad-day"; - destination = "/how-are-you/index.html"; - text = "I've had a bad day!\n"; - }; - locations."/ads".extraConfig = '' - return 200 "Hot Nixpkgs PRs in your area. Click here!\n"; - ''; - }; - - services.privoxy = { - enable = true; - inspectHttps = true; - settings = { - ca-cert-file = "${certs}/ca.crt"; - ca-key-file = "${certs}/ca.key"; - debug = 65536; - }; - userActions = '' - {+filter{positive}} - example.com - - {+block{Fake ads}} - example.com/ads - ''; - userFilters = '' - FILTER: positive This is a filter example. - s/bad/great/ig - ''; - }; - - security.pki.certificateFiles = [ "${certs}/ca.crt" ]; - - networking.hosts."::1" = [ "example.com" ]; - networking.proxy.httpProxy = "http://localhost:8118"; - networking.proxy.httpsProxy = "http://localhost:8118"; + nodes.machine_socks4 = + { ... }: + { + services.privoxy = { + enable = true; + settings.forward-socks4 = "/ 127.0.0.1:9050 ."; }; - - nodes.machine_socks4 = - { ... }: - { - services.privoxy = { - enable = true; - settings.forward-socks4 = "/ 127.0.0.1:9050 ."; - }; + }; + nodes.machine_socks4a = + { ... }: + { + services.privoxy = { + enable = true; + settings.forward-socks4a = "/ 127.0.0.1:9050 ."; }; - nodes.machine_socks4a = - { ... }: - { - services.privoxy = { - enable = true; - settings.forward-socks4a = "/ 127.0.0.1:9050 ."; - }; + }; + nodes.machine_socks5 = + { ... }: + { + services.privoxy = { + enable = true; + settings.forward-socks5 = "/ 127.0.0.1:9050 ."; }; - nodes.machine_socks5 = - { ... }: - { - services.privoxy = { - enable = true; - settings.forward-socks5 = "/ 127.0.0.1:9050 ."; - }; - }; - nodes.machine_socks5t = - { ... }: - { - services.privoxy = { - enable = true; - settings.forward-socks5t = "/ 127.0.0.1:9050 ."; - }; + }; + nodes.machine_socks5t = + { ... }: + { + services.privoxy = { + enable = true; + settings.forward-socks5t = "/ 127.0.0.1:9050 ."; }; + }; - testScript = '' - with subtest("Privoxy is running"): - machine.wait_for_unit("privoxy") - machine.wait_for_open_port(8118) - machine.succeed("curl -f http://config.privoxy.org") + testScript = '' + with subtest("Privoxy is running"): + machine.wait_for_unit("privoxy") + machine.wait_for_open_port(8118) + machine.succeed("curl -f http://config.privoxy.org") - with subtest("Privoxy can filter http requests"): - machine.wait_for_open_port(80) - assert "great day" in machine.succeed( - "curl -sfL http://example.com/how-are-you? | tee /dev/stderr" - ) + with subtest("Privoxy can filter http requests"): + machine.wait_for_open_port(80) + assert "great day" in machine.succeed( + "curl -sfL http://example.com/how-are-you? | tee /dev/stderr" + ) - with subtest("Privoxy can filter https requests"): - machine.wait_for_open_port(443) - assert "great day" in machine.succeed( - "curl -sfL https://example.com/how-are-you? | tee /dev/stderr" - ) + with subtest("Privoxy can filter https requests"): + machine.wait_for_open_port(443) + assert "great day" in machine.succeed( + "curl -sfL https://example.com/how-are-you? 
| tee /dev/stderr" + ) - with subtest("Blocks are working"): - machine.wait_for_open_port(443) - machine.fail("curl -f https://example.com/ads 1>&2") - machine.succeed("curl -f https://example.com/PRIVOXY-FORCE/ads 1>&2") + with subtest("Blocks are working"): + machine.wait_for_open_port(443) + machine.fail("curl -f https://example.com/ads 1>&2") + machine.succeed("curl -f https://example.com/PRIVOXY-FORCE/ads 1>&2") - with subtest("Temporary certificates are cleaned"): - # Count current certificates - machine.succeed("test $(ls /run/privoxy/certs | wc -l) -gt 0") - # Forward in time 12 days, trigger the timer.. - machine.succeed("date -s \"$(date --date '12 days')\"") - machine.systemctl("start systemd-tmpfiles-clean") - # ...and count again - machine.succeed("test $(ls /run/privoxy/certs | wc -l) -eq 0") + with subtest("Temporary certificates are cleaned"): + # Count current certificates + machine.succeed("test $(ls /run/privoxy/certs | wc -l) -gt 0") + # Forward in time 12 days, trigger the timer.. + machine.succeed("date -s \"$(date --date '12 days')\"") + machine.systemctl("start systemd-tmpfiles-clean") + # ...and count again + machine.succeed("test $(ls /run/privoxy/certs | wc -l) -eq 0") - with subtest("Privoxy supports socks upstream proxies"): - for m in [machine_socks4, machine_socks4a, machine_socks5, machine_socks5t]: - m.wait_for_unit("privoxy") - m.wait_for_open_port(8118) - # We expect a 503 error because the dummy upstream proxy is not reachable. - # In issue #265654, instead privoxy segfaulted causing curl to exit with "Empty reply from server". - m.succeed("http_proxy=http://localhost:8118 curl -v http://does-not-exist/ 2>&1 | grep 'HTTP/1.1 503'") - ''; - } -) + with subtest("Privoxy supports socks upstream proxies"): + for m in [machine_socks4, machine_socks4a, machine_socks5, machine_socks5t]: + m.wait_for_unit("privoxy") + m.wait_for_open_port(8118) + # We expect a 503 error because the dummy upstream proxy is not reachable. + # In issue #265654, instead privoxy segfaulted causing curl to exit with "Empty reply from server". + m.succeed("http_proxy=http://localhost:8118 curl -v http://does-not-exist/ 2>&1 | grep 'HTTP/1.1 503'") + ''; +} diff --git a/nixos/tests/proxy.nix b/nixos/tests/proxy.nix index beba1d843592..2526ce954c5b 100644 --- a/nixos/tests/proxy.nix +++ b/nixos/tests/proxy.nix @@ -1,100 +1,98 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - backend = - { pkgs, ... }: +let + backend = + { pkgs, ... }: + { + services.httpd = { + enable = true; + adminAddr = "foo@example.org"; + virtualHosts.localhost.documentRoot = "${pkgs.valgrind.doc}/share/doc/valgrind/html"; + }; + networking.firewall.allowedTCPPorts = [ 80 ]; + }; +in +{ + name = "proxy"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; + + nodes = { + proxy = + { nodes, ... }: { services.httpd = { enable = true; - adminAddr = "foo@example.org"; - virtualHosts.localhost.documentRoot = "${pkgs.valgrind.doc}/share/doc/valgrind/html"; + adminAddr = "bar@example.org"; + extraModules = [ + "proxy_balancer" + "lbmethod_byrequests" + ]; + extraConfig = '' + ExtendedStatus on + ''; + virtualHosts.localhost = { + extraConfig = '' + + Require all granted + SetHandler server-status + + + + Require all granted + BalancerMember http://${nodes.backend1.config.networking.hostName} retry=0 + BalancerMember http://${nodes.backend2.config.networking.hostName} retry=0 + + + ProxyStatus full + ProxyPass /server-status ! 
+ ProxyPass / balancer://cluster/ + ProxyPassReverse / balancer://cluster/ + + # For testing; don't want to wait forever for dead backend servers. + ProxyTimeout 5 + ''; + }; }; networking.firewall.allowedTCPPorts = [ 80 ]; }; - in - { - name = "proxy"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; - nodes = { - proxy = - { nodes, ... }: - { - services.httpd = { - enable = true; - adminAddr = "bar@example.org"; - extraModules = [ - "proxy_balancer" - "lbmethod_byrequests" - ]; - extraConfig = '' - ExtendedStatus on - ''; - virtualHosts.localhost = { - extraConfig = '' - - Require all granted - SetHandler server-status - + backend1 = backend; + backend2 = backend; - - Require all granted - BalancerMember http://${nodes.backend1.config.networking.hostName} retry=0 - BalancerMember http://${nodes.backend2.config.networking.hostName} retry=0 - + client = { ... }: { }; + }; - ProxyStatus full - ProxyPass /server-status ! - ProxyPass / balancer://cluster/ - ProxyPassReverse / balancer://cluster/ + testScript = '' + start_all() - # For testing; don't want to wait forever for dead backend servers. - ProxyTimeout 5 - ''; - }; - }; - networking.firewall.allowedTCPPorts = [ 80 ]; - }; + proxy.wait_for_unit("httpd") + backend1.wait_for_unit("httpd") + backend2.wait_for_unit("httpd") + client.wait_for_unit("network.target") - backend1 = backend; - backend2 = backend; + # With the back-ends up, the proxy should work. + client.succeed("curl --fail http://proxy/") - client = { ... }: { }; - }; + client.succeed("curl --fail http://proxy/server-status") - testScript = '' - start_all() + # Block the first back-end. + backend1.block() - proxy.wait_for_unit("httpd") - backend1.wait_for_unit("httpd") - backend2.wait_for_unit("httpd") - client.wait_for_unit("network.target") + # The proxy should still work. + client.succeed("curl --fail http://proxy/") + client.succeed("curl --fail http://proxy/") - # With the back-ends up, the proxy should work. - client.succeed("curl --fail http://proxy/") + # Block the second back-end. + backend2.block() - client.succeed("curl --fail http://proxy/server-status") + # Now the proxy should fail as well. + client.fail("curl --fail http://proxy/") - # Block the first back-end. - backend1.block() - - # The proxy should still work. - client.succeed("curl --fail http://proxy/") - client.succeed("curl --fail http://proxy/") - - # Block the second back-end. - backend2.block() - - # Now the proxy should fail as well. - client.fail("curl --fail http://proxy/") - - # But if the second back-end comes back, the proxy should start - # working again. - backend2.unblock() - client.succeed("curl --fail http://proxy/") - ''; - } -) + # But if the second back-end comes back, the proxy should start + # working again. + backend2.unblock() + client.succeed("curl --fail http://proxy/") + ''; +} diff --git a/nixos/tests/pt2-clone.nix b/nixos/tests/pt2-clone.nix index 98df2e8cbac3..160431175f8d 100644 --- a/nixos/tests/pt2-clone.nix +++ b/nixos/tests/pt2-clone.nix @@ -1,37 +1,35 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "pt2-clone"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "pt2-clone"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = [ pkgs.pt2-clone ]; }; - nodes.machine = - { config, pkgs, ... 
}: - { - imports = [ - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - environment.systemPackages = [ pkgs.pt2-clone ]; - }; + testScript = '' + machine.wait_for_x() + # Add a dummy sound card, or the program won't start + machine.execute("modprobe snd-dummy") - enableOCR = true; + machine.execute("pt2-clone >&2 &") - testScript = '' - machine.wait_for_x() - # Add a dummy sound card, or the program won't start - machine.execute("modprobe snd-dummy") - - machine.execute("pt2-clone >&2 &") - - machine.wait_for_window(r"ProTracker") - machine.sleep(5) - # One of the few words that actually get recognized - if "LENGTH" not in machine.get_screen_text(): - raise Exception("Program did not start successfully") - machine.screenshot("screen") - ''; - } -) + machine.wait_for_window(r"ProTracker") + machine.sleep(5) + # One of the few words that actually get recognized + if "LENGTH" not in machine.get_screen_text(): + raise Exception("Program did not start successfully") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/public-inbox.nix b/nixos/tests/public-inbox.nix index 698dbf2eb799..23163f0bc982 100644 --- a/nixos/tests/public-inbox.nix +++ b/nixos/tests/public-inbox.nix @@ -1,244 +1,242 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - orga = "example"; - domain = "${orga}.localdomain"; +{ pkgs, lib, ... }: +let + orga = "example"; + domain = "${orga}.localdomain"; - tls-cert = pkgs.runCommand "selfSignedCert" { buildInputs = [ pkgs.openssl ]; } '' - openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -days 36500 \ - -subj '/CN=machine.${domain}' - install -D -t $out key.pem cert.pem - ''; + tls-cert = pkgs.runCommand "selfSignedCert" { buildInputs = [ pkgs.openssl ]; } '' + openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -days 36500 \ + -subj '/CN=machine.${domain}' + install -D -t $out key.pem cert.pem + ''; - gitRepositories = [ - "repo1" - "repo2" - ]; - in - { - name = "public-inbox"; + gitRepositories = [ + "repo1" + "repo2" + ]; +in +{ + name = "public-inbox"; - meta.maintainers = with pkgs.lib.maintainers; [ julm ]; + meta.maintainers = with pkgs.lib.maintainers; [ julm ]; - nodes.machine = - { - config, - pkgs, - nodes, - ... - }: - let - inherit (config.services) public-inbox; - in - { - virtualisation.diskSize = 1 * 1024; - virtualisation.memorySize = 1 * 1024; - networking.domain = domain; + nodes.machine = + { + config, + pkgs, + nodes, + ... 
+ }: + let + inherit (config.services) public-inbox; + in + { + virtualisation.diskSize = 1 * 1024; + virtualisation.memorySize = 1 * 1024; + networking.domain = domain; - security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ]; - # If using security.acme: - #security.acme.certs."${domain}".postRun = '' - # systemctl try-restart public-inbox-nntpd public-inbox-imapd - #''; + security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ]; + # If using security.acme: + #security.acme.certs."${domain}".postRun = '' + # systemctl try-restart public-inbox-nntpd public-inbox-imapd + #''; - services.public-inbox = { + services.public-inbox = { + enable = true; + postfix.enable = true; + openFirewall = true; + settings.publicinbox = { + css = [ "href=https://machine.${domain}/style/light.css" ]; + nntpserver = [ "nntps://machine.${domain}" ]; + wwwlisting = "match=domain"; + }; + mda = { enable = true; - postfix.enable = true; - openFirewall = true; - settings.publicinbox = { - css = [ "href=https://machine.${domain}/style/light.css" ]; - nntpserver = [ "nntps://machine.${domain}" ]; - wwwlisting = "match=domain"; - }; - mda = { - enable = true; - args = [ "--no-precheck" ]; # Allow Bcc: - }; - http = { - enable = true; - port = "/run/public-inbox-http.sock"; - #port = 8080; - args = [ "-W0" ]; - mounts = [ - "https://machine.${domain}/inbox" - ]; - }; - nntp = { - enable = true; - #port = 563; - args = [ "-W0" ]; - cert = "${tls-cert}/cert.pem"; - key = "${tls-cert}/key.pem"; - }; - imap = { - enable = true; - #port = 993; - args = [ "-W0" ]; - cert = "${tls-cert}/cert.pem"; - key = "${tls-cert}/key.pem"; - }; - inboxes = - lib.recursiveUpdate - (lib.genAttrs gitRepositories (repo: { - address = [ - # Routed to the "public-inbox:" transport in services.postfix.transport - "${repo}@${domain}" + args = [ "--no-precheck" ]; # Allow Bcc: + }; + http = { + enable = true; + port = "/run/public-inbox-http.sock"; + #port = 8080; + args = [ "-W0" ]; + mounts = [ + "https://machine.${domain}/inbox" + ]; + }; + nntp = { + enable = true; + #port = 563; + args = [ "-W0" ]; + cert = "${tls-cert}/cert.pem"; + key = "${tls-cert}/key.pem"; + }; + imap = { + enable = true; + #port = 993; + args = [ "-W0" ]; + cert = "${tls-cert}/cert.pem"; + key = "${tls-cert}/key.pem"; + }; + inboxes = + lib.recursiveUpdate + (lib.genAttrs gitRepositories (repo: { + address = [ + # Routed to the "public-inbox:" transport in services.postfix.transport + "${repo}@${domain}" + ]; + description = '' + ${repo}@${domain} : + discussions about ${repo}. + ''; + url = "https://machine.${domain}/inbox/${repo}"; + newsgroup = "inbox.comp.${orga}.${repo}"; + coderepo = [ repo ]; + })) + { + repo2 = { + hide = [ + "imap" # FIXME: doesn't work for IMAP as of public-inbox 1.6.1 + "manifest" + "www" ]; - description = '' - ${repo}@${domain} : - discussions about ${repo}. 
- ''; - url = "https://machine.${domain}/inbox/${repo}"; - newsgroup = "inbox.comp.${orga}.${repo}"; - coderepo = [ repo ]; - })) - { - repo2 = { - hide = [ - "imap" # FIXME: doesn't work for IMAP as of public-inbox 1.6.1 - "manifest" - "www" - ]; - }; }; - settings.coderepo = lib.listToAttrs ( - map ( - repositoryName: - lib.nameValuePair repositoryName { - dir = "/var/lib/public-inbox/repositories/${repositoryName}.git"; - cgitUrl = "https://git.${domain}/${repositoryName}.git"; - } - ) gitRepositories - ); - }; - - # Use nginx as a reverse proxy for public-inbox-httpd - services.nginx = { - enable = true; - recommendedGzipSettings = true; - recommendedOptimisation = true; - recommendedTlsSettings = true; - recommendedProxySettings = true; - virtualHosts."machine.${domain}" = { - forceSSL = true; - sslCertificate = "${tls-cert}/cert.pem"; - sslCertificateKey = "${tls-cert}/key.pem"; - locations."/".return = "302 /inbox"; - locations."= /inbox".return = "302 /inbox/"; - locations."/inbox".proxyPass = "http://unix:${public-inbox.http.port}:/inbox"; - # If using TCP instead of a Unix socket: - #locations."/inbox".proxyPass = "http://127.0.0.1:${toString public-inbox.http.port}/inbox"; - # Referred to by settings.publicinbox.css - # See http://public-inbox.org/meta/_/text/color/ - locations."= /style/light.css".alias = pkgs.writeText "light.css" '' - * { background:#fff; color:#000 } - - a { color:#00f; text-decoration:none } - a:visited { color:#808 } - - *.q { color:#008 } - - *.add { color:#060 } - *.del {color:#900 } - *.head { color:#000 } - *.hunk { color:#960 } - - .hl.num { color:#f30 } /* number */ - .hl.esc { color:#f0f } /* escape character */ - .hl.str { color:#f30 } /* string */ - .hl.ppc { color:#c3c } /* preprocessor */ - .hl.pps { color:#f30 } /* preprocessor string */ - .hl.slc { color:#099 } /* single-line comment */ - .hl.com { color:#099 } /* multi-line comment */ - /* .hl.opt { color:#ccc } */ /* operator */ - /* .hl.ipl { color:#ccc } */ /* interpolation */ - - /* keyword groups kw[a-z] */ - .hl.kwa { color:#f90 } - .hl.kwb { color:#060 } - .hl.kwc { color:#f90 } - /* .hl.kwd { color:#ccc } */ - ''; - }; - }; - - services.postfix = { - enable = true; - setSendmail = true; - #sslCert = "${tls-cert}/cert.pem"; - #sslKey = "${tls-cert}/key.pem"; - recipientDelimiter = "+"; - }; - - environment.systemPackages = [ - pkgs.gitMinimal - pkgs.mailutils - pkgs.openssl - ]; - + }; + settings.coderepo = lib.listToAttrs ( + map ( + repositoryName: + lib.nameValuePair repositoryName { + dir = "/var/lib/public-inbox/repositories/${repositoryName}.git"; + cgitUrl = "https://git.${domain}/${repositoryName}.git"; + } + ) gitRepositories + ); }; - testScript = '' - start_all() + # Use nginx as a reverse proxy for public-inbox-httpd + services.nginx = { + enable = true; + recommendedGzipSettings = true; + recommendedOptimisation = true; + recommendedTlsSettings = true; + recommendedProxySettings = true; + virtualHosts."machine.${domain}" = { + forceSSL = true; + sslCertificate = "${tls-cert}/cert.pem"; + sslCertificateKey = "${tls-cert}/key.pem"; + locations."/".return = "302 /inbox"; + locations."= /inbox".return = "302 /inbox/"; + locations."/inbox".proxyPass = "http://unix:${public-inbox.http.port}:/inbox"; + # If using TCP instead of a Unix socket: + #locations."/inbox".proxyPass = "http://127.0.0.1:${toString public-inbox.http.port}/inbox"; + # Referred to by settings.publicinbox.css + # See http://public-inbox.org/meta/_/text/color/ + locations."= /style/light.css".alias = 
pkgs.writeText "light.css" '' + * { background:#fff; color:#000 } - # The threshold and/or hardening may have to be changed with new features/checks - with subtest("systemd hardening thresholds"): - print(machine.succeed("systemd-analyze security public-inbox-httpd.service --threshold=5 --no-pager")) - print(machine.succeed("systemd-analyze security public-inbox-imapd.service --threshold=5 --no-pager")) - print(machine.succeed("systemd-analyze security public-inbox-nntpd.service --threshold=4 --no-pager")) + a { color:#00f; text-decoration:none } + a:visited { color:#808 } - machine.wait_for_unit("multi-user.target") - machine.wait_for_unit("public-inbox-init.service") + *.q { color:#008 } - machine.succeed( - ${lib.concatMapStrings (repositoryName: '' - "sudo -u public-inbox git init --bare -b main /var/lib/public-inbox/repositories/${repositoryName}.git", - '') gitRepositories} - ) + *.add { color:#060 } + *.del {color:#900 } + *.head { color:#000 } + *.hunk { color:#960 } - # List inboxes through public-inbox-httpd - machine.wait_for_unit("public-inbox-httpd.socket") - machine.wait_for_unit("nginx.service") - machine.succeed("curl -L https://machine.${domain} | grep repo1@${domain}") - # The repo2 inbox is hidden - machine.fail("curl -L https://machine.${domain} | grep repo2@${domain}") + .hl.num { color:#f30 } /* number */ + .hl.esc { color:#f0f } /* escape character */ + .hl.str { color:#f30 } /* string */ + .hl.ppc { color:#c3c } /* preprocessor */ + .hl.pps { color:#f30 } /* preprocessor string */ + .hl.slc { color:#099 } /* single-line comment */ + .hl.com { color:#099 } /* multi-line comment */ + /* .hl.opt { color:#ccc } */ /* operator */ + /* .hl.ipl { color:#ccc } */ /* interpolation */ - # Send a mail and read it through public-inbox-httpd - # Must work too when using a recipientDelimiter. - machine.wait_for_unit("postfix.service") - machine.succeed("mail -t <${pkgs.writeText "mail" '' - Subject: Testing mail - From: root@localhost - To: repo1+extension@${domain} - Message-ID: - Content-Type: text/plain; charset=utf-8 - Content-Disposition: inline + /* keyword groups kw[a-z] */ + .hl.kwa { color:#f90 } + .hl.kwb { color:#060 } + .hl.kwc { color:#f90 } + /* .hl.kwd { color:#ccc } */ + ''; + }; + }; - This is a testing mail. - ''}") - machine.sleep(10) - machine.succeed("curl -L 'https://machine.${domain}/inbox/repo1/repo1@root-1/T/#u' | grep 'This is a testing mail.'") + services.postfix = { + enable = true; + setSendmail = true; + #sslCert = "${tls-cert}/cert.pem"; + #sslKey = "${tls-cert}/key.pem"; + recipientDelimiter = "+"; + }; - # Read a mail through public-inbox-imapd - machine.wait_for_unit("public-inbox-imapd.socket") - machine.succeed("openssl s_client -ign_eof -crlf -connect machine.${domain}:993 <${pkgs.writeText "imap-commands" '' - tag login anonymous@${domain} anonymous - tag SELECT INBOX.comp.${orga}.repo1.0 - tag FETCH 1 (BODY[HEADER]) - tag LOGOUT - ''} | grep '^Message-ID: '") + environment.systemPackages = [ + pkgs.gitMinimal + pkgs.mailutils + pkgs.openssl + ]; - # TODO: Read a mail through public-inbox-nntpd - #machine.wait_for_unit("public-inbox-nntpd.socket") + }; - # Delete a mail. 
- # Note that the use of an extension not listed in the addresses - # require to use --all - machine.succeed("curl -L https://machine.${domain}/inbox/repo1/repo1@root-1/raw | sudo -u public-inbox public-inbox-learn rm --all") - machine.fail("curl -L https://machine.${domain}/inbox/repo1/repo1@root-1/T/#u | grep 'This is a testing mail.'") + testScript = '' + start_all() - # Compact the database - machine.succeed("sudo -u public-inbox public-inbox-compact --all") - ''; - } -) + # The threshold and/or hardening may have to be changed with new features/checks + with subtest("systemd hardening thresholds"): + print(machine.succeed("systemd-analyze security public-inbox-httpd.service --threshold=5 --no-pager")) + print(machine.succeed("systemd-analyze security public-inbox-imapd.service --threshold=5 --no-pager")) + print(machine.succeed("systemd-analyze security public-inbox-nntpd.service --threshold=4 --no-pager")) + + machine.wait_for_unit("multi-user.target") + machine.wait_for_unit("public-inbox-init.service") + + machine.succeed( + ${lib.concatMapStrings (repositoryName: '' + "sudo -u public-inbox git init --bare -b main /var/lib/public-inbox/repositories/${repositoryName}.git", + '') gitRepositories} + ) + + # List inboxes through public-inbox-httpd + machine.wait_for_unit("public-inbox-httpd.socket") + machine.wait_for_unit("nginx.service") + machine.succeed("curl -L https://machine.${domain} | grep repo1@${domain}") + # The repo2 inbox is hidden + machine.fail("curl -L https://machine.${domain} | grep repo2@${domain}") + + # Send a mail and read it through public-inbox-httpd + # Must work too when using a recipientDelimiter. + machine.wait_for_unit("postfix.service") + machine.succeed("mail -t <${pkgs.writeText "mail" '' + Subject: Testing mail + From: root@localhost + To: repo1+extension@${domain} + Message-ID: + Content-Type: text/plain; charset=utf-8 + Content-Disposition: inline + + This is a testing mail. + ''}") + machine.sleep(10) + machine.succeed("curl -L 'https://machine.${domain}/inbox/repo1/repo1@root-1/T/#u' | grep 'This is a testing mail.'") + + # Read a mail through public-inbox-imapd + machine.wait_for_unit("public-inbox-imapd.socket") + machine.succeed("openssl s_client -ign_eof -crlf -connect machine.${domain}:993 <${pkgs.writeText "imap-commands" '' + tag login anonymous@${domain} anonymous + tag SELECT INBOX.comp.${orga}.repo1.0 + tag FETCH 1 (BODY[HEADER]) + tag LOGOUT + ''} | grep '^Message-ID: '") + + # TODO: Read a mail through public-inbox-nntpd + #machine.wait_for_unit("public-inbox-nntpd.socket") + + # Delete a mail. + # Note that the use of an extension not listed in the addresses + # require to use --all + machine.succeed("curl -L https://machine.${domain}/inbox/repo1/repo1@root-1/raw | sudo -u public-inbox public-inbox-learn rm --all") + machine.fail("curl -L https://machine.${domain}/inbox/repo1/repo1@root-1/T/#u | grep 'This is a testing mail.'") + + # Compact the database + machine.succeed("sudo -u public-inbox public-inbox-compact --all") + ''; +} diff --git a/nixos/tests/pufferpanel.nix b/nixos/tests/pufferpanel.nix index e0613b43b7de..0b695a57d266 100644 --- a/nixos/tests/pufferpanel.nix +++ b/nixos/tests/pufferpanel.nix @@ -1,79 +1,77 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "pufferpanel"; - meta.maintainers = [ lib.maintainers.tie ]; +{ lib, ... }: +{ + name = "pufferpanel"; + meta.maintainers = [ lib.maintainers.tie ]; - nodes.machine = - { pkgs, ... 
}: - { - environment.systemPackages = [ pkgs.pufferpanel ]; - services.pufferpanel = { - enable = true; - extraPackages = [ pkgs.netcat ]; - environment = { - PUFFER_PANEL_REGISTRATIONENABLED = "false"; - PUFFER_PANEL_SETTINGS_COMPANYNAME = "NixOS"; - }; + nodes.machine = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.pufferpanel ]; + services.pufferpanel = { + enable = true; + extraPackages = [ pkgs.netcat ]; + environment = { + PUFFER_PANEL_REGISTRATIONENABLED = "false"; + PUFFER_PANEL_SETTINGS_COMPANYNAME = "NixOS"; }; }; + }; - testScript = '' - import shlex - import json + testScript = '' + import shlex + import json - curl = "curl --fail-with-body --silent" - baseURL = "http://localhost:8080" - adminName = "admin" - adminEmail = "admin@nixos.org" - adminPass = "admin" - adminCreds = json.dumps({ - "email": adminEmail, - "password": adminPass, - }) - stopCode = 9 # SIGKILL - serverPort = 1337 - serverDefinition = json.dumps({ - "name": "netcat", - "node": 0, - "users": [ - adminName, - ], - "type": "netcat", - "run": { - "stopCode": stopCode, - "command": f"nc -l {serverPort}", - }, - "environment": { - "type": "standard", - }, - }) + curl = "curl --fail-with-body --silent" + baseURL = "http://localhost:8080" + adminName = "admin" + adminEmail = "admin@nixos.org" + adminPass = "admin" + adminCreds = json.dumps({ + "email": adminEmail, + "password": adminPass, + }) + stopCode = 9 # SIGKILL + serverPort = 1337 + serverDefinition = json.dumps({ + "name": "netcat", + "node": 0, + "users": [ + adminName, + ], + "type": "netcat", + "run": { + "stopCode": stopCode, + "command": f"nc -l {serverPort}", + }, + "environment": { + "type": "standard", + }, + }) - start_all() + start_all() - machine.wait_for_unit("pufferpanel.service") - machine.wait_for_open_port(5657) # SFTP - machine.wait_for_open_port(8080) # HTTP + machine.wait_for_unit("pufferpanel.service") + machine.wait_for_open_port(5657) # SFTP + machine.wait_for_open_port(8080) # HTTP - # Note that PufferPanel does not initialize database unless necessary. - # /api/config endpoint creates database file and triggers migrations. - # On success, we run a command to create administrator user that we use to - # interact with HTTP API. - resp = json.loads(machine.succeed(f"{curl} {baseURL}/api/config")) - assert resp["branding"]["name"] == "NixOS", "Invalid company name in configuration" - assert resp["registrationEnabled"] == False, "Expected registration to be disabled" + # Note that PufferPanel does not initialize database unless necessary. + # /api/config endpoint creates database file and triggers migrations. + # On success, we run a command to create administrator user that we use to + # interact with HTTP API. 
+ resp = json.loads(machine.succeed(f"{curl} {baseURL}/api/config")) + assert resp["branding"]["name"] == "NixOS", "Invalid company name in configuration" + assert resp["registrationEnabled"] == False, "Expected registration to be disabled" - machine.succeed(f"pufferpanel --workDir /var/lib/pufferpanel user add --admin --name {adminName} --email {adminEmail} --password {adminPass}") + machine.succeed(f"pufferpanel --workDir /var/lib/pufferpanel user add --admin --name {adminName} --email {adminEmail} --password {adminPass}") - resp = json.loads(machine.succeed(f"{curl} -d '{adminCreds}' {baseURL}/auth/login")) - assert "servers.admin" in resp["scopes"], "User is not administrator" - token = resp["session"] - authHeader = shlex.quote(f"Authorization: Bearer {token}") + resp = json.loads(machine.succeed(f"{curl} -d '{adminCreds}' {baseURL}/auth/login")) + assert "servers.admin" in resp["scopes"], "User is not administrator" + token = resp["session"] + authHeader = shlex.quote(f"Authorization: Bearer {token}") - resp = json.loads(machine.succeed(f"{curl} -H {authHeader} -H 'Content-Type: application/json' -d '{serverDefinition}' {baseURL}/api/servers")) - serverID = resp["id"] - machine.succeed(f"{curl} -X POST -H {authHeader} {baseURL}/proxy/daemon/server/{serverID}/start") - machine.wait_for_open_port(serverPort) - ''; - } -) + resp = json.loads(machine.succeed(f"{curl} -H {authHeader} -H 'Content-Type: application/json' -d '{serverDefinition}' {baseURL}/api/servers")) + serverID = resp["id"] + machine.succeed(f"{curl} -X POST -H {authHeader} {baseURL}/proxy/daemon/server/{serverID}/start") + machine.wait_for_open_port(serverPort) + ''; +} diff --git a/nixos/tests/pykms.nix b/nixos/tests/pykms.nix index e7df338a9fc2..035b9f314e67 100644 --- a/nixos/tests/pykms.nix +++ b/nixos/tests/pykms.nix @@ -1,23 +1,21 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "pykms-test"; - meta.maintainers = with pkgs.lib.maintainers; [ zopieux ]; +{ pkgs, ... }: +{ + name = "pykms-test"; + meta.maintainers = with pkgs.lib.maintainers; [ zopieux ]; - nodes.machine = - { - config, - lib, - pkgs, - ... - }: - { - services.pykms.enable = true; - }; + nodes.machine = + { + config, + lib, + pkgs, + ... + }: + { + services.pykms.enable = true; + }; - testScript = '' - machine.wait_for_unit("pykms.service") - machine.succeed("${pkgs.pykms}/bin/client") - ''; - } -) + testScript = '' + machine.wait_for_unit("pykms.service") + machine.succeed("${pkgs.pykms}/bin/client") + ''; +} diff --git a/nixos/tests/pyload.nix b/nixos/tests/pyload.nix index 9f87faec2b2c..317f1c43a53d 100644 --- a/nixos/tests/pyload.nix +++ b/nixos/tests/pyload.nix @@ -1,38 +1,36 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "pyload"; - meta.maintainers = with lib.maintainers; [ ambroisie ]; +{ lib, ... }: +{ + name = "pyload"; + meta.maintainers = with lib.maintainers; [ ambroisie ]; - nodes = { - machine = - { ... }: - { - services.pyload = { - enable = true; + nodes = { + machine = + { ... 
}: + { + services.pyload = { + enable = true; - listenAddress = "0.0.0.0"; - port = 9876; - }; - - networking.firewall.allowedTCPPorts = [ 9876 ]; + listenAddress = "0.0.0.0"; + port = 9876; }; - client = { }; - }; + networking.firewall.allowedTCPPorts = [ 9876 ]; + }; - testScript = '' - start_all() + client = { }; + }; - machine.wait_for_unit("pyload.service") + testScript = '' + start_all() - with subtest("Web interface accessible locally"): - machine.wait_until_succeeds("curl -fs localhost:9876") + machine.wait_for_unit("pyload.service") - client.wait_for_unit("network.target") + with subtest("Web interface accessible locally"): + machine.wait_until_succeeds("curl -fs localhost:9876") - with subtest("Web interface accessible from a different machine"): - client.wait_until_succeeds("curl -fs machine:9876") - ''; - } -) + client.wait_for_unit("network.target") + + with subtest("Web interface accessible from a different machine"): + client.wait_until_succeeds("curl -fs machine:9876") + ''; +} diff --git a/nixos/tests/qownnotes.nix b/nixos/tests/qownnotes.nix index 710492f59bf5..3c5de65e5cfc 100644 --- a/nixos/tests/qownnotes.nix +++ b/nixos/tests/qownnotes.nix @@ -1,88 +1,86 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - { - name = "qownnotes"; - meta.maintainers = [ lib.maintainers.pbek ]; +{ + name = "qownnotes"; + meta.maintainers = [ lib.maintainers.pbek ]; - nodes.machine = - { ... }: + nodes.machine = + { ... }: - { - imports = [ - ./common/user-account.nix - ./common/x11.nix - ]; + { + imports = [ + ./common/user-account.nix + ./common/x11.nix + ]; - test-support.displayManager.auto.user = "alice"; - environment.systemPackages = [ - pkgs.qownnotes - pkgs.xdotool - ]; - }; + test-support.displayManager.auto.user = "alice"; + environment.systemPackages = [ + pkgs.qownnotes + pkgs.xdotool + ]; + }; - enableOCR = true; + enableOCR = true; - # https://nixos.org/manual/nixos/stable/#ssec-machine-objects - testScript = - { nodes, ... }: - let - aliceDo = cmd: ''machine.succeed("su - alice -c '${cmd}' >&2 &");''; - in - '' - with subtest("Ensure X starts"): - start_all() - machine.wait_for_x() + # https://nixos.org/manual/nixos/stable/#ssec-machine-objects + testScript = + { nodes, ... 
}: + let + aliceDo = cmd: ''machine.succeed("su - alice -c '${cmd}' >&2 &");''; + in + '' + with subtest("Ensure X starts"): + start_all() + machine.wait_for_x() - with subtest("Check QOwnNotes version on CLI"): - ${aliceDo "qownnotes --version"} + with subtest("Check QOwnNotes version on CLI"): + ${aliceDo "qownnotes --version"} - machine.wait_for_console_text("QOwnNotes ${pkgs.qownnotes.version}") + machine.wait_for_console_text("QOwnNotes ${pkgs.qownnotes.version}") - with subtest("Ensure QOwnNotes starts"): - # start QOwnNotes window - ${aliceDo "qownnotes"} + with subtest("Ensure QOwnNotes starts"): + # start QOwnNotes window + ${aliceDo "qownnotes"} - machine.wait_for_text("Welcome to QOwnNotes") - machine.screenshot("QOwnNotes-Welcome") + machine.wait_for_text("Welcome to QOwnNotes") + machine.screenshot("QOwnNotes-Welcome") - with subtest("Finish first-run wizard"): - # The wizard should show up now - machine.wait_for_text("Note folder") - machine.send_key("ret") - machine.wait_for_console_text("Note path '/home/alice/Notes' was now created.") - machine.wait_for_text("Panel layout") - machine.send_key("ret") - machine.wait_for_text("Nextcloud") - machine.send_key("ret") - machine.wait_for_text("App metric") - machine.send_key("ret") + with subtest("Finish first-run wizard"): + # The wizard should show up now + machine.wait_for_text("Note folder") + machine.send_key("ret") + machine.wait_for_console_text("Note path '/home/alice/Notes' was now created.") + machine.wait_for_text("Panel layout") + machine.send_key("ret") + machine.wait_for_text("Nextcloud") + machine.send_key("ret") + machine.wait_for_text("App metric") + machine.send_key("ret") - # Doesn't work for non-root - #machine.wait_for_window("QOwnNotes - ${pkgs.qownnotes.version}") + # Doesn't work for non-root + #machine.wait_for_window("QOwnNotes - ${pkgs.qownnotes.version}") - # OCR doesn't seem to be able any more to handle the main window - #machine.wait_for_text("QOwnNotes - ${pkgs.qownnotes.version}") + # OCR doesn't seem to be able any more to handle the main window + #machine.wait_for_text("QOwnNotes - ${pkgs.qownnotes.version}") - # The main window should now show up - machine.wait_for_open_port(22222) - machine.wait_for_console_text("QOwnNotes server listening on port 22222") + # The main window should now show up + machine.wait_for_open_port(22222) + machine.wait_for_console_text("QOwnNotes server listening on port 22222") - machine.screenshot("QOwnNotes-DemoNote") + machine.screenshot("QOwnNotes-DemoNote") - with subtest("Create a new note"): - machine.send_key("ctrl-n") - machine.sleep(1) - machine.send_chars("This is a NixOS test!\n") - machine.wait_until_succeeds("find /home/alice/Notes -type f | grep -qi 'Note 2'") + with subtest("Create a new note"): + machine.send_key("ctrl-n") + machine.sleep(1) + machine.send_chars("This is a NixOS test!\n") + machine.wait_until_succeeds("find /home/alice/Notes -type f | grep -qi 'Note 2'") - # OCR doesn't seem to be able any more to handle the main window - #machine.wait_for_text("This is a NixOS test!") + # OCR doesn't seem to be able any more to handle the main window + #machine.wait_for_text("This is a NixOS test!") - # Doesn't work for non-root - #machine.wait_for_window("- QOwnNotes - ${pkgs.qownnotes.version}") + # Doesn't work for non-root + #machine.wait_for_window("- QOwnNotes - ${pkgs.qownnotes.version}") - machine.screenshot("QOwnNotes-NewNote") - ''; - } -) + machine.screenshot("QOwnNotes-NewNote") + ''; +} diff --git a/nixos/tests/quake3.nix 
b/nixos/tests/quake3.nix index cade2991de61..d56be93890a9 100644 --- a/nixos/tests/quake3.nix +++ b/nixos/tests/quake3.nix @@ -1,102 +1,100 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let +let - # Build Quake with coverage instrumentation. - overrides = pkgs: { - quake3game = pkgs.quake3game.override (args: { - stdenv = pkgs.stdenvAdapters.addCoverageInstrumentation args.stdenv; - }); + # Build Quake with coverage instrumentation. + overrides = pkgs: { + quake3game = pkgs.quake3game.override (args: { + stdenv = pkgs.stdenvAdapters.addCoverageInstrumentation args.stdenv; + }); + }; + + # Only allow the demo data to be used (only if it's unfreeRedistributable). + unfreePredicate = + pkg: + let + allowPackageNames = [ + "quake3-demodata" + "quake3-pointrelease" + ]; + allowLicenses = [ lib.licenses.unfreeRedistributable ]; + in + lib.elem pkg.pname allowPackageNames && lib.elem (pkg.meta.license or null) allowLicenses; + + client = + { pkgs, ... }: + + { + imports = [ ./common/x11.nix ]; + hardware.graphics.enable = true; + environment.systemPackages = [ pkgs.quake3demo ]; + nixpkgs.config.packageOverrides = overrides; + nixpkgs.config.allowUnfreePredicate = unfreePredicate; }; - # Only allow the demo data to be used (only if it's unfreeRedistributable). - unfreePredicate = - pkg: - let - allowPackageNames = [ - "quake3-demodata" - "quake3-pointrelease" - ]; - allowLicenses = [ lib.licenses.unfreeRedistributable ]; - in - lib.elem pkg.pname allowPackageNames && lib.elem (pkg.meta.license or null) allowLicenses; +in - client = +rec { + name = "quake3"; + meta = with lib.maintainers; { + maintainers = [ domenkozar ]; + }; + + # TODO: lcov doesn't work atm + #makeCoverageReport = true; + + nodes = { + server = { pkgs, ... }: { - imports = [ ./common/x11.nix ]; - hardware.graphics.enable = true; - environment.systemPackages = [ pkgs.quake3demo ]; + systemd.services.quake3-server = { + wantedBy = [ "multi-user.target" ]; + script = + "${pkgs.quake3demo}/bin/quake3-server +set g_gametype 0 " + + "+map q3dm7 +addbot grunt +addbot daemia 2> /tmp/log"; + }; nixpkgs.config.packageOverrides = overrides; nixpkgs.config.allowUnfreePredicate = unfreePredicate; + networking.firewall.allowedUDPPorts = [ 27960 ]; }; - in + client1 = client; + client2 = client; + }; - rec { - name = "quake3"; - meta = with lib.maintainers; { - maintainers = [ domenkozar ]; - }; + testScript = '' + start_all() - # TODO: lcov doesn't work atm - #makeCoverageReport = true; + server.wait_for_unit("quake3-server") + client1.wait_for_x() + client2.wait_for_x() - nodes = { - server = - { pkgs, ... 
}: + client1.execute("quake3 +set r_fullscreen 0 +set name Foo +connect server >&2 &", check_return = False) + client2.execute("quake3 +set r_fullscreen 0 +set name Bar +connect server >&2 &", check_return = False) - { - systemd.services.quake3-server = { - wantedBy = [ "multi-user.target" ]; - script = - "${pkgs.quake3demo}/bin/quake3-server +set g_gametype 0 " - + "+map q3dm7 +addbot grunt +addbot daemia 2> /tmp/log"; - }; - nixpkgs.config.packageOverrides = overrides; - nixpkgs.config.allowUnfreePredicate = unfreePredicate; - networking.firewall.allowedUDPPorts = [ 27960 ]; - }; + server.wait_until_succeeds("grep -q 'Foo.*entered the game' /tmp/log") + server.wait_until_succeeds("grep -q 'Bar.*entered the game' /tmp/log") - client1 = client; - client2 = client; - }; + server.sleep(10) # wait for a while to get a nice screenshot - testScript = '' - start_all() + client1.block() - server.wait_for_unit("quake3-server") - client1.wait_for_x() - client2.wait_for_x() + server.sleep(20) - client1.execute("quake3 +set r_fullscreen 0 +set name Foo +connect server >&2 &", check_return = False) - client2.execute("quake3 +set r_fullscreen 0 +set name Bar +connect server >&2 &", check_return = False) + client1.screenshot("screen1") + client2.screenshot("screen2") - server.wait_until_succeeds("grep -q 'Foo.*entered the game' /tmp/log") - server.wait_until_succeeds("grep -q 'Bar.*entered the game' /tmp/log") + client1.unblock() - server.sleep(10) # wait for a while to get a nice screenshot + server.sleep(10) - client1.block() + client1.screenshot("screen3") + client2.screenshot("screen4") - server.sleep(20) + client1.shutdown() + client2.shutdown() + server.stop_job("quake3-server") + ''; - client1.screenshot("screen1") - client2.screenshot("screen2") - - client1.unblock() - - server.sleep(10) - - client1.screenshot("screen3") - client2.screenshot("screen4") - - client1.shutdown() - client2.shutdown() - server.stop_job("quake3-server") - ''; - - } -) +} diff --git a/nixos/tests/quicktun.nix b/nixos/tests/quicktun.nix index 62b441f3fd78..22bd0dbdf52a 100644 --- a/nixos/tests/quicktun.nix +++ b/nixos/tests/quicktun.nix @@ -1,22 +1,20 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "quicktun"; - meta.maintainers = with lib.maintainers; [ h7x4 ]; +{ pkgs, lib, ... }: +{ + name = "quicktun"; + meta.maintainers = with lib.maintainers; [ h7x4 ]; - nodes = { - machine = - { ... }: - { - services.quicktun."test-tunnel" = { - protocol = "raw"; - }; + nodes = { + machine = + { ... }: + { + services.quicktun."test-tunnel" = { + protocol = "raw"; }; - }; + }; + }; - testScript = '' - start_all() - machine.wait_for_unit("quicktun-test-tunnel.service") - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("quicktun-test-tunnel.service") + ''; +} diff --git a/nixos/tests/quickwit.nix b/nixos/tests/quickwit.nix index 0dddd547fd06..f080285ca345 100644 --- a/nixos/tests/quickwit.nix +++ b/nixos/tests/quickwit.nix @@ -1,106 +1,104 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - let - # Define an example Quickwit index schema, - # and some `exampleDocs` below, to test if ingesting - # and querying works as expected. 
- index_yaml = '' - version: 0.7 - index_id: example_server_logs - doc_mapping: - mode: dynamic - field_mappings: - - name: datetime - type: datetime - fast: true - input_formats: - - iso8601 - output_format: iso8601 - fast_precision: seconds - fast: true - - name: git - type: text - tokenizer: raw - - name: hostname - type: text - tokenizer: raw - - name: level - type: text - tokenizer: raw - - name: message - type: text - - name: location - type: text - - name: source - type: text - timestamp_field: datetime +let + # Define an example Quickwit index schema, + # and some `exampleDocs` below, to test if ingesting + # and querying works as expected. + index_yaml = '' + version: 0.7 + index_id: example_server_logs + doc_mapping: + mode: dynamic + field_mappings: + - name: datetime + type: datetime + fast: true + input_formats: + - iso8601 + output_format: iso8601 + fast_precision: seconds + fast: true + - name: git + type: text + tokenizer: raw + - name: hostname + type: text + tokenizer: raw + - name: level + type: text + tokenizer: raw + - name: message + type: text + - name: location + type: text + - name: source + type: text + timestamp_field: datetime - search_settings: - default_search_fields: [message] + search_settings: + default_search_fields: [message] - indexing_settings: - commit_timeout_secs: 10 - ''; + indexing_settings: + commit_timeout_secs: 10 + ''; - exampleDocs = '' - {"datetime":"2024-05-03T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-1","level":"Info","message":"Processing request done","location":"path/to/server.c:6442:32","source":""} - {"datetime":"2024-05-04T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-1","level":"Info","message":"Got exception processing request: HTTP 404","location":"path/to/server.c:6444:32","source":""} - {"datetime":"2024-05-05T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-1","level":"Info","message":"Got exception processing request: HTTP 404","location":"path/to/server.c:6444:32","source":""} - {"datetime":"2024-05-06T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-2","level":"Info","message":"Got exception processing request: HTTP 404","location":"path/to/server.c:6444:32","source":""} - ''; - in - { - name = "quickwit"; - meta.maintainers = [ pkgs.lib.maintainers.happysalada ]; + exampleDocs = '' + {"datetime":"2024-05-03T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-1","level":"Info","message":"Processing request done","location":"path/to/server.c:6442:32","source":""} + {"datetime":"2024-05-04T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-1","level":"Info","message":"Got exception processing request: HTTP 404","location":"path/to/server.c:6444:32","source":""} + {"datetime":"2024-05-05T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-1","level":"Info","message":"Got exception processing request: HTTP 404","location":"path/to/server.c:6444:32","source":""} + {"datetime":"2024-05-06T02:36:41.017674444Z","git":"e6e1f087ce12065e44ed3b87b50784e6f9bcc2f9","hostname":"machine-2","level":"Info","message":"Got exception processing request: HTTP 404","location":"path/to/server.c:6444:32","source":""} + ''; +in +{ + name = "quickwit"; + meta.maintainers = [ pkgs.lib.maintainers.happysalada ]; - nodes = { - quickwit = - { config, pkgs, ... 
}: - { - services.quickwit.enable = true; - }; - }; + nodes = { + quickwit = + { config, pkgs, ... }: + { + services.quickwit.enable = true; + }; + }; - testScript = '' - quickwit.wait_for_unit("quickwit") - quickwit.wait_for_open_port(7280) - quickwit.wait_for_open_port(7281) + testScript = '' + quickwit.wait_for_unit("quickwit") + quickwit.wait_for_open_port(7280) + quickwit.wait_for_open_port(7281) - quickwit.wait_until_succeeds( - "journalctl -o cat -u quickwit.service | grep 'version: ${pkgs.quickwit.version}'" - ) + quickwit.wait_until_succeeds( + "journalctl -o cat -u quickwit.service | grep 'version: ${pkgs.quickwit.version}'" + ) - quickwit.wait_until_succeeds( - "journalctl -o cat -u quickwit.service | grep 'transitioned to ready state'" - ) + quickwit.wait_until_succeeds( + "journalctl -o cat -u quickwit.service | grep 'transitioned to ready state'" + ) - with subtest("verify UI installed"): - machine.succeed("curl -sSf http://127.0.0.1:7280/ui/") + with subtest("verify UI installed"): + machine.succeed("curl -sSf http://127.0.0.1:7280/ui/") - with subtest("injest and query data"): - import json + with subtest("injest and query data"): + import json - # Test CLI ingestion - print(machine.succeed('${pkgs.quickwit}/bin/quickwit index create --index-config ${pkgs.writeText "index.yaml" index_yaml}')) - # Important to use `--wait`, otherwise the queries below race with index processing. - print(machine.succeed('${pkgs.quickwit}/bin/quickwit index ingest --index example_server_logs --input-path ${pkgs.writeText "exampleDocs.json" exampleDocs} --wait')) + # Test CLI ingestion + print(machine.succeed('${pkgs.quickwit}/bin/quickwit index create --index-config ${pkgs.writeText "index.yaml" index_yaml}')) + # Important to use `--wait`, otherwise the queries below race with index processing. + print(machine.succeed('${pkgs.quickwit}/bin/quickwit index ingest --index example_server_logs --input-path ${pkgs.writeText "exampleDocs.json" exampleDocs} --wait')) - # Test CLI query - cli_query_output = machine.succeed('${pkgs.quickwit}/bin/quickwit index search --index example_server_logs --query "exception"') - print(cli_query_output) + # Test CLI query + cli_query_output = machine.succeed('${pkgs.quickwit}/bin/quickwit index search --index example_server_logs --query "exception"') + print(cli_query_output) - # Assert query result is as expected. - num_hits = len(json.loads(cli_query_output)["hits"]) - assert num_hits == 3, f"cli_query_output contains unexpected number of results: {num_hits}" + # Assert query result is as expected. + num_hits = len(json.loads(cli_query_output)["hits"]) + assert num_hits == 3, f"cli_query_output contains unexpected number of results: {num_hits}" - # Test API query - api_query_output = machine.succeed('curl --fail http://127.0.0.1:7280/api/v1/example_server_logs/search?query=exception') - print(api_query_output) + # Test API query + api_query_output = machine.succeed('curl --fail http://127.0.0.1:7280/api/v1/example_server_logs/search?query=exception') + print(api_query_output) - quickwit.log(quickwit.succeed( - "systemd-analyze security quickwit.service | grep -v '✓'" - )) - ''; - } -) + quickwit.log(quickwit.succeed( + "systemd-analyze security quickwit.service | grep -v '✓'" + )) + ''; +} diff --git a/nixos/tests/quorum.nix b/nixos/tests/quorum.nix index 1a38ad434658..0e67f3564070 100644 --- a/nixos/tests/quorum.nix +++ b/nixos/tests/quorum.nix @@ -1,105 +1,103 @@ -import ./make-test-python.nix ( - { pkgs, ... 
}: - let - keystore = { - address = "9377bc3936de934c497e22917b81aa8774ac3bb0"; - crypto = { - cipher = "aes-128-ctr"; - ciphertext = "ad8341d8ef225650403fd366c955f41095e438dd966a3c84b3d406818c1e366c"; - cipherparams = { - iv = "2a09f7a72fd6dff7c43150ff437e6ac2"; - }; - kdf = "scrypt"; - kdfparams = { - dklen = 32; - n = 262144; - p = 1; - r = 8; - salt = "d1a153845bb80cd6274c87c5bac8ac09fdfac5ff131a6f41b5ed319667f12027"; - }; - mac = "a9621ad88fa1d042acca6fc2fcd711f7e05bfbadea3f30f379235570c8e270d3"; +{ pkgs, ... }: +let + keystore = { + address = "9377bc3936de934c497e22917b81aa8774ac3bb0"; + crypto = { + cipher = "aes-128-ctr"; + ciphertext = "ad8341d8ef225650403fd366c955f41095e438dd966a3c84b3d406818c1e366c"; + cipherparams = { + iv = "2a09f7a72fd6dff7c43150ff437e6ac2"; }; - id = "89e847a3-1527-42f6-a321-77de0a14ce02"; - version = 3; - }; - keystore-file = pkgs.writeText "keystore-file" (builtins.toJSON keystore); - in - { - name = "quorum"; - meta = with pkgs.lib.maintainers; { - maintainers = [ mmahut ]; + kdf = "scrypt"; + kdfparams = { + dklen = 32; + n = 262144; + p = 1; + r = 8; + salt = "d1a153845bb80cd6274c87c5bac8ac09fdfac5ff131a6f41b5ed319667f12027"; + }; + mac = "a9621ad88fa1d042acca6fc2fcd711f7e05bfbadea3f30f379235570c8e270d3"; }; + id = "89e847a3-1527-42f6-a321-77de0a14ce02"; + version = 3; + }; + keystore-file = pkgs.writeText "keystore-file" (builtins.toJSON keystore); +in +{ + name = "quorum"; + meta = with pkgs.lib.maintainers; { + maintainers = [ mmahut ]; + }; - nodes = { - machine = - { ... }: - { - services.quorum = { - enable = true; - permissioned = false; - staticNodes = [ - "enode://dd333ec28f0a8910c92eb4d336461eea1c20803eed9cf2c056557f986e720f8e693605bba2f4e8f289b1162e5ac7c80c914c7178130711e393ca76abc1d92f57@0.0.0.0:30303?discport=0" - ]; - genesis = { - alloc = { - "189d23d201b03ae1cf9113672df29a5d672aefa3" = { - balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; - }; - "44b07d2c28b8ed8f02b45bd84ac7d9051b3349e6" = { - balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; - }; - "4c1ccd426833b9782729a212c857f2f03b7b4c0d" = { - balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; - }; - "7ae555d0f6faad7930434abdaac2274fd86ab516" = { - balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; - }; - c1056df7c02b6f1a353052eaf0533cc7cb743b52 = { - balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; - }; + nodes = { + machine = + { ... 
}: + { + services.quorum = { + enable = true; + permissioned = false; + staticNodes = [ + "enode://dd333ec28f0a8910c92eb4d336461eea1c20803eed9cf2c056557f986e720f8e693605bba2f4e8f289b1162e5ac7c80c914c7178130711e393ca76abc1d92f57@0.0.0.0:30303?discport=0" + ]; + genesis = { + alloc = { + "189d23d201b03ae1cf9113672df29a5d672aefa3" = { + balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; }; - coinbase = "0x0000000000000000000000000000000000000000"; - config = { - byzantiumBlock = 1; - chainId = 10; - eip150Block = 1; - eip150Hash = "0x0000000000000000000000000000000000000000000000000000000000000000"; - eip155Block = 1; - eip158Block = 1; - homesteadBlock = 1; - isQuorum = true; - istanbul = { - epoch = 30000; - policy = 0; - }; + "44b07d2c28b8ed8f02b45bd84ac7d9051b3349e6" = { + balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; + }; + "4c1ccd426833b9782729a212c857f2f03b7b4c0d" = { + balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; + }; + "7ae555d0f6faad7930434abdaac2274fd86ab516" = { + balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; + }; + c1056df7c02b6f1a353052eaf0533cc7cb743b52 = { + balance = "0x446c3b15f9926687d2c40534fdb564000000000000"; }; - difficulty = "0x1"; - extraData = "0x0000000000000000000000000000000000000000000000000000000000000000f8aff869944c1ccd426833b9782729a212c857f2f03b7b4c0d94189d23d201b03ae1cf9113672df29a5d672aefa39444b07d2c28b8ed8f02b45bd84ac7d9051b3349e694c1056df7c02b6f1a353052eaf0533cc7cb743b52947ae555d0f6faad7930434abdaac2274fd86ab516b8410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0"; - gasLimit = "0xe0000000"; - gasUsed = "0x0"; - mixHash = "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365"; - nonce = "0x0"; - number = "0x0"; - parentHash = "0x0000000000000000000000000000000000000000000000000000000000000000"; - timestamp = "0x5cffc201"; }; + coinbase = "0x0000000000000000000000000000000000000000"; + config = { + byzantiumBlock = 1; + chainId = 10; + eip150Block = 1; + eip150Hash = "0x0000000000000000000000000000000000000000000000000000000000000000"; + eip155Block = 1; + eip158Block = 1; + homesteadBlock = 1; + isQuorum = true; + istanbul = { + epoch = 30000; + policy = 0; + }; + }; + difficulty = "0x1"; + extraData = "0x0000000000000000000000000000000000000000000000000000000000000000f8aff869944c1ccd426833b9782729a212c857f2f03b7b4c0d94189d23d201b03ae1cf9113672df29a5d672aefa39444b07d2c28b8ed8f02b45bd84ac7d9051b3349e694c1056df7c02b6f1a353052eaf0533cc7cb743b52947ae555d0f6faad7930434abdaac2274fd86ab516b8410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0"; + gasLimit = "0xe0000000"; + gasUsed = "0x0"; + mixHash = "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365"; + nonce = "0x0"; + number = "0x0"; + parentHash = "0x0000000000000000000000000000000000000000000000000000000000000000"; + timestamp = "0x5cffc201"; }; }; - }; + }; + }; - testScript = '' - start_all() - machine.succeed("mkdir -p /var/lib/quorum/keystore") - machine.succeed( - 'cp ${keystore-file} /var/lib/quorum/keystore/UTC--2020-03-23T11-08-34.144812212Z--${keystore.address}' - ) - machine.succeed( - "echo fe2725c4e8f7617764b845e8d939a65c664e7956eb47ed7d934573f16488efc1 > /var/lib/quorum/nodekey" - ) - machine.succeed("systemctl restart quorum") - machine.wait_for_unit("quorum.service") - machine.sleep(15) - machine.succeed('geth attach /var/lib/quorum/geth.ipc --exec 
"eth.accounts" | grep ${keystore.address}') - ''; - } -) + testScript = '' + start_all() + machine.succeed("mkdir -p /var/lib/quorum/keystore") + machine.succeed( + 'cp ${keystore-file} /var/lib/quorum/keystore/UTC--2020-03-23T11-08-34.144812212Z--${keystore.address}' + ) + machine.succeed( + "echo fe2725c4e8f7617764b845e8d939a65c664e7956eb47ed7d934573f16488efc1 > /var/lib/quorum/nodekey" + ) + machine.succeed("systemctl restart quorum") + machine.wait_for_unit("quorum.service") + machine.sleep(15) + machine.succeed('geth attach /var/lib/quorum/geth.ipc --exec "eth.accounts" | grep ${keystore.address}') + ''; +} diff --git a/nixos/tests/rabbitmq.nix b/nixos/tests/rabbitmq.nix index cb6dba27a64c..94c7bdf49ba5 100644 --- a/nixos/tests/rabbitmq.nix +++ b/nixos/tests/rabbitmq.nix @@ -1,63 +1,61 @@ # This test runs rabbitmq and checks if rabbitmq is up and running. -import ./make-test-python.nix ( - { pkgs, ... }: - let - # in real life, you would keep this out of your repo and deploy it to a safe - # location using safe means. - configKeyPath = pkgs.writeText "fake-config-key" "hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01"; - in - { - name = "rabbitmq"; - meta = with pkgs.lib.maintainers; { - maintainers = [ offline ]; +{ pkgs, ... }: +let + # in real life, you would keep this out of your repo and deploy it to a safe + # location using safe means. + configKeyPath = pkgs.writeText "fake-config-key" "hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01"; +in +{ + name = "rabbitmq"; + meta = with pkgs.lib.maintainers; { + maintainers = [ offline ]; + }; + + nodes.machine = { + services.rabbitmq = { + enable = true; + managementPlugin.enable = true; + + # To encrypt: + # rabbitmqctl --quiet encode --cipher blowfish_cfb64 --hash sha256 \ + # --iterations 10000 '<<"dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw">>' \ + # "hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01" ; + config = '' + [ { rabbit + , [ {default_user, <<"alice">>} + , { default_pass + , {encrypted,<<"oKKxyTze9PYmsEfl6FG1MxIUhxY7WPQL7HBoMPRC/1ZOdOZbtr9+DxjWW3e1D5SL48n3D9QOsGD0cOgYG7Qdvb7Txrepw8w=">>} + } + , {config_entry_decoder + , [ {passphrase, {file, <<"${configKeyPath}">>}} + , {cipher, blowfish_cfb64} + , {hash, sha256} + , {iterations, 10000} + ] + } + % , {rabbitmq_management, [{path_prefix, "/_queues"}]} + ] + } + ]. + ''; }; + # Ensure there is sufficient extra disk space for rabbitmq to be happy + virtualisation.diskSize = 1024; + }; - nodes.machine = { - services.rabbitmq = { - enable = true; - managementPlugin.enable = true; + testScript = '' + machine.start() - # To encrypt: - # rabbitmqctl --quiet encode --cipher blowfish_cfb64 --hash sha256 \ - # --iterations 10000 '<<"dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw">>' \ - # "hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01" ; - config = '' - [ { rabbit - , [ {default_user, <<"alice">>} - , { default_pass - , {encrypted,<<"oKKxyTze9PYmsEfl6FG1MxIUhxY7WPQL7HBoMPRC/1ZOdOZbtr9+DxjWW3e1D5SL48n3D9QOsGD0cOgYG7Qdvb7Txrepw8w=">>} - } - , {config_entry_decoder - , [ {passphrase, {file, <<"${configKeyPath}">>}} - , {cipher, blowfish_cfb64} - , {hash, sha256} - , {iterations, 10000} - ] - } - % , {rabbitmq_management, [{path_prefix, "/_queues"}]} - ] - } - ]. 
- ''; - }; - # Ensure there is sufficient extra disk space for rabbitmq to be happy - virtualisation.diskSize = 1024; - }; + machine.wait_for_unit("rabbitmq.service") + machine.wait_until_succeeds( + 'su -s ${pkgs.runtimeShell} rabbitmq -c "rabbitmqctl status"' + ) + machine.wait_for_open_port(15672) - testScript = '' - machine.start() - - machine.wait_for_unit("rabbitmq.service") - machine.wait_until_succeeds( - 'su -s ${pkgs.runtimeShell} rabbitmq -c "rabbitmqctl status"' - ) - machine.wait_for_open_port(15672) - - # The password is the plaintext that was encrypted with rabbitmqctl encode above. - machine.wait_until_succeeds( - 'echo Hello World | ${pkgs.lib.getExe pkgs.amqpcat} --producer --uri=amqp://alice:dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw@localhost --queue test' - ) - ''; - } -) + # The password is the plaintext that was encrypted with rabbitmqctl encode above. + machine.wait_until_succeeds( + 'echo Hello World | ${pkgs.lib.getExe pkgs.amqpcat} --producer --uri=amqp://alice:dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw@localhost --queue test' + ) + ''; +} diff --git a/nixos/tests/radarr.nix b/nixos/tests/radarr.nix index ac093beeb790..00949dcc7695 100644 --- a/nixos/tests/radarr.nix +++ b/nixos/tests/radarr.nix @@ -1,20 +1,18 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "radarr"; - meta.maintainers = with lib.maintainers; [ etu ]; +{ + name = "radarr"; + meta.maintainers = with lib.maintainers; [ etu ]; - nodes.machine = - { pkgs, ... }: - { - services.radarr.enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.radarr.enable = true; + }; - testScript = '' - machine.wait_for_unit("radarr.service") - machine.wait_for_open_port(7878) - machine.succeed("curl --fail http://localhost:7878/") - ''; - } -) + testScript = '' + machine.wait_for_unit("radarr.service") + machine.wait_for_open_port(7878) + machine.succeed("curl --fail http://localhost:7878/") + ''; +} diff --git a/nixos/tests/radicale.nix b/nixos/tests/radicale.nix index 96c17a657b3a..210be285c988 100644 --- a/nixos/tests/radicale.nix +++ b/nixos/tests/radicale.nix @@ -1,100 +1,98 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - let - user = "someuser"; - password = "some_password"; - port = "5232"; - filesystem_folder = "/data/radicale"; +let + user = "someuser"; + password = "some_password"; + port = "5232"; + filesystem_folder = "/data/radicale"; - cli = "${lib.getExe pkgs.calendar-cli} --caldav-user ${user} --caldav-pass ${password}"; - in - { - name = "radicale3"; - meta.maintainers = with lib.maintainers; [ dotlambda ]; + cli = "${lib.getExe pkgs.calendar-cli} --caldav-user ${user} --caldav-pass ${password}"; +in +{ + name = "radicale3"; + meta.maintainers = with lib.maintainers; [ dotlambda ]; - nodes.machine = - { pkgs, ... }: - { - services.radicale = { - enable = true; - settings = { - auth = { - type = "htpasswd"; - htpasswd_filename = "/etc/radicale/users"; - htpasswd_encryption = "bcrypt"; - }; - storage = { - inherit filesystem_folder; - hook = "git add -A && (git diff --cached --quiet || git commit -m 'Changes by '%(user)s)"; - }; - logging.level = "info"; + nodes.machine = + { pkgs, ... 
}: + { + services.radicale = { + enable = true; + settings = { + auth = { + type = "htpasswd"; + htpasswd_filename = "/etc/radicale/users"; + htpasswd_encryption = "bcrypt"; }; - rights = { - principal = { - user = ".+"; - collection = "{user}"; - permissions = "RW"; - }; - calendars = { - user = ".+"; - collection = "{user}/[^/]+"; - permissions = "rw"; - }; + storage = { + inherit filesystem_folder; + hook = "git add -A && (git diff --cached --quiet || git commit -m 'Changes by '%(user)s)"; + }; + logging.level = "info"; + }; + rights = { + principal = { + user = ".+"; + collection = "{user}"; + permissions = "RW"; + }; + calendars = { + user = ".+"; + collection = "{user}/[^/]+"; + permissions = "rw"; }; }; - systemd.services.radicale.path = [ pkgs.git ]; - environment.systemPackages = [ pkgs.git ]; - systemd.tmpfiles.rules = [ "d ${filesystem_folder} 0750 radicale radicale -" ]; - # WARNING: DON'T DO THIS IN PRODUCTION! - # This puts unhashed secrets directly into the Nix store for ease of testing. - environment.etc."radicale/users".source = pkgs.runCommand "htpasswd" { } '' - ${pkgs.apacheHttpd}/bin/htpasswd -bcB "$out" ${user} ${password} - ''; }; - testScript = '' - machine.wait_for_unit("radicale.service") - machine.wait_for_open_port(${port}) + systemd.services.radicale.path = [ pkgs.git ]; + environment.systemPackages = [ pkgs.git ]; + systemd.tmpfiles.rules = [ "d ${filesystem_folder} 0750 radicale radicale -" ]; + # WARNING: DON'T DO THIS IN PRODUCTION! + # This puts unhashed secrets directly into the Nix store for ease of testing. + environment.etc."radicale/users".source = pkgs.runCommand "htpasswd" { } '' + ${pkgs.apacheHttpd}/bin/htpasswd -bcB "$out" ${user} ${password} + ''; + }; + testScript = '' + machine.wait_for_unit("radicale.service") + machine.wait_for_open_port(${port}) - machine.succeed("sudo -u radicale git -C ${filesystem_folder} init") - machine.succeed( - "sudo -u radicale git -C ${filesystem_folder} config --local user.email radicale@example.com" - ) - machine.succeed( - "sudo -u radicale git -C ${filesystem_folder} config --local user.name radicale" - ) + machine.succeed("sudo -u radicale git -C ${filesystem_folder} init") + machine.succeed( + "sudo -u radicale git -C ${filesystem_folder} config --local user.email radicale@example.com" + ) + machine.succeed( + "sudo -u radicale git -C ${filesystem_folder} config --local user.name radicale" + ) - with subtest("Test calendar and event creation"): - machine.succeed( - "${cli} --caldav-url http://localhost:${port}/${user} calendar create cal" - ) - machine.succeed("test -d ${filesystem_folder}/collection-root/${user}/cal") - machine.succeed('test -z "$(ls ${filesystem_folder}/collection-root/${user}/cal)"') - machine.succeed( - "${cli} --caldav-url http://localhost:${port}/${user}/cal calendar add 2021-04-23 testevent" - ) - machine.succeed('test -n "$(ls ${filesystem_folder}/collection-root/${user}/cal)"') - (status, stdout) = machine.execute( - "sudo -u radicale git -C ${filesystem_folder} log --format=oneline | wc -l" - ) - assert status == 0, "git log failed" - assert stdout == "3\n", "there should be exactly 3 commits" + with subtest("Test calendar and event creation"): + machine.succeed( + "${cli} --caldav-url http://localhost:${port}/${user} calendar create cal" + ) + machine.succeed("test -d ${filesystem_folder}/collection-root/${user}/cal") + machine.succeed('test -z "$(ls ${filesystem_folder}/collection-root/${user}/cal)"') + machine.succeed( + "${cli} --caldav-url http://localhost:${port}/${user}/cal 
calendar add 2021-04-23 testevent" + ) + machine.succeed('test -n "$(ls ${filesystem_folder}/collection-root/${user}/cal)"') + (status, stdout) = machine.execute( + "sudo -u radicale git -C ${filesystem_folder} log --format=oneline | wc -l" + ) + assert status == 0, "git log failed" + assert stdout == "3\n", "there should be exactly 3 commits" - with subtest("Test rights file"): - machine.fail( - "${cli} --caldav-url http://localhost:${port}/${user} calendar create sub/cal" - ) - machine.fail( - "${cli} --caldav-url http://localhost:${port}/otheruser calendar create cal" - ) + with subtest("Test rights file"): + machine.fail( + "${cli} --caldav-url http://localhost:${port}/${user} calendar create sub/cal" + ) + machine.fail( + "${cli} --caldav-url http://localhost:${port}/otheruser calendar create cal" + ) - with subtest("Test web interface"): - machine.succeed("curl --fail http://${user}:${password}@localhost:${port}/.web/") + with subtest("Test web interface"): + machine.succeed("curl --fail http://${user}:${password}@localhost:${port}/.web/") - with subtest("Test security"): - output = machine.succeed("systemd-analyze security radicale.service") - machine.log(output) - assert output[-9:-1] == "SAFE :-}" - ''; - } -) + with subtest("Test security"): + output = machine.succeed("systemd-analyze security radicale.service") + machine.log(output) + assert output[-9:-1] == "SAFE :-}" + ''; +} diff --git a/nixos/tests/rasdaemon.nix b/nixos/tests/rasdaemon.nix index c7b147a1ca52..cc7b3fc3c670 100644 --- a/nixos/tests/rasdaemon.nix +++ b/nixos/tests/rasdaemon.nix @@ -1,38 +1,36 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "rasdaemon"; - meta = with pkgs.lib.maintainers; { - maintainers = [ evils ]; +{ pkgs, ... }: +{ + name = "rasdaemon"; + meta = with pkgs.lib.maintainers; { + maintainers = [ evils ]; + }; + + nodes.machine = + { pkgs, ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; + hardware.rasdaemon = { + enable = true; + # should be enabled by default, just making sure + record = true; + # nonsense label + labels = '' + vendor: none + product: none + model: none + DIMM_0: 0.0.0; + ''; + }; }; - nodes.machine = - { pkgs, ... 
}: - { - imports = [ ../modules/profiles/minimal.nix ]; - hardware.rasdaemon = { - enable = true; - # should be enabled by default, just making sure - record = true; - # nonsense label - labels = '' - vendor: none - product: none - model: none - DIMM_0: 0.0.0; - ''; - }; - }; - - testScript = '' - start_all() - machine.wait_for_unit("multi-user.target") - # confirm rasdaemon is running and has a valid database - # some disk errors detected in qemu for some reason ¯\_(ツ)_/¯ - machine.wait_until_succeeds("ras-mc-ctl --errors | tee /dev/stderr | grep -q 'No .* errors.'") - # confirm the supplied labels text made it into the system - machine.succeed("grep -q 'vendor: none' /etc/ras/dimm_labels.d/labels >&2") - machine.shutdown() - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("multi-user.target") + # confirm rasdaemon is running and has a valid database + # some disk errors detected in qemu for some reason ¯\_(ツ)_/¯ + machine.wait_until_succeeds("ras-mc-ctl --errors | tee /dev/stderr | grep -q 'No .* errors.'") + # confirm the supplied labels text made it into the system + machine.succeed("grep -q 'vendor: none' /etc/ras/dimm_labels.d/labels >&2") + machine.shutdown() + ''; +} diff --git a/nixos/tests/readarr.nix b/nixos/tests/readarr.nix index d6d5fdd586b5..6a0e25742632 100644 --- a/nixos/tests/readarr.nix +++ b/nixos/tests/readarr.nix @@ -1,19 +1,17 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "readarr"; - meta.maintainers = with lib.maintainers; [ jocelynthode ]; +{ lib, ... }: +{ + name = "readarr"; + meta.maintainers = with lib.maintainers; [ jocelynthode ]; - nodes.machine = - { pkgs, ... }: - { - services.readarr.enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.readarr.enable = true; + }; - testScript = '' - machine.wait_for_unit("readarr.service") - machine.wait_for_open_port(8787) - machine.succeed("curl --fail http://localhost:8787/") - ''; - } -) + testScript = '' + machine.wait_for_unit("readarr.service") + machine.wait_for_open_port(8787) + machine.succeed("curl --fail http://localhost:8787/") + ''; +} diff --git a/nixos/tests/realm.nix b/nixos/tests/realm.nix index 826dab61dbf2..353e595fbd3d 100644 --- a/nixos/tests/realm.nix +++ b/nixos/tests/realm.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "realm"; +{ lib, pkgs, ... }: +{ + name = "realm"; - meta = { - maintainers = with lib.maintainers; [ ocfox ]; - }; + meta = { + maintainers = with lib.maintainers; [ ocfox ]; + }; - nodes.machine = - { pkgs, ... }: - { - services.nginx = { - enable = true; - statusPage = true; - }; - # realm need DNS resolv server to run or use config.dns.nameserver - services.resolved.enable = true; + nodes.machine = + { pkgs, ... 
}: + { + services.nginx = { + enable = true; + statusPage = true; + }; + # realm need DNS resolv server to run or use config.dns.nameserver + services.resolved.enable = true; - services.realm = { - enable = true; - config = { - endpoints = [ - { - listen = "0.0.0.0:1000"; - remote = "127.0.0.1:80"; - } - ]; - }; + services.realm = { + enable = true; + config = { + endpoints = [ + { + listen = "0.0.0.0:1000"; + remote = "127.0.0.1:80"; + } + ]; }; }; + }; - testScript = '' - machine.wait_for_unit("nginx.service") - machine.wait_for_unit("realm.service") + testScript = '' + machine.wait_for_unit("nginx.service") + machine.wait_for_unit("realm.service") - machine.wait_for_open_port(80) - machine.wait_for_open_port(1000) + machine.wait_for_open_port(80) + machine.wait_for_open_port(1000) - machine.succeed("curl --fail http://localhost:1000/") - ''; + machine.succeed("curl --fail http://localhost:1000/") + ''; - } -) +} diff --git a/nixos/tests/redlib.nix b/nixos/tests/redlib.nix index 55bfd6be4720..e03df475fe3d 100644 --- a/nixos/tests/redlib.nix +++ b/nixos/tests/redlib.nix @@ -1,31 +1,29 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "redlib"; - meta.maintainers = with lib.maintainers; [ - bpeetz - Guanran928 - ]; +{ lib, pkgs, ... }: +{ + name = "redlib"; + meta.maintainers = with lib.maintainers; [ + bpeetz + Guanran928 + ]; - nodes.machine = { - services.redlib = { - package = pkgs.redlib; - enable = true; - # Test CAP_NET_BIND_SERVICE - port = 80; + nodes.machine = { + services.redlib = { + package = pkgs.redlib; + enable = true; + # Test CAP_NET_BIND_SERVICE + port = 80; - settings = { - REDLIB_DEFAULT_USE_HLS = true; - }; + settings = { + REDLIB_DEFAULT_USE_HLS = true; }; }; + }; - testScript = '' - machine.wait_for_unit("redlib.service") - machine.wait_for_open_port(80) - # Query a page that does not require Internet access - machine.succeed("curl --fail http://localhost:80/settings") - machine.succeed("curl --fail http://localhost:80/info | grep 'Use HLSon'") - ''; - } -) + testScript = '' + machine.wait_for_unit("redlib.service") + machine.wait_for_open_port(80) + # Query a page that does not require Internet access + machine.succeed("curl --fail http://localhost:80/settings") + machine.succeed("curl --fail http://localhost:80/info | grep 'Use HLSon'") + ''; +} diff --git a/nixos/tests/renovate.nix b/nixos/tests/renovate.nix index 52bcc867173c..d6adb3f76801 100644 --- a/nixos/tests/renovate.nix +++ b/nixos/tests/renovate.nix @@ -1,74 +1,72 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "renovate"; - meta.maintainers = with pkgs.lib.maintainers; [ - marie - natsukium - ]; +{ pkgs, ... }: +{ + name = "renovate"; + meta.maintainers = with pkgs.lib.maintainers; [ + marie + natsukium + ]; - nodes.machine = - { config, ... }: - { - services.renovate = { - enable = true; - settings = { - platform = "gitea"; - endpoint = "http://localhost:3000"; - autodiscover = true; - gitAuthor = "Renovate "; - }; - credentials = { - RENOVATE_TOKEN = "/etc/renovate-token"; - }; + nodes.machine = + { config, ... 
}: + { + services.renovate = { + enable = true; + settings = { + platform = "gitea"; + endpoint = "http://localhost:3000"; + autodiscover = true; + gitAuthor = "Renovate "; }; - environment.systemPackages = [ - config.services.forgejo.package - pkgs.tea - pkgs.git - ]; - services.forgejo = { - enable = true; - settings.server.HTTP_PORT = 3000; + credentials = { + RENOVATE_TOKEN = "/etc/renovate-token"; }; - # Uncomment the next line to lint service scripts (Note: breaks VM startup; see #373166) - #systemd.enableStrictShellChecks = true; }; + environment.systemPackages = [ + config.services.forgejo.package + pkgs.tea + pkgs.git + ]; + services.forgejo = { + enable = true; + settings.server.HTTP_PORT = 3000; + }; + # Uncomment the next line to lint service scripts (Note: breaks VM startup; see #373166) + #systemd.enableStrictShellChecks = true; + }; - testScript = '' - def gitea(command): - return machine.succeed(f"cd /var/lib/forgejo && sudo --user=forgejo GITEA_WORK_DIR=/var/lib/forgejo GITEA_CUSTOM=/var/lib/forgejo/custom gitea {command}") + testScript = '' + def gitea(command): + return machine.succeed(f"cd /var/lib/forgejo && sudo --user=forgejo GITEA_WORK_DIR=/var/lib/forgejo GITEA_CUSTOM=/var/lib/forgejo/custom gitea {command}") - machine.wait_for_unit("forgejo.service") - machine.wait_for_open_port(3000) + machine.wait_for_unit("forgejo.service") + machine.wait_for_open_port(3000) - machine.systemctl("stop forgejo.service") + machine.systemctl("stop forgejo.service") - gitea("admin user create --username meow --email meow@example.com --password meow") + gitea("admin user create --username meow --email meow@example.com --password meow") - machine.systemctl("start forgejo.service") - machine.wait_for_unit("forgejo.service") - machine.wait_for_open_port(3000) + machine.systemctl("start forgejo.service") + machine.wait_for_unit("forgejo.service") + machine.wait_for_open_port(3000) - accessToken = gitea("admin user generate-access-token --raw --username meow --scopes all | tr -d '\n'") + accessToken = gitea("admin user generate-access-token --raw --username meow --scopes all | tr -d '\n'") - machine.succeed(f"tea login add --name default --user meow --token '{accessToken}' --password meow --url http://localhost:3000") - machine.succeed("tea repo create --name kitty --init") - machine.succeed("git config --global user.name Meow") - machine.succeed("git config --global user.email meow@example.com") - machine.succeed(f"git clone http://meow:{accessToken}@localhost:3000/meow/kitty.git /tmp/kitty") - machine.succeed("echo '{ \"name\": \"meow\", \"version\": \"0.1.0\" }' > /tmp/kitty/package.json") - machine.succeed("git -C /tmp/kitty add /tmp/kitty/package.json") - machine.succeed("git -C /tmp/kitty commit -m 'add package.json'") - machine.succeed("git -C /tmp/kitty push origin") + machine.succeed(f"tea login add --name default --user meow --token '{accessToken}' --password meow --url http://localhost:3000") + machine.succeed("tea repo create --name kitty --init") + machine.succeed("git config --global user.name Meow") + machine.succeed("git config --global user.email meow@example.com") + machine.succeed(f"git clone http://meow:{accessToken}@localhost:3000/meow/kitty.git /tmp/kitty") + machine.succeed("echo '{ \"name\": \"meow\", \"version\": \"0.1.0\" }' > /tmp/kitty/package.json") + machine.succeed("git -C /tmp/kitty add /tmp/kitty/package.json") + machine.succeed("git -C /tmp/kitty commit -m 'add package.json'") + machine.succeed("git -C /tmp/kitty push origin") - machine.succeed(f"echo 
'{accessToken}' > /etc/renovate-token") - machine.systemctl("start --wait renovate.service") + machine.succeed(f"echo '{accessToken}' > /etc/renovate-token") + machine.systemctl("start --wait renovate.service") - machine.succeed("tea pulls list --repo meow/kitty | grep 'Configure Renovate'") - machine.succeed("tea pulls merge --repo meow/kitty 1") + machine.succeed("tea pulls list --repo meow/kitty | grep 'Configure Renovate'") + machine.succeed("tea pulls merge --repo meow/kitty 1") - machine.systemctl("start --wait renovate.service") - ''; - } -) + machine.systemctl("start --wait renovate.service") + ''; +} diff --git a/nixos/tests/restart-by-activation-script.nix b/nixos/tests/restart-by-activation-script.nix index cf89aa341ada..6d26d9843aff 100644 --- a/nixos/tests/restart-by-activation-script.nix +++ b/nixos/tests/restart-by-activation-script.nix @@ -1,81 +1,79 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "restart-by-activation-script"; - meta = with pkgs.lib.maintainers; { - maintainers = [ das_j ]; - }; +{ pkgs, ... }: +{ + name = "restart-by-activation-script"; + meta = with pkgs.lib.maintainers; { + maintainers = [ das_j ]; + }; - nodes.machine = - { pkgs, ... }: - { - imports = [ ../modules/profiles/minimal.nix ]; + nodes.machine = + { pkgs, ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; - system.switch.enable = true; + system.switch.enable = true; - systemd.services.restart-me = { - wantedBy = [ "multi-user.target" ]; - serviceConfig = { - Type = "oneshot"; - RemainAfterExit = true; - ExecStart = "${pkgs.coreutils}/bin/true"; - }; - }; - - systemd.services.reload-me = { - wantedBy = [ "multi-user.target" ]; - serviceConfig = rec { - Type = "oneshot"; - RemainAfterExit = true; - ExecStart = "${pkgs.coreutils}/bin/true"; - ExecReload = ExecStart; - }; - }; - - system.activationScripts.test = { - supportsDryActivation = true; - text = '' - if [ -e /test-the-activation-script ]; then - if [ "$NIXOS_ACTION" != dry-activate ]; then - touch /activation-was-run - echo restart-me.service > /run/nixos/activation-restart-list - echo reload-me.service > /run/nixos/activation-reload-list - else - echo restart-me.service > /run/nixos/dry-activation-restart-list - echo reload-me.service > /run/nixos/dry-activation-reload-list - fi - fi - ''; + systemd.services.restart-me = { + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + ExecStart = "${pkgs.coreutils}/bin/true"; }; }; - testScript = # python - '' - machine.wait_for_unit("multi-user.target") + systemd.services.reload-me = { + wantedBy = [ "multi-user.target" ]; + serviceConfig = rec { + Type = "oneshot"; + RemainAfterExit = true; + ExecStart = "${pkgs.coreutils}/bin/true"; + ExecReload = ExecStart; + }; + }; - with subtest("nothing happens when the activation script does nothing"): - out = machine.succeed("/run/current-system/bin/switch-to-configuration dry-activate 2>&1") - assert 'restart' not in out - assert 'reload' not in out - out = machine.succeed("/run/current-system/bin/switch-to-configuration test") - assert 'restart' not in out - assert 'reload' not in out + system.activationScripts.test = { + supportsDryActivation = true; + text = '' + if [ -e /test-the-activation-script ]; then + if [ "$NIXOS_ACTION" != dry-activate ]; then + touch /activation-was-run + echo restart-me.service > /run/nixos/activation-restart-list + echo reload-me.service > /run/nixos/activation-reload-list + else + echo restart-me.service > 
/run/nixos/dry-activation-restart-list + echo reload-me.service > /run/nixos/dry-activation-reload-list + fi + fi + ''; + }; + }; - machine.succeed("touch /test-the-activation-script") + testScript = # python + '' + machine.wait_for_unit("multi-user.target") - with subtest("dry activation"): - out = machine.succeed("/run/current-system/bin/switch-to-configuration dry-activate 2>&1") - assert 'would restart the following units: restart-me.service' in out - assert 'would reload the following units: reload-me.service' in out - machine.fail("test -f /run/nixos/dry-activation-restart-list") - machine.fail("test -f /run/nixos/dry-activation-reload-list") + with subtest("nothing happens when the activation script does nothing"): + out = machine.succeed("/run/current-system/bin/switch-to-configuration dry-activate 2>&1") + assert 'restart' not in out + assert 'reload' not in out + out = machine.succeed("/run/current-system/bin/switch-to-configuration test") + assert 'restart' not in out + assert 'reload' not in out - with subtest("real activation"): - out = machine.succeed("/run/current-system/bin/switch-to-configuration test 2>&1") - assert 'restarting the following units: restart-me.service' in out - assert 'reloading the following units: reload-me.service' in out - machine.fail("test -f /run/nixos/activation-restart-list") - machine.fail("test -f /run/nixos/activation-reload-list") - ''; - } -) + machine.succeed("touch /test-the-activation-script") + + with subtest("dry activation"): + out = machine.succeed("/run/current-system/bin/switch-to-configuration dry-activate 2>&1") + assert 'would restart the following units: restart-me.service' in out + assert 'would reload the following units: reload-me.service' in out + machine.fail("test -f /run/nixos/dry-activation-restart-list") + machine.fail("test -f /run/nixos/dry-activation-reload-list") + + with subtest("real activation"): + out = machine.succeed("/run/current-system/bin/switch-to-configuration test 2>&1") + assert 'restarting the following units: restart-me.service' in out + assert 'reloading the following units: reload-me.service' in out + machine.fail("test -f /run/nixos/activation-restart-list") + machine.fail("test -f /run/nixos/activation-reload-list") + ''; +} diff --git a/nixos/tests/restic-rest-server.nix b/nixos/tests/restic-rest-server.nix index ecca1bf83af3..9e6753d91b2a 100644 --- a/nixos/tests/restic-rest-server.nix +++ b/nixos/tests/restic-rest-server.nix @@ -1,129 +1,127 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - remoteRepository = "rest:http://restic_rest_server:8001/"; +let + remoteRepository = "rest:http://restic_rest_server:8001/"; - backupPrepareCommand = '' - touch /root/backupPrepareCommand - test ! -e /root/backupCleanupCommand + backupPrepareCommand = '' + touch /root/backupPrepareCommand + test ! 
-e /root/backupCleanupCommand + ''; + + backupCleanupCommand = '' + rm /root/backupPrepareCommand + touch /root/backupCleanupCommand + ''; + + testDir = pkgs.stdenvNoCC.mkDerivation { + name = "test-files-to-backup"; + unpackPhase = "true"; + installPhase = '' + mkdir $out + echo some_file > $out/some_file + echo some_other_file > $out/some_other_file + mkdir $out/a_dir + echo a_file > $out/a_dir/a_file ''; + }; - backupCleanupCommand = '' - rm /root/backupPrepareCommand - touch /root/backupCleanupCommand - ''; + passwordFile = "${pkgs.writeText "password" "correcthorsebatterystaple"}"; + paths = [ "/opt" ]; + exclude = [ "/opt/excluded_file_*" ]; + pruneOpts = [ + "--keep-daily 2" + "--keep-weekly 1" + "--keep-monthly 1" + "--keep-yearly 99" + ]; +in +{ + name = "restic-rest-server"; - testDir = pkgs.stdenvNoCC.mkDerivation { - name = "test-files-to-backup"; - unpackPhase = "true"; - installPhase = '' - mkdir $out - echo some_file > $out/some_file - echo some_other_file > $out/some_other_file - mkdir $out/a_dir - echo a_file > $out/a_dir/a_file - ''; - }; - - passwordFile = "${pkgs.writeText "password" "correcthorsebatterystaple"}"; - paths = [ "/opt" ]; - exclude = [ "/opt/excluded_file_*" ]; - pruneOpts = [ - "--keep-daily 2" - "--keep-weekly 1" - "--keep-monthly 1" - "--keep-yearly 99" - ]; - in - { - name = "restic-rest-server"; - - nodes = { - restic_rest_server = { - services.restic.server = { - enable = true; - extraFlags = [ "--no-auth" ]; - listenAddress = "8001"; - }; - networking.firewall.allowedTCPPorts = [ 8001 ]; + nodes = { + restic_rest_server = { + services.restic.server = { + enable = true; + extraFlags = [ "--no-auth" ]; + listenAddress = "8001"; }; - server = { - services.restic.backups = { - remotebackup = { - inherit - passwordFile - paths - exclude - pruneOpts - backupPrepareCommand - backupCleanupCommand - ; - repository = remoteRepository; - initialize = true; - timerConfig = null; # has no effect here, just checking that it doesn't break the service - }; - remoteprune = { - inherit passwordFile; - repository = remoteRepository; - pruneOpts = [ "--keep-last 1" ]; - }; + networking.firewall.allowedTCPPorts = [ 8001 ]; + }; + server = { + services.restic.backups = { + remotebackup = { + inherit + passwordFile + paths + exclude + pruneOpts + backupPrepareCommand + backupCleanupCommand + ; + repository = remoteRepository; + initialize = true; + timerConfig = null; # has no effect here, just checking that it doesn't break the service + }; + remoteprune = { + inherit passwordFile; + repository = remoteRepository; + pruneOpts = [ "--keep-last 1" ]; }; }; }; + }; - testScript = '' - restic_rest_server.start() - server.start() - restic_rest_server.wait_for_unit("restic-rest-server.socket") - restic_rest_server.wait_for_open_port(8001) - server.wait_for_unit("dbus.socket") - server.fail( - "restic-remotebackup snapshots", - ) - server.succeed( - # set up - "cp -rT ${testDir} /opt", - "touch /opt/excluded_file_1 /opt/excluded_file_2", + testScript = '' + restic_rest_server.start() + server.start() + restic_rest_server.wait_for_unit("restic-rest-server.socket") + restic_rest_server.wait_for_open_port(8001) + server.wait_for_unit("dbus.socket") + server.fail( + "restic-remotebackup snapshots", + ) + server.succeed( + # set up + "cp -rT ${testDir} /opt", + "touch /opt/excluded_file_1 /opt/excluded_file_2", - # test that remotebackup runs custom commands and produces a snapshot - "timedatectl set-time '2016-12-13 13:45'", - "systemctl start restic-backups-remotebackup.service", 
- "rm /root/backupCleanupCommand", - 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', + # test that remotebackup runs custom commands and produces a snapshot + "timedatectl set-time '2016-12-13 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", + 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', - # test that restoring that snapshot produces the same directory - "mkdir /tmp/restore-1", - "restic-remotebackup restore latest -t /tmp/restore-1", - "diff -ru ${testDir} /tmp/restore-1/opt", + # test that restoring that snapshot produces the same directory + "mkdir /tmp/restore-1", + "restic-remotebackup restore latest -t /tmp/restore-1", + "diff -ru ${testDir} /tmp/restore-1/opt", - # test that we can create four snapshots in remotebackup and rclonebackup - "timedatectl set-time '2017-12-13 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", + # test that we can create four snapshots in remotebackup and rclonebackup + "timedatectl set-time '2017-12-13 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", - "timedatectl set-time '2018-12-13 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", + "timedatectl set-time '2018-12-13 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", - "timedatectl set-time '2018-12-14 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", + "timedatectl set-time '2018-12-14 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", - "timedatectl set-time '2018-12-15 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", + "timedatectl set-time '2018-12-15 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", - "timedatectl set-time '2018-12-16 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", + "timedatectl set-time '2018-12-16 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", - 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"', + 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"', - # test that remoteprune brings us back to 1 snapshot in remotebackup - "systemctl start restic-backups-remoteprune.service", - 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', - ) - ''; - } -) + # test that remoteprune brings us back to 1 snapshot in remotebackup + "systemctl start restic-backups-remoteprune.service", + 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', + ) + ''; +} diff --git a/nixos/tests/restic.nix b/nixos/tests/restic.nix index 54d2a88c4545..44e405fe4852 100644 --- a/nixos/tests/restic.nix +++ b/nixos/tests/restic.nix @@ -1,249 +1,247 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - let - remoteRepository = "/root/restic-backup"; - remoteFromFileRepository = "/root/restic-backup-from-file"; - remoteInhibitTestRepository = "/root/restic-backup-inhibit-test"; - remoteNoInitRepository = "/root/restic-backup-no-init"; - rcloneRepository = "rclone:local:/root/restic-rclone-backup"; +let + remoteRepository = "/root/restic-backup"; + remoteFromFileRepository = "/root/restic-backup-from-file"; + remoteInhibitTestRepository = "/root/restic-backup-inhibit-test"; + remoteNoInitRepository = "/root/restic-backup-no-init"; + rcloneRepository = "rclone:local:/root/restic-rclone-backup"; - backupPrepareCommand = '' - touch /root/backupPrepareCommand - test ! -e /root/backupCleanupCommand + backupPrepareCommand = '' + touch /root/backupPrepareCommand + test ! -e /root/backupCleanupCommand + ''; + + backupCleanupCommand = '' + rm /root/backupPrepareCommand + touch /root/backupCleanupCommand + ''; + + testDir = pkgs.stdenvNoCC.mkDerivation { + name = "test-files-to-backup"; + unpackPhase = "true"; + installPhase = '' + mkdir $out + echo some_file > $out/some_file + echo some_other_file > $out/some_other_file + mkdir $out/a_dir + echo a_file > $out/a_dir/a_file + echo a_file_2 > $out/a_dir/a_file_2 ''; + }; - backupCleanupCommand = '' - rm /root/backupPrepareCommand - touch /root/backupCleanupCommand - ''; + passwordFile = "${pkgs.writeText "password" "correcthorsebatterystaple"}"; + paths = [ "/opt" ]; + exclude = [ "/opt/excluded_file_*" ]; + pruneOpts = [ + "--keep-daily 2" + "--keep-weekly 1" + "--keep-monthly 1" + "--keep-yearly 99" + ]; +in +{ + name = "restic"; - testDir = pkgs.stdenvNoCC.mkDerivation { - name = "test-files-to-backup"; - unpackPhase = "true"; - installPhase = '' - mkdir $out - echo some_file > $out/some_file - echo some_other_file > $out/some_other_file - mkdir $out/a_dir - echo a_file > $out/a_dir/a_file - echo a_file_2 > $out/a_dir/a_file_2 - ''; - }; - - passwordFile = "${pkgs.writeText "password" "correcthorsebatterystaple"}"; - paths = [ "/opt" ]; - exclude = [ "/opt/excluded_file_*" ]; - pruneOpts = [ - "--keep-daily 2" - "--keep-weekly 1" - "--keep-monthly 1" - "--keep-yearly 99" + meta = with pkgs.lib.maintainers; { + maintainers = [ + bbigras + i077 ]; - in - { - name = "restic"; + }; - meta = with pkgs.lib.maintainers; { - maintainers = [ - bbigras - i077 - ]; - }; - - nodes = { - server = - { pkgs, ... }: - { - services.restic.backups = { - remotebackup = { - inherit - passwordFile - paths - exclude - pruneOpts - backupPrepareCommand - backupCleanupCommand - ; - repository = remoteRepository; - initialize = true; - timerConfig = null; # has no effect here, just checking that it doesn't break the service - }; - remote-from-file-backup = { - inherit passwordFile exclude pruneOpts; - initialize = true; - repositoryFile = pkgs.writeText "repositoryFile" remoteFromFileRepository; - paths = [ - "/opt/a_dir/a_file" - "/opt/a_dir/a_file_2" - ]; - dynamicFilesFrom = '' - find /opt -mindepth 1 -maxdepth 1 ! 
-name a_dir # all files in /opt except for a_dir - ''; - }; - inhibit-test = { - inherit - passwordFile - paths - exclude - pruneOpts - ; - repository = remoteInhibitTestRepository; - initialize = true; - inhibitsSleep = true; - }; - remote-noinit-backup = { - inherit - passwordFile - exclude - pruneOpts - paths - ; - initialize = false; - repository = remoteNoInitRepository; - }; - rclonebackup = { - inherit - passwordFile - paths - exclude - pruneOpts - ; - initialize = true; - repository = rcloneRepository; - rcloneConfig = { - type = "local"; - one_file_system = true; - }; - - # This gets overridden by rcloneConfig.type - rcloneConfigFile = pkgs.writeText "rclone.conf" '' - [local] - type=ftp - ''; - }; - remoteprune = { - inherit passwordFile; - repository = remoteRepository; - pruneOpts = [ "--keep-last 1" ]; - }; - custompackage = { - inherit passwordFile paths; - repository = "some-fake-repository"; - package = pkgs.writeShellScriptBin "restic" '' - echo "$@" >> /root/fake-restic.log; - ''; - - pruneOpts = [ "--keep-last 1" ]; - checkOpts = [ "--some-check-option" ]; - }; + nodes = { + server = + { pkgs, ... }: + { + services.restic.backups = { + remotebackup = { + inherit + passwordFile + paths + exclude + pruneOpts + backupPrepareCommand + backupCleanupCommand + ; + repository = remoteRepository; + initialize = true; + timerConfig = null; # has no effect here, just checking that it doesn't break the service }; + remote-from-file-backup = { + inherit passwordFile exclude pruneOpts; + initialize = true; + repositoryFile = pkgs.writeText "repositoryFile" remoteFromFileRepository; + paths = [ + "/opt/a_dir/a_file" + "/opt/a_dir/a_file_2" + ]; + dynamicFilesFrom = '' + find /opt -mindepth 1 -maxdepth 1 ! -name a_dir # all files in /opt except for a_dir + ''; + }; + inhibit-test = { + inherit + passwordFile + paths + exclude + pruneOpts + ; + repository = remoteInhibitTestRepository; + initialize = true; + inhibitsSleep = true; + }; + remote-noinit-backup = { + inherit + passwordFile + exclude + pruneOpts + paths + ; + initialize = false; + repository = remoteNoInitRepository; + }; + rclonebackup = { + inherit + passwordFile + paths + exclude + pruneOpts + ; + initialize = true; + repository = rcloneRepository; + rcloneConfig = { + type = "local"; + one_file_system = true; + }; - environment.sessionVariables.RCLONE_CONFIG_LOCAL_TYPE = "local"; + # This gets overridden by rcloneConfig.type + rcloneConfigFile = pkgs.writeText "rclone.conf" '' + [local] + type=ftp + ''; + }; + remoteprune = { + inherit passwordFile; + repository = remoteRepository; + pruneOpts = [ "--keep-last 1" ]; + }; + custompackage = { + inherit passwordFile paths; + repository = "some-fake-repository"; + package = pkgs.writeShellScriptBin "restic" '' + echo "$@" >> /root/fake-restic.log; + ''; + + pruneOpts = [ "--keep-last 1" ]; + checkOpts = [ "--some-check-option" ]; + }; }; - }; - testScript = '' - server.start() - server.wait_for_unit("dbus.socket") - server.fail( - "restic-remotebackup snapshots", - 'restic-remote-from-file-backup snapshots"', - "restic-rclonebackup snapshots", - "grep 'backup.* /opt' /root/fake-restic.log", - ) - server.succeed( - # set up - "cp -rT ${testDir} /opt", - "touch /opt/excluded_file_1 /opt/excluded_file_2", - "mkdir -p /root/restic-rclone-backup", - ) + environment.sessionVariables.RCLONE_CONFIG_LOCAL_TYPE = "local"; + }; + }; - server.fail( - # test that noinit backup in fact does not initialize the repository - # and thus fails without a pre-initialized repository - "systemctl 
start restic-backups-remote-noinit-backup.service", - ) + testScript = '' + server.start() + server.wait_for_unit("dbus.socket") + server.fail( + "restic-remotebackup snapshots", + 'restic-remote-from-file-backup snapshots"', + "restic-rclonebackup snapshots", + "grep 'backup.* /opt' /root/fake-restic.log", + ) + server.succeed( + # set up + "cp -rT ${testDir} /opt", + "touch /opt/excluded_file_1 /opt/excluded_file_2", + "mkdir -p /root/restic-rclone-backup", + ) - server.succeed( - # test that remotebackup runs custom commands and produces a snapshot - "timedatectl set-time '2016-12-13 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", - 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', + server.fail( + # test that noinit backup in fact does not initialize the repository + # and thus fails without a pre-initialized repository + "systemctl start restic-backups-remote-noinit-backup.service", + ) - # test that restoring that snapshot produces the same directory - "mkdir /tmp/restore-1", - "restic-remotebackup restore latest -t /tmp/restore-1", - "diff -ru ${testDir} /tmp/restore-1/opt", + server.succeed( + # test that remotebackup runs custom commands and produces a snapshot + "timedatectl set-time '2016-12-13 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", + 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', - # test that remote-from-file-backup produces a snapshot - "systemctl start restic-backups-remote-from-file-backup.service", - 'restic-remote-from-file-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', - "mkdir /tmp/restore-2", - "restic-remote-from-file-backup restore latest -t /tmp/restore-2", - "diff -ru ${testDir} /tmp/restore-2/opt", + # test that restoring that snapshot produces the same directory + "mkdir /tmp/restore-1", + "restic-remotebackup restore latest -t /tmp/restore-1", + "diff -ru ${testDir} /tmp/restore-1/opt", - # test that remote-noinit-backup produces a snapshot once initialized - "restic-remote-noinit-backup init", - "systemctl start restic-backups-remote-noinit-backup.service", - 'restic-remote-noinit-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', + # test that remote-from-file-backup produces a snapshot + "systemctl start restic-backups-remote-from-file-backup.service", + 'restic-remote-from-file-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', + "mkdir /tmp/restore-2", + "restic-remote-from-file-backup restore latest -t /tmp/restore-2", + "diff -ru ${testDir} /tmp/restore-2/opt", - # test that restoring that snapshot produces the same directory - "mkdir /tmp/restore-3", - "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} restore latest -t /tmp/restore-3", - "diff -ru ${testDir} /tmp/restore-3/opt", + # test that remote-noinit-backup produces a snapshot once initialized + "restic-remote-noinit-backup init", + "systemctl start restic-backups-remote-noinit-backup.service", + 'restic-remote-noinit-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', - # test that rclonebackup produces a snapshot - "systemctl start restic-backups-rclonebackup.service", - 'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . 
== 1"', + # test that restoring that snapshot produces the same directory + "mkdir /tmp/restore-3", + "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} restore latest -t /tmp/restore-3", + "diff -ru ${testDir} /tmp/restore-3/opt", - # test that custompackage runs both `restic backup` and `restic check` with reasonable commandlines - "systemctl start restic-backups-custompackage.service", - "grep 'backup' /root/fake-restic.log", - "grep 'check.* --some-check-option' /root/fake-restic.log", + # test that rclonebackup produces a snapshot + "systemctl start restic-backups-rclonebackup.service", + 'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', - # test that we can create four snapshots in remotebackup and rclonebackup - "timedatectl set-time '2017-12-13 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", - "systemctl start restic-backups-rclonebackup.service", + # test that custompackage runs both `restic backup` and `restic check` with reasonable commandlines + "systemctl start restic-backups-custompackage.service", + "grep 'backup' /root/fake-restic.log", + "grep 'check.* --some-check-option' /root/fake-restic.log", - "timedatectl set-time '2018-12-13 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", - "systemctl start restic-backups-rclonebackup.service", + # test that we can create four snapshots in remotebackup and rclonebackup + "timedatectl set-time '2017-12-13 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", + "systemctl start restic-backups-rclonebackup.service", - "timedatectl set-time '2018-12-14 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", - "systemctl start restic-backups-rclonebackup.service", + "timedatectl set-time '2018-12-13 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", + "systemctl start restic-backups-rclonebackup.service", - "timedatectl set-time '2018-12-15 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", - "systemctl start restic-backups-rclonebackup.service", + "timedatectl set-time '2018-12-14 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", + "systemctl start restic-backups-rclonebackup.service", - "timedatectl set-time '2018-12-16 13:45'", - "systemctl start restic-backups-remotebackup.service", - "rm /root/backupCleanupCommand", - "systemctl start restic-backups-rclonebackup.service", + "timedatectl set-time '2018-12-15 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", + "systemctl start restic-backups-rclonebackup.service", - 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"', - 'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"', + "timedatectl set-time '2018-12-16 13:45'", + "systemctl start restic-backups-remotebackup.service", + "rm /root/backupCleanupCommand", + "systemctl start restic-backups-rclonebackup.service", - # test that remoteprune brings us back to 1 snapshot in remotebackup - "systemctl start restic-backups-remoteprune.service", - 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', + 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . 
== 4"', + 'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"', - ) + # test that remoteprune brings us back to 1 snapshot in remotebackup + "systemctl start restic-backups-remoteprune.service", + 'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"', - # test that the inhibit option is working - server.systemctl("start --no-block restic-backups-inhibit-test.service") - server.wait_until_succeeds( - "systemd-inhibit --no-legend --no-pager | grep -q restic", - 5 - ) - ''; - } -) + ) + + # test that the inhibit option is working + server.systemctl("start --no-block restic-backups-inhibit-test.service") + server.wait_until_succeeds( + "systemd-inhibit --no-legend --no-pager | grep -q restic", + 5 + ) + ''; +} diff --git a/nixos/tests/retroarch.nix b/nixos/tests/retroarch.nix index ac03cbffefea..9f700ed290ef 100644 --- a/nixos/tests/retroarch.nix +++ b/nixos/tests/retroarch.nix @@ -1,56 +1,54 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "retroarch"; - meta = with pkgs.lib; { - maintainers = teams.libretro.members ++ [ maintainers.j0hax ]; - }; +{ + name = "retroarch"; + meta = with pkgs.lib; { + maintainers = teams.libretro.members ++ [ maintainers.j0hax ]; + }; - nodes.machine = - { ... }: + nodes.machine = + { ... }: - { - imports = [ ./common/user-account.nix ]; - services.xserver.enable = true; - services.xserver.desktopManager.retroarch = { + { + imports = [ ./common/user-account.nix ]; + services.xserver.enable = true; + services.xserver.desktopManager.retroarch = { + enable = true; + package = pkgs.retroarch-bare; + }; + services.xserver.displayManager = { + sddm.enable = true; + defaultSession = "RetroArch"; + autoLogin = { enable = true; - package = pkgs.retroarch-bare; - }; - services.xserver.displayManager = { - sddm.enable = true; - defaultSession = "RetroArch"; - autoLogin = { - enable = true; - user = "alice"; - }; + user = "alice"; }; }; + }; - testScript = - { nodes, ... }: - let - user = nodes.machine.config.users.users.alice; - xdo = "${pkgs.xdotool}/bin/xdotool"; - in - '' - with subtest("Wait for login"): - start_all() - machine.wait_for_file("/tmp/xauth_*") - machine.succeed("xauth merge /tmp/xauth_*") + testScript = + { nodes, ... 
}: + let + user = nodes.machine.config.users.users.alice; + xdo = "${pkgs.xdotool}/bin/xdotool"; + in + '' + with subtest("Wait for login"): + start_all() + machine.wait_for_file("/tmp/xauth_*") + machine.succeed("xauth merge /tmp/xauth_*") - with subtest("Check RetroArch started"): - machine.wait_until_succeeds("pgrep retroarch") - machine.wait_for_window("^RetroArch") + with subtest("Check RetroArch started"): + machine.wait_until_succeeds("pgrep retroarch") + machine.wait_for_window("^RetroArch") - with subtest("Check configuration created"): - machine.wait_for_file("${user.home}/.config/retroarch/retroarch.cfg") + with subtest("Check configuration created"): + machine.wait_for_file("${user.home}/.config/retroarch/retroarch.cfg") - with subtest("Wait to get a screenshot"): - machine.execute( - "${xdo} key Alt+F1 sleep 10" - ) - machine.screenshot("screen") - ''; - } -) + with subtest("Wait to get a screenshot"): + machine.execute( + "${xdo} key Alt+F1 sleep 10" + ) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/robustirc-bridge.nix b/nixos/tests/robustirc-bridge.nix index 31b4240f9836..2f6f48df757c 100644 --- a/nixos/tests/robustirc-bridge.nix +++ b/nixos/tests/robustirc-bridge.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "robustirc-bridge"; - meta = with pkgs.lib.maintainers; { - maintainers = [ hax404 ]; - }; +{ + name = "robustirc-bridge"; + meta = with pkgs.lib.maintainers; { + maintainers = [ hax404 ]; + }; - nodes = { - bridge = { - services.robustirc-bridge = { - enable = true; - extraFlags = [ - "-listen localhost:6667" - "-network example.com" - ]; - }; + nodes = { + bridge = { + services.robustirc-bridge = { + enable = true; + extraFlags = [ + "-listen localhost:6667" + "-network example.com" + ]; }; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - bridge.wait_for_unit("robustirc-bridge.service") - bridge.wait_for_open_port(1080) - bridge.wait_for_open_port(6667) - ''; - } -) + bridge.wait_for_unit("robustirc-bridge.service") + bridge.wait_for_open_port(1080) + bridge.wait_for_open_port(6667) + ''; +} diff --git a/nixos/tests/rosenpass.nix b/nixos/tests/rosenpass.nix index cb0937b58971..9af5ef608bfc 100644 --- a/nixos/tests/rosenpass.nix +++ b/nixos/tests/rosenpass.nix @@ -1,223 +1,221 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - deviceName = "rp0"; +{ pkgs, ... }: +let + deviceName = "rp0"; - server = { - ip = "fe80::1"; - wg = { - public = "mQufmDFeQQuU/fIaB2hHgluhjjm1ypK4hJr1cW3WqAw="; - secret = "4N5Y1dldqrpsbaEiY8O0XBUGUFf8vkvtBtm8AoOX7Eo="; - listen = 10000; - }; + server = { + ip = "fe80::1"; + wg = { + public = "mQufmDFeQQuU/fIaB2hHgluhjjm1ypK4hJr1cW3WqAw="; + secret = "4N5Y1dldqrpsbaEiY8O0XBUGUFf8vkvtBtm8AoOX7Eo="; + listen = 10000; }; - client = { - ip = "fe80::2"; - wg = { - public = "Mb3GOlT7oS+F3JntVKiaD7SpHxLxNdtEmWz/9FMnRFU="; - secret = "uC5dfGMv7Oxf5UDfdPkj6rZiRZT2dRWp5x8IQxrNcUE="; - }; + }; + client = { + ip = "fe80::2"; + wg = { + public = "Mb3GOlT7oS+F3JntVKiaD7SpHxLxNdtEmWz/9FMnRFU="; + secret = "uC5dfGMv7Oxf5UDfdPkj6rZiRZT2dRWp5x8IQxrNcUE="; }; - in - { - name = "rosenpass"; + }; +in +{ + name = "rosenpass"; - nodes = - let - shared = - peer: - { config, modulesPath, ... }: - { - imports = [ "${modulesPath}/services/networking/rosenpass.nix" ]; + nodes = + let + shared = + peer: + { config, modulesPath, ... 
}: + { + imports = [ "${modulesPath}/services/networking/rosenpass.nix" ]; - boot.kernelModules = [ "wireguard" ]; + boot.kernelModules = [ "wireguard" ]; - services.rosenpass = { - enable = true; - defaultDevice = deviceName; - settings = { - verbosity = "Verbose"; - public_key = "/etc/rosenpass/pqpk"; - secret_key = "/etc/rosenpass/pqsk"; - }; - }; - - networking.firewall.allowedUDPPorts = [ 9999 ]; - - systemd.network = { - enable = true; - networks."rosenpass" = { - matchConfig.Name = deviceName; - networkConfig.IPv4Forwarding = true; - networkConfig.IPv6Forwarding = true; - address = [ "${peer.ip}/64" ]; - }; - - netdevs."10-rp0" = { - netdevConfig = { - Kind = "wireguard"; - Name = deviceName; - }; - wireguardConfig.PrivateKeyFile = "/etc/wireguard/wgsk"; - }; - }; - - environment.etc."wireguard/wgsk" = { - text = peer.wg.secret; - user = "systemd-network"; - group = "systemd-network"; + services.rosenpass = { + enable = true; + defaultDevice = deviceName; + settings = { + verbosity = "Verbose"; + public_key = "/etc/rosenpass/pqpk"; + secret_key = "/etc/rosenpass/pqsk"; }; }; - in - { - server = { - imports = [ (shared server) ]; - networking.firewall.allowedUDPPorts = [ server.wg.listen ]; + networking.firewall.allowedUDPPorts = [ 9999 ]; - systemd.network.netdevs."10-${deviceName}" = { - wireguardConfig.ListenPort = server.wg.listen; - wireguardPeers = [ - { - AllowedIPs = [ "::/0" ]; - PublicKey = client.wg.public; - } - ]; + systemd.network = { + enable = true; + networks."rosenpass" = { + matchConfig.Name = deviceName; + networkConfig.IPv4Forwarding = true; + networkConfig.IPv6Forwarding = true; + address = [ "${peer.ip}/64" ]; + }; + + netdevs."10-rp0" = { + netdevConfig = { + Kind = "wireguard"; + Name = deviceName; + }; + wireguardConfig.PrivateKeyFile = "/etc/wireguard/wgsk"; + }; }; - services.rosenpass.settings = { - listen = [ "0.0.0.0:9999" ]; - peers = [ - { - public_key = "/etc/rosenpass/peers/client/pqpk"; - peer = client.wg.public; - } - ]; + environment.etc."wireguard/wgsk" = { + text = peer.wg.secret; + user = "systemd-network"; + group = "systemd-network"; }; }; - client = { - imports = [ (shared client) ]; + in + { + server = { + imports = [ (shared server) ]; - systemd.network.netdevs."10-${deviceName}".wireguardPeers = [ + networking.firewall.allowedUDPPorts = [ server.wg.listen ]; + + systemd.network.netdevs."10-${deviceName}" = { + wireguardConfig.ListenPort = server.wg.listen; + wireguardPeers = [ { AllowedIPs = [ "::/0" ]; - PublicKey = server.wg.public; - Endpoint = "server:${builtins.toString server.wg.listen}"; + PublicKey = client.wg.public; } ]; + }; - services.rosenpass.settings.peers = [ + services.rosenpass.settings = { + listen = [ "0.0.0.0:9999" ]; + peers = [ { - public_key = "/etc/rosenpass/peers/server/pqpk"; - endpoint = "server:9999"; - peer = server.wg.public; + public_key = "/etc/rosenpass/peers/client/pqpk"; + peer = client.wg.public; } ]; }; }; + client = { + imports = [ (shared client) ]; - testScript = - { ... }: - '' - from os import system + systemd.network.netdevs."10-${deviceName}".wireguardPeers = [ + { + AllowedIPs = [ "::/0" ]; + PublicKey = server.wg.public; + Endpoint = "server:${builtins.toString server.wg.listen}"; + } + ]; - # Full path to rosenpass in the store, to avoid fiddling with `$PATH`. - rosenpass = "${pkgs.rosenpass}/bin/rosenpass" - - # Path in `/etc` where keys will be placed. 
- etc = "/etc/rosenpass" - - start_all() - - for machine in [server, client]: - machine.wait_for_unit("multi-user.target") - - # Gently stop Rosenpass to avoid crashes during key generation/distribution. - for machine in [server, client]: - machine.execute("systemctl stop rosenpass.service") - - for (name, machine, remote) in [("server", server, client), ("client", client, server)]: - pk, sk = f"{name}.pqpk", f"{name}.pqsk" - system(f"{rosenpass} gen-keys --force --secret-key {sk} --public-key {pk}") - machine.copy_from_host(sk, f"{etc}/pqsk") - machine.copy_from_host(pk, f"{etc}/pqpk") - remote.copy_from_host(pk, f"{etc}/peers/{name}/pqpk") - - for machine in [server, client]: - machine.execute("systemctl start rosenpass.service") - - for machine in [server, client]: - machine.wait_for_unit("rosenpass.service") - - with subtest("ping"): - client.succeed("ping -c 2 -i 0.5 ${server.ip}%${deviceName}") - - with subtest("preshared-keys"): - # Rosenpass works by setting the WireGuard preshared key at regular intervals. - # Thus, if it is not active, then no key will be set, and the output of `wg show` will contain "none". - # Otherwise, if it is active, then the key will be set and "none" will not be found in the output of `wg show`. - for machine in [server, client]: - machine.wait_until_succeeds("wg show all preshared-keys | grep --invert-match none", timeout=5) - ''; - - # NOTE: Below configuration is for "interactive" (=developing/debugging) only. - interactive.nodes = - let - inherit (import ./ssh-keys.nix pkgs) snakeOilPublicKey snakeOilPrivateKey; - - sshAndKeyGeneration = { - services.openssh.enable = true; - users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - environment.systemPackages = [ - (pkgs.writeShellApplication { - name = "gen-keys"; - runtimeInputs = [ pkgs.rosenpass ]; - text = '' - HOST="$(hostname)" - if [ "$HOST" == "server" ] - then - PEER="client" - else - PEER="server" - fi - - # Generate keypair. - mkdir -vp /etc/rosenpass/peers/$PEER - rosenpass gen-keys --force --secret-key /etc/rosenpass/pqsk --public-key /etc/rosenpass/pqpk - - # Set up SSH key. - mkdir -p /root/.ssh - cp ${snakeOilPrivateKey} /root/.ssh/id_ecdsa - chmod 0400 /root/.ssh/id_ecdsa - - # Copy public key to other peer. - # shellcheck disable=SC2029 - ssh -o StrictHostKeyChecking=no $PEER "mkdir -pv /etc/rosenpass/peers/$HOST" - scp /etc/rosenpass/pqpk "$PEER:/etc/rosenpass/peers/$HOST/pqpk" - ''; - }) - ]; - }; - - # Use kmscon - # to provide a slightly nicer console, and while we're at it, - # also use a nice font. - # With kmscon, we can for example zoom in/out using [Ctrl] + [+] - # and [Ctrl] + [-] - niceConsoleAndAutologin.services.kmscon = { - enable = true; - autologinUser = "root"; - fonts = [ - { - name = "Fira Code"; - package = pkgs.fira-code; - } - ]; - }; - in - { - server = sshAndKeyGeneration // niceConsoleAndAutologin; - client = sshAndKeyGeneration // niceConsoleAndAutologin; + services.rosenpass.settings.peers = [ + { + public_key = "/etc/rosenpass/peers/server/pqpk"; + endpoint = "server:9999"; + peer = server.wg.public; + } + ]; }; - } -) + }; + + testScript = + { ... }: + '' + from os import system + + # Full path to rosenpass in the store, to avoid fiddling with `$PATH`. + rosenpass = "${pkgs.rosenpass}/bin/rosenpass" + + # Path in `/etc` where keys will be placed. 
+ etc = "/etc/rosenpass" + + start_all() + + for machine in [server, client]: + machine.wait_for_unit("multi-user.target") + + # Gently stop Rosenpass to avoid crashes during key generation/distribution. + for machine in [server, client]: + machine.execute("systemctl stop rosenpass.service") + + for (name, machine, remote) in [("server", server, client), ("client", client, server)]: + pk, sk = f"{name}.pqpk", f"{name}.pqsk" + system(f"{rosenpass} gen-keys --force --secret-key {sk} --public-key {pk}") + machine.copy_from_host(sk, f"{etc}/pqsk") + machine.copy_from_host(pk, f"{etc}/pqpk") + remote.copy_from_host(pk, f"{etc}/peers/{name}/pqpk") + + for machine in [server, client]: + machine.execute("systemctl start rosenpass.service") + + for machine in [server, client]: + machine.wait_for_unit("rosenpass.service") + + with subtest("ping"): + client.succeed("ping -c 2 -i 0.5 ${server.ip}%${deviceName}") + + with subtest("preshared-keys"): + # Rosenpass works by setting the WireGuard preshared key at regular intervals. + # Thus, if it is not active, then no key will be set, and the output of `wg show` will contain "none". + # Otherwise, if it is active, then the key will be set and "none" will not be found in the output of `wg show`. + for machine in [server, client]: + machine.wait_until_succeeds("wg show all preshared-keys | grep --invert-match none", timeout=5) + ''; + + # NOTE: Below configuration is for "interactive" (=developing/debugging) only. + interactive.nodes = + let + inherit (import ./ssh-keys.nix pkgs) snakeOilPublicKey snakeOilPrivateKey; + + sshAndKeyGeneration = { + services.openssh.enable = true; + users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; + environment.systemPackages = [ + (pkgs.writeShellApplication { + name = "gen-keys"; + runtimeInputs = [ pkgs.rosenpass ]; + text = '' + HOST="$(hostname)" + if [ "$HOST" == "server" ] + then + PEER="client" + else + PEER="server" + fi + + # Generate keypair. + mkdir -vp /etc/rosenpass/peers/$PEER + rosenpass gen-keys --force --secret-key /etc/rosenpass/pqsk --public-key /etc/rosenpass/pqpk + + # Set up SSH key. + mkdir -p /root/.ssh + cp ${snakeOilPrivateKey} /root/.ssh/id_ecdsa + chmod 0400 /root/.ssh/id_ecdsa + + # Copy public key to other peer. + # shellcheck disable=SC2029 + ssh -o StrictHostKeyChecking=no $PEER "mkdir -pv /etc/rosenpass/peers/$HOST" + scp /etc/rosenpass/pqpk "$PEER:/etc/rosenpass/peers/$HOST/pqpk" + ''; + }) + ]; + }; + + # Use kmscon + # to provide a slightly nicer console, and while we're at it, + # also use a nice font. + # With kmscon, we can for example zoom in/out using [Ctrl] + [+] + # and [Ctrl] + [-] + niceConsoleAndAutologin.services.kmscon = { + enable = true; + autologinUser = "root"; + fonts = [ + { + name = "Fira Code"; + package = pkgs.fira-code; + } + ]; + }; + in + { + server = sshAndKeyGeneration // niceConsoleAndAutologin; + client = sshAndKeyGeneration // niceConsoleAndAutologin; + }; +} diff --git a/nixos/tests/roundcube.nix b/nixos/tests/roundcube.nix index 1f82e9c7a902..c631c97d6827 100644 --- a/nixos/tests/roundcube.nix +++ b/nixos/tests/roundcube.nix @@ -1,40 +1,38 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "roundcube"; - meta = with pkgs.lib.maintainers; { - maintainers = [ globin ]; - }; +{ pkgs, ... }: +{ + name = "roundcube"; + meta = with pkgs.lib.maintainers; { + maintainers = [ globin ]; + }; - nodes = { - roundcube = - { config, pkgs, ... 
}: - { - services.roundcube = { - enable = true; - hostName = "roundcube"; - database.password = "not production"; - package = pkgs.roundcube.withPlugins (plugins: [ plugins.persistent_login ]); - plugins = [ "persistent_login" ]; - dicts = with pkgs.aspellDicts; [ - en - fr - de - ]; - }; - services.nginx.virtualHosts.roundcube = { - forceSSL = false; - enableACME = false; - }; + nodes = { + roundcube = + { config, pkgs, ... }: + { + services.roundcube = { + enable = true; + hostName = "roundcube"; + database.password = "not production"; + package = pkgs.roundcube.withPlugins (plugins: [ plugins.persistent_login ]); + plugins = [ "persistent_login" ]; + dicts = with pkgs.aspellDicts; [ + en + fr + de + ]; }; - }; + services.nginx.virtualHosts.roundcube = { + forceSSL = false; + enableACME = false; + }; + }; + }; - testScript = '' - roundcube.start - roundcube.wait_for_unit("postgresql.service") - roundcube.wait_for_unit("phpfpm-roundcube.service") - roundcube.wait_for_unit("nginx.service") - roundcube.succeed("curl -sSfL http://roundcube/ | grep 'Keep me logged in'") - ''; - } -) + testScript = '' + roundcube.start + roundcube.wait_for_unit("postgresql.service") + roundcube.wait_for_unit("phpfpm-roundcube.service") + roundcube.wait_for_unit("nginx.service") + roundcube.succeed("curl -sSfL http://roundcube/ | grep 'Keep me logged in'") + ''; +} diff --git a/nixos/tests/rspamd-trainer.nix b/nixos/tests/rspamd-trainer.nix index fdb05bb8b00d..2f58ab73dd4f 100644 --- a/nixos/tests/rspamd-trainer.nix +++ b/nixos/tests/rspamd-trainer.nix @@ -1,168 +1,166 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - certs = import ./common/acme/server/snakeoil-certs.nix; - domain = certs.domain; - in - { - name = "rspamd-trainer"; - meta = with pkgs.lib.maintainers; { - maintainers = [ onny ]; - }; +{ pkgs, ... }: +let + certs = import ./common/acme/server/snakeoil-certs.nix; + domain = certs.domain; +in +{ + name = "rspamd-trainer"; + meta = with pkgs.lib.maintainers; { + maintainers = [ onny ]; + }; - nodes = { - machine = - { options, config, ... }: - { + nodes = { + machine = + { options, config, ... }: + { - security.pki.certificateFiles = [ - certs.ca.cert + security.pki.certificateFiles = [ + certs.ca.cert + ]; + + networking.extraHosts = '' + 127.0.0.1 ${domain} + ''; + + services.rspamd-trainer = { + enable = true; + settings = { + HOST = domain; + USERNAME = "spam@${domain}"; + INBOXPREFIX = "INBOX/"; + }; + secrets = [ + # Do not use this in production. This will make passwords + # world-readable in the Nix store + "${pkgs.writeText "secrets" '' + PASSWORD = test123 + ''}" ]; - - networking.extraHosts = '' - 127.0.0.1 ${domain} - ''; - - services.rspamd-trainer = { - enable = true; - settings = { - HOST = domain; - USERNAME = "spam@${domain}"; - INBOXPREFIX = "INBOX/"; - }; - secrets = [ - # Do not use this in production. This will make passwords - # world-readable in the Nix store - "${pkgs.writeText "secrets" '' - PASSWORD = test123 - ''}" - ]; - }; - - services.maddy = { - enable = true; - hostname = domain; - primaryDomain = domain; - ensureAccounts = [ "spam@${domain}" ]; - ensureCredentials = { - # Do not use this in production. 
This will make passwords world-readable - # in the Nix store - "spam@${domain}".passwordFile = "${pkgs.writeText "postmaster" "test123"}"; - }; - tls = { - loader = "file"; - certificates = [ - { - certPath = "${certs.${domain}.cert}"; - keyPath = "${certs.${domain}.key}"; - } - ]; - }; - config = - builtins.replaceStrings - [ - "imap tcp://0.0.0.0:143" - "submission tcp://0.0.0.0:587" - ] - [ - "imap tls://0.0.0.0:993 tcp://0.0.0.0:143" - "submission tls://0.0.0.0:465 tcp://0.0.0.0:587" - ] - options.services.maddy.config.default; - }; - - services.rspamd = { - enable = true; - locals = { - "redis.conf".text = '' - servers = "${config.services.redis.servers.rspamd.unixSocket}"; - ''; - "classifier-bayes.conf".text = '' - backend = "redis"; - autolearn = true; - ''; - }; - }; - - services.redis.servers.rspamd = { - enable = true; - port = 0; - unixSocket = "/run/redis-rspamd/redis.sock"; - user = config.services.rspamd.user; - }; - - environment.systemPackages = [ - (pkgs.writers.writePython3Bin "send-testmail" { } '' - import smtplib - import ssl - from email.mime.text import MIMEText - context = ssl.create_default_context() - msg = MIMEText("Hello World") - msg['Subject'] = 'Test' - msg['From'] = "spam@${domain}" - msg['To'] = "spam@${domain}" - with smtplib.SMTP_SSL(host='${domain}', port=465, context=context) as smtp: - smtp.login('spam@${domain}', 'test123') - smtp.sendmail( - 'spam@${domain}', 'spam@${domain}', msg.as_string() - ) - '') - (pkgs.writers.writePython3Bin "create-mail-dirs" { } '' - import imaplib - with imaplib.IMAP4_SSL('${domain}') as imap: - imap.login('spam@${domain}', 'test123') - imap.create("\"INBOX/report_spam\"") - imap.create("\"INBOX/report_ham\"") - imap.create("\"INBOX/report_spam_reply\"") - imap.select("INBOX") - imap.copy("1", "\"INBOX/report_ham\"") - imap.logout() - '') - (pkgs.writers.writePython3Bin "test-imap" { } '' - import imaplib - with imaplib.IMAP4_SSL('${domain}') as imap: - imap.login('spam@${domain}', 'test123') - imap.select("INBOX/learned_ham") - status, refs = imap.search(None, 'ALL') - assert status == 'OK' - assert len(refs) == 1 - status, msg = imap.fetch(refs[0], 'BODY[TEXT]') - assert status == 'OK' - assert msg[0][1].strip() == b"Hello World" - imap.logout() - '') - ]; - }; - }; + services.maddy = { + enable = true; + hostname = domain; + primaryDomain = domain; + ensureAccounts = [ "spam@${domain}" ]; + ensureCredentials = { + # Do not use this in production. 
This will make passwords world-readable + # in the Nix store + "spam@${domain}".passwordFile = "${pkgs.writeText "postmaster" "test123"}"; + }; + tls = { + loader = "file"; + certificates = [ + { + certPath = "${certs.${domain}.cert}"; + keyPath = "${certs.${domain}.key}"; + } + ]; + }; + config = + builtins.replaceStrings + [ + "imap tcp://0.0.0.0:143" + "submission tcp://0.0.0.0:587" + ] + [ + "imap tls://0.0.0.0:993 tcp://0.0.0.0:143" + "submission tls://0.0.0.0:465 tcp://0.0.0.0:587" + ] + options.services.maddy.config.default; + }; - testScript = - { nodes }: - '' - start_all() - machine.wait_for_unit("maddy.service") - machine.wait_for_open_port(143) - machine.wait_for_open_port(993) - machine.wait_for_open_port(587) - machine.wait_for_open_port(465) + services.rspamd = { + enable = true; + locals = { + "redis.conf".text = '' + servers = "${config.services.redis.servers.rspamd.unixSocket}"; + ''; + "classifier-bayes.conf".text = '' + backend = "redis"; + autolearn = true; + ''; + }; + }; - # Send test mail to spam@domain - machine.succeed("send-testmail") + services.redis.servers.rspamd = { + enable = true; + port = 0; + unixSocket = "/run/redis-rspamd/redis.sock"; + user = config.services.rspamd.user; + }; - # Create mail directories required for rspamd-trainer and copy mail from - # INBOX into INBOX/report_ham - machine.succeed("create-mail-dirs") + environment.systemPackages = [ + (pkgs.writers.writePython3Bin "send-testmail" { } '' + import smtplib + import ssl + from email.mime.text import MIMEText + context = ssl.create_default_context() + msg = MIMEText("Hello World") + msg['Subject'] = 'Test' + msg['From'] = "spam@${domain}" + msg['To'] = "spam@${domain}" + with smtplib.SMTP_SSL(host='${domain}', port=465, context=context) as smtp: + smtp.login('spam@${domain}', 'test123') + smtp.sendmail( + 'spam@${domain}', 'spam@${domain}', msg.as_string() + ) + '') + (pkgs.writers.writePython3Bin "create-mail-dirs" { } '' + import imaplib + with imaplib.IMAP4_SSL('${domain}') as imap: + imap.login('spam@${domain}', 'test123') + imap.create("\"INBOX/report_spam\"") + imap.create("\"INBOX/report_ham\"") + imap.create("\"INBOX/report_spam_reply\"") + imap.select("INBOX") + imap.copy("1", "\"INBOX/report_ham\"") + imap.logout() + '') + (pkgs.writers.writePython3Bin "test-imap" { } '' + import imaplib + with imaplib.IMAP4_SSL('${domain}') as imap: + imap.login('spam@${domain}', 'test123') + imap.select("INBOX/learned_ham") + status, refs = imap.search(None, 'ALL') + assert status == 'OK' + assert len(refs) == 1 + status, msg = imap.fetch(refs[0], 'BODY[TEXT]') + assert status == 'OK' + assert msg[0][1].strip() == b"Hello World" + imap.logout() + '') + ]; - # Start rspamd-trainer. 
It should read mail from INBOX/report_ham - machine.wait_for_unit("rspamd.service") - machine.wait_for_unit("redis-rspamd.service") - machine.wait_for_file("/run/rspamd/rspamd.sock") - machine.succeed("systemctl start rspamd-trainer.service") + }; - # Check if mail got processed by rspamd-trainer successfully and check for - # it in INBOX/learned_ham - machine.succeed("test-imap") - ''; - } -) + }; + + testScript = + { nodes }: + '' + start_all() + machine.wait_for_unit("maddy.service") + machine.wait_for_open_port(143) + machine.wait_for_open_port(993) + machine.wait_for_open_port(587) + machine.wait_for_open_port(465) + + # Send test mail to spam@domain + machine.succeed("send-testmail") + + # Create mail directories required for rspamd-trainer and copy mail from + # INBOX into INBOX/report_ham + machine.succeed("create-mail-dirs") + + # Start rspamd-trainer. It should read mail from INBOX/report_ham + machine.wait_for_unit("rspamd.service") + machine.wait_for_unit("redis-rspamd.service") + machine.wait_for_file("/run/rspamd/rspamd.sock") + machine.succeed("systemctl start rspamd-trainer.service") + + # Check if mail got processed by rspamd-trainer successfully and check for + # it in INBOX/learned_ham + machine.succeed("test-imap") + ''; +} diff --git a/nixos/tests/rstudio-server.nix b/nixos/tests/rstudio-server.nix index 03a173fe32f2..e561bd309d40 100644 --- a/nixos/tests/rstudio-server.nix +++ b/nixos/tests/rstudio-server.nix @@ -1,43 +1,41 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "rstudio-server-test"; - meta.maintainers = with pkgs.lib.maintainers; [ - jbedo - cfhammill - ]; +{ pkgs, ... }: +{ + name = "rstudio-server-test"; + meta.maintainers = with pkgs.lib.maintainers; [ + jbedo + cfhammill + ]; - nodes.machine = - { - config, - lib, - pkgs, - ... - }: - { - services.rstudio-server.enable = true; + nodes.machine = + { + config, + lib, + pkgs, + ... + }: + { + services.rstudio-server.enable = true; + }; + + nodes.customPackageMachine = + { + config, + lib, + pkgs, + ... + }: + { + services.rstudio-server = { + enable = true; + package = pkgs.rstudioServerWrapper.override { packages = [ pkgs.rPackages.ggplot2 ]; }; }; + }; - nodes.customPackageMachine = - { - config, - lib, - pkgs, - ... - }: - { - services.rstudio-server = { - enable = true; - package = pkgs.rstudioServerWrapper.override { packages = [ pkgs.rPackages.ggplot2 ]; }; - }; - }; + testScript = '' + machine.wait_for_unit("rstudio-server.service") + machine.succeed("curl -f -vvv -s http://127.0.0.1:8787") - testScript = '' - machine.wait_for_unit("rstudio-server.service") - machine.succeed("curl -f -vvv -s http://127.0.0.1:8787") - - customPackageMachine.wait_for_unit("rstudio-server.service") - customPackageMachine.succeed("curl -f -vvv -s http://127.0.0.1:8787") - ''; - } -) + customPackageMachine.wait_for_unit("rstudio-server.service") + customPackageMachine.succeed("curl -f -vvv -s http://127.0.0.1:8787") + ''; +} diff --git a/nixos/tests/rsyncd.nix b/nixos/tests/rsyncd.nix index 9935c0e6c6bf..3ccf22c2132b 100644 --- a/nixos/tests/rsyncd.nix +++ b/nixos/tests/rsyncd.nix @@ -1,45 +1,43 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "rsyncd"; - meta.maintainers = with pkgs.lib.maintainers; [ ehmry ]; +{ pkgs, ... }: +{ + name = "rsyncd"; + meta.maintainers = with pkgs.lib.maintainers; [ ehmry ]; - nodes = - let - mkNode = - socketActivated: - { config, ... 
}: - { - networking.firewall.allowedTCPPorts = [ config.services.rsyncd.port ]; - services.rsyncd = { - enable = true; - inherit socketActivated; - settings = { - globalSection = { - "reverse lookup" = false; - "forward lookup" = false; - }; - sections = { - tmp = { - path = "/nix/store"; - comment = "test module"; - }; + nodes = + let + mkNode = + socketActivated: + { config, ... }: + { + networking.firewall.allowedTCPPorts = [ config.services.rsyncd.port ]; + services.rsyncd = { + enable = true; + inherit socketActivated; + settings = { + globalSection = { + "reverse lookup" = false; + "forward lookup" = false; + }; + sections = { + tmp = { + path = "/nix/store"; + comment = "test module"; }; }; }; }; - in - { - a = mkNode false; - b = mkNode true; - }; + }; + in + { + a = mkNode false; + b = mkNode true; + }; - testScript = '' - start_all() - a.wait_for_unit("rsync") - b.wait_for_unit("sockets.target") - b.succeed("rsync a::") - a.succeed("rsync b::") - ''; - } -) + testScript = '' + start_all() + a.wait_for_unit("rsync") + b.wait_for_unit("sockets.target") + b.succeed("rsync a::") + a.succeed("rsync b::") + ''; +} diff --git a/nixos/tests/rtorrent.nix b/nixos/tests/rtorrent.nix index ca680380374a..b34f1cb5cec6 100644 --- a/nixos/tests/rtorrent.nix +++ b/nixos/tests/rtorrent.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - port = 50001; - in - { - name = "rtorrent"; - meta = { - maintainers = with pkgs.lib.maintainers; [ thiagokokada ]; +{ pkgs, ... }: +let + port = 50001; +in +{ + name = "rtorrent"; + meta = { + maintainers = with pkgs.lib.maintainers; [ thiagokokada ]; + }; + + nodes.machine = + { pkgs, ... }: + { + services.rtorrent = { + inherit port; + enable = true; + }; }; - nodes.machine = - { pkgs, ... }: - { - services.rtorrent = { - inherit port; - enable = true; - }; - }; + testScript = # python + '' + machine.start() + machine.wait_for_unit("rtorrent.service") + machine.wait_for_open_port(${toString port}) - testScript = # python - '' - machine.start() - machine.wait_for_unit("rtorrent.service") - machine.wait_for_open_port(${toString port}) - - machine.succeed("nc -z localhost ${toString port}") - ''; - } -) + machine.succeed("nc -z localhost ${toString port}") + ''; +} diff --git a/nixos/tests/rustls-libssl.nix b/nixos/tests/rustls-libssl.nix index 079fa52435bc..165edb76a5d4 100644 --- a/nixos/tests/rustls-libssl.nix +++ b/nixos/tests/rustls-libssl.nix @@ -1,92 +1,90 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - caCert = builtins.readFile ./common/acme/server/ca.cert.pem; - certPath = ./common/acme/server/acme.test.cert.pem; - keyPath = ./common/acme/server/acme.test.key.pem; - hosts = '' - 192.168.2.101 acme.test - ''; - in - { - name = "rustls-libssl"; - meta.maintainers = with pkgs.lib.maintainers; [ - stephank - cpu - ]; +{ pkgs, lib, ... }: +let + caCert = builtins.readFile ./common/acme/server/ca.cert.pem; + certPath = ./common/acme/server/acme.test.cert.pem; + keyPath = ./common/acme/server/acme.test.key.pem; + hosts = '' + 192.168.2.101 acme.test + ''; +in +{ + name = "rustls-libssl"; + meta.maintainers = with pkgs.lib.maintainers; [ + stephank + cpu + ]; - nodes = { - server = - { lib, pkgs, ... 
}: - { - networking = { - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "192.168.2.101"; - prefixLength = 24; - } - ]; - }; - extraHosts = hosts; - firewall.allowedTCPPorts = [ 443 ]; - }; - - security.pki.certificates = [ caCert ]; - - services.nginx = { - enable = true; - package = pkgs.nginxMainline.override { - openssl = pkgs.rustls-libssl; - modules = [ ]; # slightly reduces the size of the build - }; - - # Hardcoded sole input accepted by rustls-libssl. - sslCiphers = "HIGH:!aNULL:!MD5"; - - virtualHosts."acme.test" = { - onlySSL = true; - sslCertificate = certPath; - sslCertificateKey = keyPath; - http2 = true; - reuseport = true; - root = lib.mkForce ( - pkgs.runCommandLocal "testdir" { } '' - mkdir "$out" - cat > "$out/index.html" <Hello World! - EOF - '' - ); - }; + nodes = { + server = + { lib, pkgs, ... }: + { + networking = { + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "192.168.2.101"; + prefixLength = 24; + } + ]; }; + extraHosts = hosts; + firewall.allowedTCPPorts = [ 443 ]; }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.curlHTTP3 ]; - networking = { - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "192.168.2.201"; - prefixLength = 24; - } - ]; - }; - extraHosts = hosts; + security.pki.certificates = [ caCert ]; + + services.nginx = { + enable = true; + package = pkgs.nginxMainline.override { + openssl = pkgs.rustls-libssl; + modules = [ ]; # slightly reduces the size of the build }; - security.pki.certificates = [ caCert ]; - }; - }; + # Hardcoded sole input accepted by rustls-libssl. + sslCiphers = "HIGH:!aNULL:!MD5"; - testScript = '' - start_all() - server.wait_for_open_port(443) - client.succeed("curl --verbose --http1.1 https://acme.test | grep 'Hello World!'") - client.succeed("curl --verbose --http2-prior-knowledge https://acme.test | grep 'Hello World!'") - ''; - } -) + virtualHosts."acme.test" = { + onlySSL = true; + sslCertificate = certPath; + sslCertificateKey = keyPath; + http2 = true; + reuseport = true; + root = lib.mkForce ( + pkgs.runCommandLocal "testdir" { } '' + mkdir "$out" + cat > "$out/index.html" <Hello World! + EOF + '' + ); + }; + }; + }; + + client = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.curlHTTP3 ]; + networking = { + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "192.168.2.201"; + prefixLength = 24; + } + ]; + }; + extraHosts = hosts; + }; + + security.pki.certificates = [ caCert ]; + }; + }; + + testScript = '' + start_all() + server.wait_for_open_port(443) + client.succeed("curl --verbose --http1.1 https://acme.test | grep 'Hello World!'") + client.succeed("curl --verbose --http2-prior-knowledge https://acme.test | grep 'Hello World!'") + ''; +} diff --git a/nixos/tests/rxe.nix b/nixos/tests/rxe.nix index f05b5a7dd374..8b2626857d84 100644 --- a/nixos/tests/rxe.nix +++ b/nixos/tests/rxe.nix @@ -1,53 +1,51 @@ -import ./make-test-python.nix ( - { ... }: +{ ... }: - let - node = - { pkgs, ... }: - { - networking = { - firewall = { - allowedUDPPorts = [ 4791 ]; # open RoCE port - allowedTCPPorts = [ 4800 ]; # port for test utils - }; - rxe = { - enable = true; - interfaces = [ "eth1" ]; - }; +let + node = + { pkgs, ... 
}: + { + networking = { + firewall = { + allowedUDPPorts = [ 4791 ]; # open RoCE port + allowedTCPPorts = [ 4800 ]; # port for test utils + }; + rxe = { + enable = true; + interfaces = [ "eth1" ]; }; - - environment.systemPackages = with pkgs; [ - rdma-core - screen - ]; }; - in - { - name = "rxe"; - - nodes = { - server = node; - client = node; + environment.systemPackages = with pkgs; [ + rdma-core + screen + ]; }; - testScript = '' - # Test if rxe interface comes up - server.wait_for_unit("default.target") - server.succeed("systemctl status rxe.service") - server.succeed("ibv_devices | grep rxe_eth1") +in +{ + name = "rxe"; - client.wait_for_unit("default.target") + nodes = { + server = node; + client = node; + }; - # ping pong tests - for proto in "rc", "uc", "ud", "srq": - server.succeed( - "screen -dmS {0}_pingpong ibv_{0}_pingpong -p 4800 -s 1024 -g0".format(proto) - ) - client.succeed("sleep 2; ibv_{}_pingpong -p 4800 -s 1024 -g0 server".format(proto)) + testScript = '' + # Test if rxe interface comes up + server.wait_for_unit("default.target") + server.succeed("systemctl status rxe.service") + server.succeed("ibv_devices | grep rxe_eth1") - server.succeed("screen -dmS rping rping -s -a server -C 10") - client.succeed("sleep 2; rping -c -a server -C 10") - ''; - } -) + client.wait_for_unit("default.target") + + # ping pong tests + for proto in "rc", "uc", "ud", "srq": + server.succeed( + "screen -dmS {0}_pingpong ibv_{0}_pingpong -p 4800 -s 1024 -g0".format(proto) + ) + client.succeed("sleep 2; ibv_{}_pingpong -p 4800 -s 1024 -g0 server".format(proto)) + + server.succeed("screen -dmS rping rping -s -a server -C 10") + client.succeed("sleep 2; rping -c -a server -C 10") + ''; +} diff --git a/nixos/tests/sabnzbd.nix b/nixos/tests/sabnzbd.nix index 5aa466267a5d..a1a954d8b1e5 100644 --- a/nixos/tests/sabnzbd.nix +++ b/nixos/tests/sabnzbd.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "sabnzbd"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ jojosch ]; - }; +{ pkgs, lib, ... }: +{ + name = "sabnzbd"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ jojosch ]; + }; - nodes.machine = - { pkgs, ... }: - { - services.sabnzbd = { - enable = true; - }; - - # unrar is unfree - nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "unrar" ]; + nodes.machine = + { pkgs, ... }: + { + services.sabnzbd = { + enable = true; }; - testScript = '' - machine.wait_for_unit("sabnzbd.service") - machine.wait_until_succeeds( - "curl --fail -L http://localhost:8080/" - ) - _, out = machine.execute("grep SABCTools /var/lib/sabnzbd/logs/sabnzbd.log") - machine.log(out) - machine.fail("grep 'SABCTools disabled: no correct version found!' /var/lib/sabnzbd/logs/sabnzbd.log") - ''; - } -) + # unrar is unfree + nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "unrar" ]; + }; + + testScript = '' + machine.wait_for_unit("sabnzbd.service") + machine.wait_until_succeeds( + "curl --fail -L http://localhost:8080/" + ) + _, out = machine.execute("grep SABCTools /var/lib/sabnzbd/logs/sabnzbd.log") + machine.log(out) + machine.fail("grep 'SABCTools disabled: no correct version found!' /var/lib/sabnzbd/logs/sabnzbd.log") + ''; +} diff --git a/nixos/tests/samba-wsdd.nix b/nixos/tests/samba-wsdd.nix index 2efc3058ee96..07ad7d68f664 100644 --- a/nixos/tests/samba-wsdd.nix +++ b/nixos/tests/samba-wsdd.nix @@ -1,48 +1,46 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - { - name = "samba-wsdd"; - meta.maintainers = with pkgs.lib.maintainers; [ izorkin ]; +{ + name = "samba-wsdd"; + meta.maintainers = with pkgs.lib.maintainers; [ izorkin ]; - nodes = { - client_wsdd = - { pkgs, ... }: - { - services.samba-wsdd = { - enable = true; - openFirewall = true; - interface = "eth1"; - workgroup = "WORKGROUP"; - hostname = "CLIENT-WSDD"; - discovery = true; - extraOptions = [ "--no-host" ]; - }; + nodes = { + client_wsdd = + { pkgs, ... }: + { + services.samba-wsdd = { + enable = true; + openFirewall = true; + interface = "eth1"; + workgroup = "WORKGROUP"; + hostname = "CLIENT-WSDD"; + discovery = true; + extraOptions = [ "--no-host" ]; }; + }; - server_wsdd = - { ... }: - { - services.samba-wsdd = { - enable = true; - openFirewall = true; - interface = "eth1"; - workgroup = "WORKGROUP"; - hostname = "SERVER-WSDD"; - }; + server_wsdd = + { ... }: + { + services.samba-wsdd = { + enable = true; + openFirewall = true; + interface = "eth1"; + workgroup = "WORKGROUP"; + hostname = "SERVER-WSDD"; }; - }; + }; + }; - testScript = '' - client_wsdd.start() - client_wsdd.wait_for_unit("samba-wsdd") + testScript = '' + client_wsdd.start() + client_wsdd.wait_for_unit("samba-wsdd") - server_wsdd.start() - server_wsdd.wait_for_unit("samba-wsdd") + server_wsdd.start() + server_wsdd.wait_for_unit("samba-wsdd") - client_wsdd.wait_until_succeeds( - "echo list | ${pkgs.libressl.nc}/bin/nc -N -U /run/wsdd/wsdd.sock | grep -i SERVER-WSDD" - ) - ''; - } -) + client_wsdd.wait_until_succeeds( + "echo list | ${pkgs.libressl.nc}/bin/nc -N -U /run/wsdd/wsdd.sock | grep -i SERVER-WSDD" + ) + ''; +} diff --git a/nixos/tests/sane.nix b/nixos/tests/sane.nix index 117831e5c7ab..6b112898de0c 100644 --- a/nixos/tests/sane.nix +++ b/nixos/tests/sane.nix @@ -1,94 +1,92 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - /* - SANE NixOS test - =============== - SANE is intrisically tied to hardware, so testing it is not straightforward. - However: - - a fake webcam can be created with v4l2loopback - - sane has a backend (v4l) to use a webcam as a scanner - This test creates a webcam /dev/video0, streams a still image with some text - through this webcam, uses SANE to scan from the webcam, and uses OCR to check - that the expected text was scanned. - */ - let - text = "66263666188646651519653683416"; - fontsConf = pkgs.makeFontsConf { - fontDirectories = [ - pkgs.dejavu_fonts.minimal - ]; - }; - # an image with black on white text spelling "${text}" - # for some reason, the test fails if it's jpg instead of png - # the font is quite large to make OCR easier - image = - pkgs.runCommand "image.png" - { - # only imagemagickBig can render text - nativeBuildInputs = [ pkgs.imagemagickBig ]; - FONTCONFIG_FILE = fontsConf; - } - '' - magick -pointsize 100 label:${text} $out - ''; - in - { - name = "sane"; - nodes.machine = - { pkgs, config, ... }: +{ pkgs, ... }: +/* + SANE NixOS test + =============== + SANE is intrisically tied to hardware, so testing it is not straightforward. + However: + - a fake webcam can be created with v4l2loopback + - sane has a backend (v4l) to use a webcam as a scanner + This test creates a webcam /dev/video0, streams a still image with some text + through this webcam, uses SANE to scan from the webcam, and uses OCR to check + that the expected text was scanned. 
+*/ +let + text = "66263666188646651519653683416"; + fontsConf = pkgs.makeFontsConf { + fontDirectories = [ + pkgs.dejavu_fonts.minimal + ]; + }; + # an image with black on white text spelling "${text}" + # for some reason, the test fails if it's jpg instead of png + # the font is quite large to make OCR easier + image = + pkgs.runCommand "image.png" { - boot = { - # create /dev/video0 as a fake webcam whose content is filled by ffmpeg - extraModprobeConfig = '' - options v4l2loopback devices=1 max_buffers=2 exclusive_caps=1 card_label=VirtualCam - ''; - kernelModules = [ "v4l2loopback" ]; - extraModulePackages = [ config.boot.kernelPackages.v4l2loopback ]; - }; - systemd.services.fake-webcam = { - wantedBy = [ "multi-user.target" ]; - description = "fill /dev/video0 with ${image}"; - /* - HACK: /dev/video0 is a v4l2 only device, it misses one single v4l1 - ioctl, VIDIOCSPICT. But sane only supports v4l1, so it will log that this - ioctl failed, and assume that the pixel format is Y8 (gray). So we tell - ffmpeg to produce this pixel format. - */ - serviceConfig.ExecStart = [ - "${pkgs.ffmpeg}/bin/ffmpeg -framerate 30 -re -stream_loop -1 -i ${image} -f v4l2 -pix_fmt gray /dev/video0" - ]; - }; - hardware.sane.enable = true; - system.extraDependencies = [ image ]; - environment.systemPackages = [ - pkgs.fswebcam - pkgs.tesseract - pkgs.v4l-utils - ]; - environment.variables.SANE_DEBUG_V4L = "128"; + # only imagemagickBig can render text + nativeBuildInputs = [ pkgs.imagemagickBig ]; + FONTCONFIG_FILE = fontsConf; + } + '' + magick -pointsize 100 label:${text} $out + ''; +in +{ + name = "sane"; + nodes.machine = + { pkgs, config, ... }: + { + boot = { + # create /dev/video0 as a fake webcam whose content is filled by ffmpeg + extraModprobeConfig = '' + options v4l2loopback devices=1 max_buffers=2 exclusive_caps=1 card_label=VirtualCam + ''; + kernelModules = [ "v4l2loopback" ]; + extraModulePackages = [ config.boot.kernelPackages.v4l2loopback ]; }; - testScript = '' - start_all() - machine.wait_for_unit("fake-webcam.service") + systemd.services.fake-webcam = { + wantedBy = [ "multi-user.target" ]; + description = "fill /dev/video0 with ${image}"; + /* + HACK: /dev/video0 is a v4l2 only device, it misses one single v4l1 + ioctl, VIDIOCSPICT. But sane only supports v4l1, so it will log that this + ioctl failed, and assume that the pixel format is Y8 (gray). So we tell + ffmpeg to produce this pixel format. 
+ */ + serviceConfig.ExecStart = [ + "${pkgs.ffmpeg}/bin/ffmpeg -framerate 30 -re -stream_loop -1 -i ${image} -f v4l2 -pix_fmt gray /dev/video0" + ]; + }; + hardware.sane.enable = true; + system.extraDependencies = [ image ]; + environment.systemPackages = [ + pkgs.fswebcam + pkgs.tesseract + pkgs.v4l-utils + ]; + environment.variables.SANE_DEBUG_V4L = "128"; + }; + testScript = '' + start_all() + machine.wait_for_unit("fake-webcam.service") - # the device only appears when ffmpeg starts producing frames - machine.wait_until_succeeds("scanimage -L | grep /dev/video0") + # the device only appears when ffmpeg starts producing frames + machine.wait_until_succeeds("scanimage -L | grep /dev/video0") - machine.succeed("scanimage -L >&2") + machine.succeed("scanimage -L >&2") - with subtest("debugging: /dev/video0 works"): - machine.succeed("v4l2-ctl --all >&2") - machine.succeed("fswebcam --no-banner /tmp/webcam.jpg") - machine.copy_from_vm("/tmp/webcam.jpg", "webcam") + with subtest("debugging: /dev/video0 works"): + machine.succeed("v4l2-ctl --all >&2") + machine.succeed("fswebcam --no-banner /tmp/webcam.jpg") + machine.copy_from_vm("/tmp/webcam.jpg", "webcam") - # scan with the webcam - machine.succeed("scanimage -o /tmp/scan.png >&2") - machine.copy_from_vm("/tmp/scan.png", "scan") + # scan with the webcam + machine.succeed("scanimage -o /tmp/scan.png >&2") + machine.copy_from_vm("/tmp/scan.png", "scan") - # the image should contain "${text}" - output = machine.succeed("tesseract /tmp/scan.png -") - print(output) - assert "${text}" in output, f"expected text ${text} was not found, OCR found {output!r}" - ''; - } -) + # the image should contain "${text}" + output = machine.succeed("tesseract /tmp/scan.png -") + print(output) + assert "${text}" in output, f"expected text ${text} was not found, OCR found {output!r}" + ''; +} diff --git a/nixos/tests/sanoid.nix b/nixos/tests/sanoid.nix index 227a95e9471d..e42fd54dfd82 100644 --- a/nixos/tests/sanoid.nix +++ b/nixos/tests/sanoid.nix @@ -1,148 +1,146 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - inherit (import ./ssh-keys.nix pkgs) - snakeOilPrivateKey - snakeOilPublicKey - ; +{ pkgs, ... }: +let + inherit (import ./ssh-keys.nix pkgs) + snakeOilPrivateKey + snakeOilPublicKey + ; - commonConfig = - { pkgs, ... }: + commonConfig = + { pkgs, ... }: + { + virtualisation.emptyDiskImages = [ 2048 ]; + boot.supportedFilesystems = [ "zfs" ]; + environment.systemPackages = [ pkgs.parted ]; + }; +in +{ + name = "sanoid"; + meta = with pkgs.lib.maintainers; { + maintainers = [ lopsided98 ]; + }; + + nodes = { + source = + { ... 
}: { - virtualisation.emptyDiskImages = [ 2048 ]; - boot.supportedFilesystems = [ "zfs" ]; - environment.systemPackages = [ pkgs.parted ]; + imports = [ commonConfig ]; + networking.hostId = "daa82e91"; + + programs.ssh.extraConfig = '' + UserKnownHostsFile=/dev/null + StrictHostKeyChecking=no + ''; + + services.sanoid = { + enable = true; + templates.test = { + hourly = 12; + daily = 1; + monthly = 1; + yearly = 1; + + autosnap = true; + }; + datasets."pool/sanoid".use_template = [ "test" ]; + datasets."pool/compat".useTemplate = [ "test" ]; + extraArgs = [ "--verbose" ]; + }; + + services.syncoid = { + enable = true; + sshKey = "/var/lib/syncoid/id_ecdsa"; + commands = { + # Sync snapshot taken by sanoid + "pool/sanoid" = { + target = "root@target:pool/sanoid"; + extraArgs = [ + "--no-sync-snap" + "--create-bookmark" + ]; + }; + # Take snapshot and sync + "pool/syncoid".target = "root@target:pool/syncoid"; + + # Test pool without parent (regression test for https://github.com/NixOS/nixpkgs/pull/180111) + "pool".target = "root@target:pool/full-pool"; + + # Test backward compatible options (regression test for https://github.com/NixOS/nixpkgs/issues/181561) + "pool/compat" = { + target = "root@target:pool/compat"; + extraArgs = [ "--no-sync-snap" ]; + }; + }; + }; }; - in - { - name = "sanoid"; - meta = with pkgs.lib.maintainers; { - maintainers = [ lopsided98 ]; - }; + target = + { ... }: + { + imports = [ commonConfig ]; + networking.hostId = "dcf39d36"; - nodes = { - source = - { ... }: - { - imports = [ commonConfig ]; - networking.hostId = "daa82e91"; + services.openssh.enable = true; + users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; + }; + }; - programs.ssh.extraConfig = '' - UserKnownHostsFile=/dev/null - StrictHostKeyChecking=no - ''; + testScript = '' + source.succeed( + "mkdir /mnt", + "parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s", + "udevadm settle", + "zpool create pool -R /mnt /dev/vdb1", + "zfs create pool/sanoid", + "zfs create pool/compat", + "zfs create pool/syncoid", + "udevadm settle", + ) + target.succeed( + "mkdir /mnt", + "parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s", + "udevadm settle", + "zpool create pool -R /mnt /dev/vdb1", + "udevadm settle", + ) - services.sanoid = { - enable = true; - templates.test = { - hourly = 12; - daily = 1; - monthly = 1; - yearly = 1; + source.succeed( + "mkdir -m 700 -p /var/lib/syncoid", + "cat '${snakeOilPrivateKey}' > /var/lib/syncoid/id_ecdsa", + "chmod 600 /var/lib/syncoid/id_ecdsa", + "chown -R syncoid:syncoid /var/lib/syncoid/", + ) - autosnap = true; - }; - datasets."pool/sanoid".use_template = [ "test" ]; - datasets."pool/compat".useTemplate = [ "test" ]; - extraArgs = [ "--verbose" ]; - }; + assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set before snapshotting" + assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set before snapshotting" + assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set before snapshotting" - services.syncoid = { - enable = true; - sshKey = "/var/lib/syncoid/id_ecdsa"; - commands = { - # Sync snapshot taken by sanoid - "pool/sanoid" = { - target = "root@target:pool/sanoid"; - extraArgs = [ - "--no-sync-snap" - "--create-bookmark" - ]; - }; - # Take snapshot and sync - "pool/syncoid".target = "root@target:pool/syncoid"; + # Take snapshot with sanoid + 
source.succeed("touch /mnt/pool/sanoid/test.txt") + source.succeed("touch /mnt/pool/compat/test.txt") + source.systemctl("start --wait sanoid.service") - # Test pool without parent (regression test for https://github.com/NixOS/nixpkgs/pull/180111) - "pool".target = "root@target:pool/full-pool"; + assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set after snapshotting" + assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set after snapshotting" + assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set after snapshotting" - # Test backward compatible options (regression test for https://github.com/NixOS/nixpkgs/issues/181561) - "pool/compat" = { - target = "root@target:pool/compat"; - extraArgs = [ "--no-sync-snap" ]; - }; - }; - }; - }; - target = - { ... }: - { - imports = [ commonConfig ]; - networking.hostId = "dcf39d36"; + # Sync snapshots + target.wait_for_open_port(22) + source.succeed("touch /mnt/pool/syncoid/test.txt") + source.systemctl("start --wait syncoid-pool-sanoid.service") + target.succeed("cat /mnt/pool/sanoid/test.txt") + source.systemctl("start --wait syncoid-pool-syncoid.service") + source.systemctl("start --wait syncoid-pool-syncoid.service") + target.succeed("cat /mnt/pool/syncoid/test.txt") - services.openssh.enable = true; - users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - }; - }; + assert(len(source.succeed("zfs list -H -t snapshot pool/syncoid").splitlines()) == 1), "Syncoid should only retain one sync snapshot" - testScript = '' - source.succeed( - "mkdir /mnt", - "parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s", - "udevadm settle", - "zpool create pool -R /mnt /dev/vdb1", - "zfs create pool/sanoid", - "zfs create pool/compat", - "zfs create pool/syncoid", - "udevadm settle", - ) - target.succeed( - "mkdir /mnt", - "parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s", - "udevadm settle", - "zpool create pool -R /mnt /dev/vdb1", - "udevadm settle", - ) + source.systemctl("start --wait syncoid-pool.service") + target.succeed("[[ -d /mnt/pool/full-pool/syncoid ]]") - source.succeed( - "mkdir -m 700 -p /var/lib/syncoid", - "cat '${snakeOilPrivateKey}' > /var/lib/syncoid/id_ecdsa", - "chmod 600 /var/lib/syncoid/id_ecdsa", - "chown -R syncoid:syncoid /var/lib/syncoid/", - ) + source.systemctl("start --wait syncoid-pool-compat.service") + target.succeed("cat /mnt/pool/compat/test.txt") - assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set before snapshotting" - assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set before snapshotting" - assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set before snapshotting" - - # Take snapshot with sanoid - source.succeed("touch /mnt/pool/sanoid/test.txt") - source.succeed("touch /mnt/pool/compat/test.txt") - source.systemctl("start --wait sanoid.service") - - assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set after snapshotting" - assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set after snapshotting" - assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set after snapshotting" 
- - # Sync snapshots - target.wait_for_open_port(22) - source.succeed("touch /mnt/pool/syncoid/test.txt") - source.systemctl("start --wait syncoid-pool-sanoid.service") - target.succeed("cat /mnt/pool/sanoid/test.txt") - source.systemctl("start --wait syncoid-pool-syncoid.service") - source.systemctl("start --wait syncoid-pool-syncoid.service") - target.succeed("cat /mnt/pool/syncoid/test.txt") - - assert(len(source.succeed("zfs list -H -t snapshot pool/syncoid").splitlines()) == 1), "Syncoid should only retain one sync snapshot" - - source.systemctl("start --wait syncoid-pool.service") - target.succeed("[[ -d /mnt/pool/full-pool/syncoid ]]") - - source.systemctl("start --wait syncoid-pool-compat.service") - target.succeed("cat /mnt/pool/compat/test.txt") - - assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set after syncing snapshots" - assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set after syncing snapshots" - assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set after syncing snapshots" - ''; - } -) + assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set after syncing snapshots" + assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set after syncing snapshots" + assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set after syncing snapshots" + ''; +} diff --git a/nixos/tests/saunafs.nix b/nixos/tests/saunafs.nix index 49d986175716..cc0c9e941372 100644 --- a/nixos/tests/saunafs.nix +++ b/nixos/tests/saunafs.nix @@ -1,122 +1,120 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - master = - { pkgs, ... }: - { - # data base is stored in memory - # server may crash with default memory size - virtualisation.memorySize = 1024; +let + master = + { pkgs, ... }: + { + # data base is stored in memory + # server may crash with default memory size + virtualisation.memorySize = 1024; - services.saunafs.master = { - enable = true; - openFirewall = true; - exports = [ - "* / rw,alldirs,maproot=0:0" - ]; - }; - }; - - chunkserver = - { pkgs, ... }: - { - virtualisation.emptyDiskImages = [ 4096 ]; - boot.initrd.postDeviceCommands = '' - ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb - ''; - - fileSystems = pkgs.lib.mkVMOverride { - "/data" = { - device = "/dev/disk/by-label/data"; - fsType = "ext4"; - }; - }; - - services.saunafs = { - masterHost = "master"; - chunkserver = { - openFirewall = true; - enable = true; - hdds = [ "/data" ]; - - # The test image is too small and gets set to "full" - settings.HDD_LEAVE_SPACE_DEFAULT = "100M"; - }; - }; - }; - - metalogger = - { pkgs, ... }: - { - services.saunafs = { - masterHost = "master"; - metalogger.enable = true; - }; - }; - - client = - { pkgs, lib, ... 
}: - { - services.saunafs.client.enable = true; - # systemd.tmpfiles.rules = [ "d /sfs 755 root root -" ]; - systemd.network.enable = true; - - # Use networkd to have properly functioning - # network-online.target - networking = { - useDHCP = false; - useNetworkd = true; - }; - - systemd.mounts = [ - { - requires = [ "network-online.target" ]; - after = [ "network-online.target" ]; - wantedBy = [ "remote-fs.target" ]; - type = "saunafs"; - what = "master:/"; - where = "/sfs"; - } + services.saunafs.master = { + enable = true; + openFirewall = true; + exports = [ + "* / rw,alldirs,maproot=0:0" ]; }; - - in - { - name = "saunafs"; - - meta.maintainers = [ lib.maintainers.markuskowa ]; - - nodes = { - inherit master metalogger; - chunkserver1 = chunkserver; - chunkserver2 = chunkserver; - client1 = client; - client2 = client; }; - testScript = '' - # prepare master server - master.start() - master.wait_for_unit("multi-user.target") - master.succeed("sfsmaster-init") - master.succeed("systemctl restart sfs-master") - master.wait_for_unit("sfs-master.service") + chunkserver = + { pkgs, ... }: + { + virtualisation.emptyDiskImages = [ 4096 ]; + boot.initrd.postDeviceCommands = '' + ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb + ''; - metalogger.wait_for_unit("sfs-metalogger.service") + fileSystems = pkgs.lib.mkVMOverride { + "/data" = { + device = "/dev/disk/by-label/data"; + fsType = "ext4"; + }; + }; - # Setup chunkservers - for chunkserver in [chunkserver1, chunkserver2]: - chunkserver.wait_for_unit("multi-user.target") - chunkserver.succeed("chown saunafs:saunafs /data") - chunkserver.succeed("systemctl restart sfs-chunkserver") - chunkserver.wait_for_unit("sfs-chunkserver.service") + services.saunafs = { + masterHost = "master"; + chunkserver = { + openFirewall = true; + enable = true; + hdds = [ "/data" ]; - for client in [client1, client2]: - client.wait_for_unit("multi-user.target") + # The test image is too small and gets set to "full" + settings.HDD_LEAVE_SPACE_DEFAULT = "100M"; + }; + }; + }; - client1.succeed("echo test > /sfs/file") - client2.succeed("grep test /sfs/file") - ''; - } -) + metalogger = + { pkgs, ... }: + { + services.saunafs = { + masterHost = "master"; + metalogger.enable = true; + }; + }; + + client = + { pkgs, lib, ... 
}: + { + services.saunafs.client.enable = true; + # systemd.tmpfiles.rules = [ "d /sfs 755 root root -" ]; + systemd.network.enable = true; + + # Use networkd to have properly functioning + # network-online.target + networking = { + useDHCP = false; + useNetworkd = true; + }; + + systemd.mounts = [ + { + requires = [ "network-online.target" ]; + after = [ "network-online.target" ]; + wantedBy = [ "remote-fs.target" ]; + type = "saunafs"; + what = "master:/"; + where = "/sfs"; + } + ]; + }; + +in +{ + name = "saunafs"; + + meta.maintainers = [ lib.maintainers.markuskowa ]; + + nodes = { + inherit master metalogger; + chunkserver1 = chunkserver; + chunkserver2 = chunkserver; + client1 = client; + client2 = client; + }; + + testScript = '' + # prepare master server + master.start() + master.wait_for_unit("multi-user.target") + master.succeed("sfsmaster-init") + master.succeed("systemctl restart sfs-master") + master.wait_for_unit("sfs-master.service") + + metalogger.wait_for_unit("sfs-metalogger.service") + + # Setup chunkservers + for chunkserver in [chunkserver1, chunkserver2]: + chunkserver.wait_for_unit("multi-user.target") + chunkserver.succeed("chown saunafs:saunafs /data") + chunkserver.succeed("systemctl restart sfs-chunkserver") + chunkserver.wait_for_unit("sfs-chunkserver.service") + + for client in [client1, client2]: + client.wait_for_unit("multi-user.target") + + client1.succeed("echo test > /sfs/file") + client2.succeed("grep test /sfs/file") + ''; +} diff --git a/nixos/tests/sdl3.nix b/nixos/tests/sdl3.nix index d07f342fe8b2..33c913321632 100644 --- a/nixos/tests/sdl3.nix +++ b/nixos/tests/sdl3.nix @@ -1,28 +1,26 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "sdl3"; - meta.maintainers = pkgs.sdl3.meta.maintainers; +{ pkgs, ... }: +{ + name = "sdl3"; + meta.maintainers = pkgs.sdl3.meta.maintainers; - nodes.machine = - { pkgs, ... }: - { - imports = [ ./common/x11.nix ]; + nodes.machine = + { pkgs, ... }: + { + imports = [ ./common/x11.nix ]; - environment.systemPackages = [ pkgs.sdl3.passthru.debug-text-example ]; - }; + environment.systemPackages = [ pkgs.sdl3.passthru.debug-text-example ]; + }; - enableOCR = true; + enableOCR = true; - testScript = '' - machine.wait_for_x() + testScript = '' + machine.wait_for_x() - machine.execute("debug-text >&2 &") + machine.execute("debug-text >&2 &") - machine.wait_for_window("examples/renderer/debug-text") - machine.wait_for_text("Hello world") + machine.wait_for_window("examples/renderer/debug-text") + machine.wait_for_text("Hello world") - machine.screenshot("screen") - ''; - } -) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/seafile.nix b/nixos/tests/seafile.nix index 54aa582149d5..85ff857535fe 100644 --- a/nixos/tests/seafile.nix +++ b/nixos/tests/seafile.nix @@ -1,131 +1,129 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - client = - { config, pkgs, ... }: - { - environment.systemPackages = [ - pkgs.seafile-shared - pkgs.curl - ]; - }; - in - { - name = "seafile"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - kampfschlaefer - schmittlauch +{ pkgs, ... }: +let + client = + { config, pkgs, ... }: + { + environment.systemPackages = [ + pkgs.seafile-shared + pkgs.curl ]; }; +in +{ + name = "seafile"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + kampfschlaefer + schmittlauch + ]; + }; - nodes = { - server = - { config, pkgs, ... 
}: - { - services.seafile = { - enable = true; - ccnetSettings.General.SERVICE_URL = "http://server"; - seafileSettings.fileserver.host = "unix:/run/seafile/server.sock"; - adminEmail = "admin@example.com"; - initialAdminPassword = "seafile_password"; - }; - services.nginx = { - enable = true; - virtualHosts."server" = { - locations."/".proxyPass = "http://unix:/run/seahub/gunicorn.sock"; - locations."/seafhttp" = { - proxyPass = "http://unix:/run/seafile/server.sock"; - extraConfig = '' - rewrite ^/seafhttp(.*)$ $1 break; - client_max_body_size 0; - proxy_connect_timeout 36000s; - proxy_read_timeout 36000s; - proxy_send_timeout 36000s; - send_timeout 36000s; - proxy_http_version 1.1; - ''; - }; + nodes = { + server = + { config, pkgs, ... }: + { + services.seafile = { + enable = true; + ccnetSettings.General.SERVICE_URL = "http://server"; + seafileSettings.fileserver.host = "unix:/run/seafile/server.sock"; + adminEmail = "admin@example.com"; + initialAdminPassword = "seafile_password"; + }; + services.nginx = { + enable = true; + virtualHosts."server" = { + locations."/".proxyPass = "http://unix:/run/seahub/gunicorn.sock"; + locations."/seafhttp" = { + proxyPass = "http://unix:/run/seafile/server.sock"; + extraConfig = '' + rewrite ^/seafhttp(.*)$ $1 break; + client_max_body_size 0; + proxy_connect_timeout 36000s; + proxy_read_timeout 36000s; + proxy_send_timeout 36000s; + send_timeout 36000s; + proxy_http_version 1.1; + ''; }; }; - networking.firewall = { - allowedTCPPorts = [ 80 ]; - }; }; - client1 = client pkgs; - client2 = client pkgs; - }; + networking.firewall = { + allowedTCPPorts = [ 80 ]; + }; + }; + client1 = client pkgs; + client2 = client pkgs; + }; - testScript = '' - start_all() + testScript = '' + start_all() - with subtest("start seaf-server"): - server.wait_for_unit("seaf-server.service") - server.wait_for_file("/run/seafile/seafile.sock") + with subtest("start seaf-server"): + server.wait_for_unit("seaf-server.service") + server.wait_for_file("/run/seafile/seafile.sock") - with subtest("start seahub"): - server.wait_for_unit("seahub.service") - server.wait_for_unit("nginx.service") - server.wait_for_file("/run/seahub/gunicorn.sock") + with subtest("start seahub"): + server.wait_for_unit("seahub.service") + server.wait_for_unit("nginx.service") + server.wait_for_file("/run/seahub/gunicorn.sock") - with subtest("client1 fetch seahub page"): - client1.succeed("curl -L http://server | grep 'Log In' >&2") + with subtest("client1 fetch seahub page"): + client1.succeed("curl -L http://server | grep 'Log In' >&2") - with subtest("client1 connect"): - client1.wait_for_unit("default.target") - client1.succeed("seaf-cli init -d . >&2") - client1.succeed("seaf-cli start >&2") - client1.succeed( - "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password >&2" - ) + with subtest("client1 connect"): + client1.wait_for_unit("default.target") + client1.succeed("seaf-cli init -d . 
>&2") + client1.succeed("seaf-cli start >&2") + client1.succeed( + "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password >&2" + ) - libid = client1.succeed( - 'seaf-cli create -s http://server -n test01 -u admin\@example.com -p seafile_password -t "first test library"' - ).strip() + libid = client1.succeed( + 'seaf-cli create -s http://server -n test01 -u admin\@example.com -p seafile_password -t "first test library"' + ).strip() - client1.succeed( - "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password |grep test01" - ) - client1.fail( - "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password |grep test02" - ) + client1.succeed( + "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password |grep test01" + ) + client1.fail( + "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password |grep test02" + ) - client1.succeed( - f"seaf-cli download -l {libid} -s http://server -u admin\@example.com -p seafile_password -d . >&2" - ) + client1.succeed( + f"seaf-cli download -l {libid} -s http://server -u admin\@example.com -p seafile_password -d . >&2" + ) - client1.wait_until_succeeds("seaf-cli status |grep synchronized >&2") + client1.wait_until_succeeds("seaf-cli status |grep synchronized >&2") - client1.succeed("ls -la >&2") - client1.succeed("ls -la test01 >&2") + client1.succeed("ls -la >&2") + client1.succeed("ls -la test01 >&2") - client1.execute("echo bla > test01/first_file") + client1.execute("echo bla > test01/first_file") - client1.wait_until_succeeds("seaf-cli status |grep synchronized >&2") + client1.wait_until_succeeds("seaf-cli status |grep synchronized >&2") - with subtest("client2 sync"): - client2.wait_for_unit("default.target") + with subtest("client2 sync"): + client2.wait_for_unit("default.target") - client2.succeed("seaf-cli init -d . >&2") - client2.succeed("seaf-cli start >&2") + client2.succeed("seaf-cli init -d . >&2") + client2.succeed("seaf-cli start >&2") - client2.succeed( - "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password >&2" - ) + client2.succeed( + "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password >&2" + ) - libid = client2.succeed( - "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password |grep test01 |cut -d' ' -f 2" - ).strip() + libid = client2.succeed( + "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password |grep test01 |cut -d' ' -f 2" + ).strip() - client2.succeed( - f"seaf-cli download -l {libid} -s http://server -u admin\@example.com -p seafile_password -d . >&2" - ) + client2.succeed( + f"seaf-cli download -l {libid} -s http://server -u admin\@example.com -p seafile_password -d . >&2" + ) - client2.wait_until_succeeds("seaf-cli status |grep synchronized >&2") + client2.wait_until_succeeds("seaf-cli status |grep synchronized >&2") - client2.succeed("ls -la test01 >&2") + client2.succeed("ls -la test01 >&2") - client2.succeed('[ `cat test01/first_file` = "bla" ]') - ''; - } -) + client2.succeed('[ `cat test01/first_file` = "bla" ]') + ''; +} diff --git a/nixos/tests/seatd.nix b/nixos/tests/seatd.nix index 8ba79ca49584..117c1ea4be50 100644 --- a/nixos/tests/seatd.nix +++ b/nixos/tests/seatd.nix @@ -1,58 +1,56 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... 
}: - let - seatd-test = pkgs.writeShellApplication { - name = "seatd-client-pid"; - text = '' - journalctl -u seatd --no-pager -b | while read -r line; do - case "$line" in - *"New client connected"*) - line="''${line##*pid: }" - pid="''${line%%,*}" - ;; - *"Opened client"*) - echo "$pid" - exit - esac - done; - ''; - }; - in - { - name = "seatd"; - meta.maintainers = with lib.maintainers; [ sinanmohd ]; - - nodes.machine = - { ... }: - { - imports = [ ./common/user-account.nix ]; - services.getty.autologinUser = "alice"; - users.users.alice.extraGroups = [ - "seat" - "wheel" - ]; - - fonts.enableDefaultPackages = true; - environment.systemPackages = with pkgs; [ - dwl - foot - seatd-test - ]; - - programs.bash.loginShellInit = '' - [ "$(tty)" = "/dev/tty1" ] && - dwl -s 'foot touch /tmp/foot_started' - ''; - - hardware.graphics.enable = true; - virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; - services.seatd.enable = true; - }; - - testScript = '' - machine.wait_for_file("/tmp/foot_started") - machine.succeed("test $(seatd-client-pid) = $(pgrep dwl)") +let + seatd-test = pkgs.writeShellApplication { + name = "seatd-client-pid"; + text = '' + journalctl -u seatd --no-pager -b | while read -r line; do + case "$line" in + *"New client connected"*) + line="''${line##*pid: }" + pid="''${line%%,*}" + ;; + *"Opened client"*) + echo "$pid" + exit + esac + done; ''; - } -) + }; +in +{ + name = "seatd"; + meta.maintainers = with lib.maintainers; [ sinanmohd ]; + + nodes.machine = + { ... }: + { + imports = [ ./common/user-account.nix ]; + services.getty.autologinUser = "alice"; + users.users.alice.extraGroups = [ + "seat" + "wheel" + ]; + + fonts.enableDefaultPackages = true; + environment.systemPackages = with pkgs; [ + dwl + foot + seatd-test + ]; + + programs.bash.loginShellInit = '' + [ "$(tty)" = "/dev/tty1" ] && + dwl -s 'foot touch /tmp/foot_started' + ''; + + hardware.graphics.enable = true; + virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; + services.seatd.enable = true; + }; + + testScript = '' + machine.wait_for_file("/tmp/foot_started") + machine.succeed("test $(seatd-client-pid) = $(pgrep dwl)") + ''; +} diff --git a/nixos/tests/service-runner.nix b/nixos/tests/service-runner.nix index 42d87c384234..e7a552b9e536 100644 --- a/nixos/tests/service-runner.nix +++ b/nixos/tests/service-runner.nix @@ -1,43 +1,41 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "service-runner"; - meta = with pkgs.lib.maintainers; { - maintainers = [ roberth ]; - }; +{ pkgs, ... }: +{ + name = "service-runner"; + meta = with pkgs.lib.maintainers; { + maintainers = [ roberth ]; + }; - nodes = { - machine = - { pkgs, lib, ... }: - { - services.nginx.enable = true; - services.nginx.virtualHosts.machine.root = pkgs.runCommand "webroot" { } '' - mkdir $out - echo 'yay' >$out/index.html - ''; - systemd.services.nginx.enable = false; - }; + nodes = { + machine = + { pkgs, lib, ... }: + { + services.nginx.enable = true; + services.nginx.virtualHosts.machine.root = pkgs.runCommand "webroot" { } '' + mkdir $out + echo 'yay' >$out/index.html + ''; + systemd.services.nginx.enable = false; + }; - }; + }; - testScript = - { nodes, ... }: - '' - url = "http://localhost/index.html" + testScript = + { nodes, ... 
}: + '' + url = "http://localhost/index.html" - with subtest("check systemd.services.nginx.runner"): - machine.fail(f"curl {url}") - machine.succeed( - """ - mkdir -p /run/nginx /var/log/nginx /var/cache/nginx - ${nodes.machine.config.systemd.services.nginx.runner} >&2 & - echo $!>my-nginx.pid - """ - ) - machine.wait_for_open_port(80) - machine.succeed(f"curl -f {url}") - machine.succeed("kill -INT $(cat my-nginx.pid)") - machine.wait_for_closed_port(80) - ''; - } -) + with subtest("check systemd.services.nginx.runner"): + machine.fail(f"curl {url}") + machine.succeed( + """ + mkdir -p /run/nginx /var/log/nginx /var/cache/nginx + ${nodes.machine.config.systemd.services.nginx.runner} >&2 & + echo $!>my-nginx.pid + """ + ) + machine.wait_for_open_port(80) + machine.succeed(f"curl -f {url}") + machine.succeed("kill -INT $(cat my-nginx.pid)") + machine.wait_for_closed_port(80) + ''; +} diff --git a/nixos/tests/sfxr-qt.nix b/nixos/tests/sfxr-qt.nix index d3696a576041..f62eeacafbd5 100644 --- a/nixos/tests/sfxr-qt.nix +++ b/nixos/tests/sfxr-qt.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "sfxr-qt"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "sfxr-qt"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = [ pkgs.sfxr-qt ]; }; - machine = - { config, pkgs, ... }: - { - imports = [ - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - environment.systemPackages = [ pkgs.sfxr-qt ]; - }; + testScript = '' + machine.wait_for_x() + # Add a dummy sound card, or the program won't start + machine.execute("modprobe snd-dummy") - enableOCR = true; + machine.execute("sfxr-qt >&2 &") - testScript = '' - machine.wait_for_x() - # Add a dummy sound card, or the program won't start - machine.execute("modprobe snd-dummy") - - machine.execute("sfxr-qt >&2 &") - - machine.wait_for_window(r"sfxr") - machine.sleep(10) - machine.wait_for_text("requency") - machine.screenshot("screen") - ''; - } -) + machine.wait_for_window(r"sfxr") + machine.sleep(10) + machine.wait_for_text("requency") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/sgt-puzzles.nix b/nixos/tests/sgt-puzzles.nix index e5a92690854b..6096b0ae78d6 100644 --- a/nixos/tests/sgt-puzzles.nix +++ b/nixos/tests/sgt-puzzles.nix @@ -1,38 +1,36 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "sgt-puzzles"; - meta = with pkgs.lib.maintainers; { - maintainers = [ tomfitzhenry ]; +{ pkgs, ... }: +{ + name = "sgt-puzzles"; + meta = with pkgs.lib.maintainers; { + maintainers = [ tomfitzhenry ]; + }; + + nodes.machine = + { ... }: + + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = with pkgs; [ + sgt-puzzles + ]; }; - nodes.machine = - { ... }: + enableOCR = true; - { - imports = [ - ./common/x11.nix - ]; + testScript = + { nodes, ... }: + '' + start_all() + machine.wait_for_x() - services.xserver.enable = true; - environment.systemPackages = with pkgs; [ - sgt-puzzles - ]; - }; + machine.execute("mines >&2 &") - enableOCR = true; - - testScript = - { nodes, ... 
}: - '' - start_all() - machine.wait_for_x() - - machine.execute("mines >&2 &") - - machine.wait_for_window("Mines") - machine.wait_for_text("Marked") - machine.screenshot("mines") - ''; - } -) + machine.wait_for_window("Mines") + machine.wait_for_text("Marked") + machine.screenshot("mines") + ''; +} diff --git a/nixos/tests/shadow.nix b/nixos/tests/shadow.nix index dba18f127639..585abc599a97 100644 --- a/nixos/tests/shadow.nix +++ b/nixos/tests/shadow.nix @@ -7,174 +7,172 @@ let hashed_yeshash = "$y$j9T$d8Z4EAf8P1SvM/aDFbxMS0$VnTXMp/Hnc7QdCBEaLTq5ZFOAFo2/PM0/xEAFuOE88."; # fnord hashed_sha512crypt = "$6$ymzs8WINZ5wGwQcV$VC2S0cQiX8NVukOLymysTPn4v1zJoJp3NGyhnqyv/dAf4NWZsBWYveQcj6gEJr4ZUjRBRjM0Pj1L8TCQ8hUUp0"; # meow in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "shadow"; - meta = with pkgs.lib.maintainers; { - maintainers = [ nequissimus ]; - }; +{ pkgs, ... }: +{ + name = "shadow"; + meta = with pkgs.lib.maintainers; { + maintainers = [ nequissimus ]; + }; - nodes.shadow = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.shadow ]; + nodes.shadow = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.shadow ]; - users = { - mutableUsers = true; - users.emma = { - isNormalUser = true; - password = password1; - shell = pkgs.bash; - }; - users.layla = { - isNormalUser = true; - password = password2; - shell = pkgs.shadow; - }; - users.ash = { - isNormalUser = true; - password = password4; - shell = pkgs.bash; - }; - users.berta = { - isNormalUser = true; - hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath; - shell = pkgs.bash; - }; - users.yesim = { - isNormalUser = true; - hashedPassword = hashed_yeshash; - shell = pkgs.bash; - }; - users.leo = { - isNormalUser = true; - initialHashedPassword = "!"; - hashedPassword = hashed_sha512crypt; # should take precedence over initialHashedPassword - shell = pkgs.bash; - }; + users = { + mutableUsers = true; + users.emma = { + isNormalUser = true; + password = password1; + shell = pkgs.bash; + }; + users.layla = { + isNormalUser = true; + password = password2; + shell = pkgs.shadow; + }; + users.ash = { + isNormalUser = true; + password = password4; + shell = pkgs.bash; + }; + users.berta = { + isNormalUser = true; + hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath; + shell = pkgs.bash; + }; + users.yesim = { + isNormalUser = true; + hashedPassword = hashed_yeshash; + shell = pkgs.bash; + }; + users.leo = { + isNormalUser = true; + initialHashedPassword = "!"; + hashedPassword = hashed_sha512crypt; # should take precedence over initialHashedPassword + shell = pkgs.bash; }; }; + }; - testScript = '' - shadow.wait_for_unit("multi-user.target") - shadow.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + testScript = '' + shadow.wait_for_unit("multi-user.target") + shadow.wait_until_succeeds("pgrep -f 'agetty.*tty1'") - with subtest("Normal login"): - shadow.send_key("alt-f2") - shadow.wait_until_succeeds("[ $(fgconsole) = 2 ]") - shadow.wait_for_unit("getty@tty2.service") - shadow.wait_until_succeeds("pgrep -f 'agetty.*tty2'") - shadow.wait_until_tty_matches("2", "login: ") - shadow.send_chars("emma\n") - shadow.wait_until_tty_matches("2", "login: emma") - shadow.wait_until_succeeds("pgrep login") - shadow.sleep(2) - shadow.send_chars("${password1}\n") - shadow.send_chars("whoami > /tmp/1\n") - shadow.wait_for_file("/tmp/1") - assert "emma" in shadow.succeed("cat /tmp/1") + with subtest("Normal login"): + shadow.send_key("alt-f2") + shadow.wait_until_succeeds("[ 
$(fgconsole) = 2 ]") + shadow.wait_for_unit("getty@tty2.service") + shadow.wait_until_succeeds("pgrep -f 'agetty.*tty2'") + shadow.wait_until_tty_matches("2", "login: ") + shadow.send_chars("emma\n") + shadow.wait_until_tty_matches("2", "login: emma") + shadow.wait_until_succeeds("pgrep login") + shadow.sleep(2) + shadow.send_chars("${password1}\n") + shadow.send_chars("whoami > /tmp/1\n") + shadow.wait_for_file("/tmp/1") + assert "emma" in shadow.succeed("cat /tmp/1") - with subtest("Switch user"): - shadow.send_chars("su - ash\n") - shadow.sleep(2) - shadow.send_chars("${password4}\n") - shadow.sleep(2) - shadow.send_chars("whoami > /tmp/3\n") - shadow.wait_for_file("/tmp/3") - assert "ash" in shadow.succeed("cat /tmp/3") + with subtest("Switch user"): + shadow.send_chars("su - ash\n") + shadow.sleep(2) + shadow.send_chars("${password4}\n") + shadow.sleep(2) + shadow.send_chars("whoami > /tmp/3\n") + shadow.wait_for_file("/tmp/3") + assert "ash" in shadow.succeed("cat /tmp/3") - with subtest("Change password"): - shadow.send_key("alt-f3") - shadow.wait_until_succeeds("[ $(fgconsole) = 3 ]") - shadow.wait_for_unit("getty@tty3.service") - shadow.wait_until_succeeds("pgrep -f 'agetty.*tty3'") - shadow.wait_until_tty_matches("3", "login: ") - shadow.send_chars("emma\n") - shadow.wait_until_tty_matches("3", "login: emma") - shadow.wait_until_succeeds("pgrep login") - shadow.sleep(2) - shadow.send_chars("${password1}\n") - shadow.send_chars("passwd\n") - shadow.sleep(2) - shadow.send_chars("${password1}\n") - shadow.sleep(2) - shadow.send_chars("${password3}\n") - shadow.sleep(2) - shadow.send_chars("${password3}\n") - shadow.sleep(2) - shadow.send_key("alt-f4") - shadow.wait_until_succeeds("[ $(fgconsole) = 4 ]") - shadow.wait_for_unit("getty@tty4.service") - shadow.wait_until_succeeds("pgrep -f 'agetty.*tty4'") - shadow.wait_until_tty_matches("4", "login: ") - shadow.send_chars("emma\n") - shadow.wait_until_tty_matches("4", "login: emma") - shadow.wait_until_succeeds("pgrep login") - shadow.sleep(2) - shadow.send_chars("${password1}\n") - shadow.wait_until_tty_matches("4", "Login incorrect") - shadow.wait_until_tty_matches("4", "login:") - shadow.send_chars("emma\n") - shadow.wait_until_tty_matches("4", "login: emma") - shadow.wait_until_succeeds("pgrep login") - shadow.sleep(2) - shadow.send_chars("${password3}\n") - shadow.send_chars("whoami > /tmp/2\n") - shadow.wait_for_file("/tmp/2") - assert "emma" in shadow.succeed("cat /tmp/2") + with subtest("Change password"): + shadow.send_key("alt-f3") + shadow.wait_until_succeeds("[ $(fgconsole) = 3 ]") + shadow.wait_for_unit("getty@tty3.service") + shadow.wait_until_succeeds("pgrep -f 'agetty.*tty3'") + shadow.wait_until_tty_matches("3", "login: ") + shadow.send_chars("emma\n") + shadow.wait_until_tty_matches("3", "login: emma") + shadow.wait_until_succeeds("pgrep login") + shadow.sleep(2) + shadow.send_chars("${password1}\n") + shadow.send_chars("passwd\n") + shadow.sleep(2) + shadow.send_chars("${password1}\n") + shadow.sleep(2) + shadow.send_chars("${password3}\n") + shadow.sleep(2) + shadow.send_chars("${password3}\n") + shadow.sleep(2) + shadow.send_key("alt-f4") + shadow.wait_until_succeeds("[ $(fgconsole) = 4 ]") + shadow.wait_for_unit("getty@tty4.service") + shadow.wait_until_succeeds("pgrep -f 'agetty.*tty4'") + shadow.wait_until_tty_matches("4", "login: ") + shadow.send_chars("emma\n") + shadow.wait_until_tty_matches("4", "login: emma") + shadow.wait_until_succeeds("pgrep login") + shadow.sleep(2) + shadow.send_chars("${password1}\n") 
+ shadow.wait_until_tty_matches("4", "Login incorrect") + shadow.wait_until_tty_matches("4", "login:") + shadow.send_chars("emma\n") + shadow.wait_until_tty_matches("4", "login: emma") + shadow.wait_until_succeeds("pgrep login") + shadow.sleep(2) + shadow.send_chars("${password3}\n") + shadow.send_chars("whoami > /tmp/2\n") + shadow.wait_for_file("/tmp/2") + assert "emma" in shadow.succeed("cat /tmp/2") - with subtest("Groups"): - assert "foobar" not in shadow.succeed("groups emma") - shadow.succeed("groupadd foobar") - shadow.succeed("usermod -a -G foobar emma") - assert "foobar" in shadow.succeed("groups emma") + with subtest("Groups"): + assert "foobar" not in shadow.succeed("groups emma") + shadow.succeed("groupadd foobar") + shadow.succeed("usermod -a -G foobar emma") + assert "foobar" in shadow.succeed("groups emma") - with subtest("nologin shell"): - shadow.send_key("alt-f5") - shadow.wait_until_succeeds("[ $(fgconsole) = 5 ]") - shadow.wait_for_unit("getty@tty5.service") - shadow.wait_until_succeeds("pgrep -f 'agetty.*tty5'") - shadow.wait_until_tty_matches("5", "login: ") - shadow.send_chars("layla\n") - shadow.wait_until_tty_matches("5", "login: layla") - shadow.wait_until_succeeds("pgrep login") - shadow.send_chars("${password2}\n") - shadow.wait_until_tty_matches("5", "login:") + with subtest("nologin shell"): + shadow.send_key("alt-f5") + shadow.wait_until_succeeds("[ $(fgconsole) = 5 ]") + shadow.wait_for_unit("getty@tty5.service") + shadow.wait_until_succeeds("pgrep -f 'agetty.*tty5'") + shadow.wait_until_tty_matches("5", "login: ") + shadow.send_chars("layla\n") + shadow.wait_until_tty_matches("5", "login: layla") + shadow.wait_until_succeeds("pgrep login") + shadow.send_chars("${password2}\n") + shadow.wait_until_tty_matches("5", "login:") - with subtest("check alternate password hashes"): - shadow.send_key("alt-f6") - shadow.wait_until_succeeds("[ $(fgconsole) = 6 ]") - for u in ["berta", "yesim"]: - shadow.wait_for_unit("getty@tty6.service") - shadow.wait_until_succeeds("pgrep -f 'agetty.*tty6'") - shadow.wait_until_tty_matches("6", "login: ") - shadow.send_chars(f"{u}\n") - shadow.wait_until_tty_matches("6", f"login: {u}") - shadow.wait_until_succeeds("pgrep login") - shadow.sleep(2) - shadow.send_chars("fnord\n") - shadow.send_chars(f"whoami > /tmp/{u}\n") - shadow.wait_for_file(f"/tmp/{u}") - print(shadow.succeed(f"cat /tmp/{u}")) - assert u in shadow.succeed(f"cat /tmp/{u}") - shadow.send_chars("logout\n") + with subtest("check alternate password hashes"): + shadow.send_key("alt-f6") + shadow.wait_until_succeeds("[ $(fgconsole) = 6 ]") + for u in ["berta", "yesim"]: + shadow.wait_for_unit("getty@tty6.service") + shadow.wait_until_succeeds("pgrep -f 'agetty.*tty6'") + shadow.wait_until_tty_matches("6", "login: ") + shadow.send_chars(f"{u}\n") + shadow.wait_until_tty_matches("6", f"login: {u}") + shadow.wait_until_succeeds("pgrep login") + shadow.sleep(2) + shadow.send_chars("fnord\n") + shadow.send_chars(f"whoami > /tmp/{u}\n") + shadow.wait_for_file(f"/tmp/{u}") + print(shadow.succeed(f"cat /tmp/{u}")) + assert u in shadow.succeed(f"cat /tmp/{u}") + shadow.send_chars("logout\n") - with subtest("Ensure hashedPassword does not get overridden by initialHashedPassword"): - shadow.send_key("alt-f6") - shadow.wait_until_succeeds("[ $(fgconsole) = 6 ]") - shadow.wait_for_unit("getty@tty6.service") - shadow.wait_until_succeeds("pgrep -f 'agetty.*tty6'") - shadow.wait_until_tty_matches("6", "login: ") - shadow.send_chars("leo\n") - shadow.wait_until_tty_matches("6", "login: 
leo") - shadow.wait_until_succeeds("pgrep login") - shadow.sleep(2) - shadow.send_chars("meow\n") - shadow.send_chars("whoami > /tmp/leo\n") - shadow.wait_for_file("/tmp/leo") - assert "leo" in shadow.succeed("cat /tmp/leo") - shadow.send_chars("logout\n") - ''; - } -) + with subtest("Ensure hashedPassword does not get overridden by initialHashedPassword"): + shadow.send_key("alt-f6") + shadow.wait_until_succeeds("[ $(fgconsole) = 6 ]") + shadow.wait_for_unit("getty@tty6.service") + shadow.wait_until_succeeds("pgrep -f 'agetty.*tty6'") + shadow.wait_until_tty_matches("6", "login: ") + shadow.send_chars("leo\n") + shadow.wait_until_tty_matches("6", "login: leo") + shadow.wait_until_succeeds("pgrep login") + shadow.sleep(2) + shadow.send_chars("meow\n") + shadow.send_chars("whoami > /tmp/leo\n") + shadow.wait_for_file("/tmp/leo") + assert "leo" in shadow.succeed("cat /tmp/leo") + shadow.send_chars("logout\n") + ''; +} diff --git a/nixos/tests/shattered-pixel-dungeon.nix b/nixos/tests/shattered-pixel-dungeon.nix index dadd9991e29e..1f2aeeea1438 100644 --- a/nixos/tests/shattered-pixel-dungeon.nix +++ b/nixos/tests/shattered-pixel-dungeon.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "shattered-pixel-dungeon"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "shattered-pixel-dungeon"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = [ pkgs.shattered-pixel-dungeon ]; }; - nodes.machine = - { config, pkgs, ... }: - { - imports = [ - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - environment.systemPackages = [ pkgs.shattered-pixel-dungeon ]; - }; - - enableOCR = true; - - testScript = '' - machine.wait_for_x() - machine.execute("shattered-pixel-dungeon >&2 &") - machine.wait_for_window(r"Shattered Pixel Dungeon") - machine.wait_for_text("Enter") - machine.screenshot("screen") - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.execute("shattered-pixel-dungeon >&2 &") + machine.wait_for_window(r"Shattered Pixel Dungeon") + machine.wait_for_text("Enter") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/shiori.nix b/nixos/tests/shiori.nix index f25807e900f9..161228747ceb 100644 --- a/nixos/tests/shiori.nix +++ b/nixos/tests/shiori.nix @@ -1,90 +1,88 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - { - name = "shiori"; - meta.maintainers = with lib.maintainers; [ minijackson ]; +{ + name = "shiori"; + meta.maintainers = with lib.maintainers; [ minijackson ]; - nodes.machine = - { ... }: - { - services.shiori.enable = true; + nodes.machine = + { ... 
}: + { + services.shiori.enable = true; + }; + + testScript = + let + authJSON = pkgs.writeText "auth.json" ( + builtins.toJSON { + username = "shiori"; + password = "gopher"; + owner = true; + } + ); + + insertBookmark = { + url = "http://example.org"; + title = "Example Bookmark"; }; - testScript = - let - authJSON = pkgs.writeText "auth.json" ( - builtins.toJSON { - username = "shiori"; - password = "gopher"; - owner = true; - } - ); + insertBookmarkJSON = pkgs.writeText "insertBookmark.json" (builtins.toJSON insertBookmark); + in + '' + #import json - insertBookmark = { - url = "http://example.org"; - title = "Example Bookmark"; - }; + machine.wait_for_unit("shiori.service") + machine.wait_for_open_port(8080) + machine.succeed("curl --fail http://localhost:8080/") + machine.succeed("curl --fail --location http://localhost:8080/ | grep -i shiori") - insertBookmarkJSON = pkgs.writeText "insertBookmark.json" (builtins.toJSON insertBookmark); - in - '' - #import json + # The test code below no longer works because the API authentication has changed. - machine.wait_for_unit("shiori.service") - machine.wait_for_open_port(8080) - machine.succeed("curl --fail http://localhost:8080/") - machine.succeed("curl --fail --location http://localhost:8080/ | grep -i shiori") + #with subtest("login"): + # auth_json = machine.succeed( + # "curl --fail --location http://localhost:8080/api/login " + # "-X POST -H 'Content-Type:application/json' -d @${authJSON}" + # ) + # auth_ret = json.loads(auth_json) + # session_id = auth_ret["session"] - # The test code below no longer works because the API authentication has changed. + #with subtest("bookmarks"): + # with subtest("first use no bookmarks"): + # bookmarks_json = machine.succeed( + # ( + # "curl --fail --location http://localhost:8080/api/bookmarks " + # "-H 'X-Session-Id:{}'" + # ).format(session_id) + # ) - #with subtest("login"): - # auth_json = machine.succeed( - # "curl --fail --location http://localhost:8080/api/login " - # "-X POST -H 'Content-Type:application/json' -d @${authJSON}" - # ) - # auth_ret = json.loads(auth_json) - # session_id = auth_ret["session"] + # if json.loads(bookmarks_json)["bookmarks"] != []: + # raise Exception("Shiori have a bookmark on first use") - #with subtest("bookmarks"): - # with subtest("first use no bookmarks"): - # bookmarks_json = machine.succeed( - # ( - # "curl --fail --location http://localhost:8080/api/bookmarks " - # "-H 'X-Session-Id:{}'" - # ).format(session_id) - # ) + # with subtest("insert bookmark"): + # machine.succeed( + # ( + # "curl --fail --location http://localhost:8080/api/bookmarks " + # "-X POST -H 'X-Session-Id:{}' " + # "-H 'Content-Type:application/json' -d @${insertBookmarkJSON}" + # ).format(session_id) + # ) - # if json.loads(bookmarks_json)["bookmarks"] != []: - # raise Exception("Shiori have a bookmark on first use") + # with subtest("get inserted bookmark"): + # bookmarks_json = machine.succeed( + # ( + # "curl --fail --location http://localhost:8080/api/bookmarks " + # "-H 'X-Session-Id:{}'" + # ).format(session_id) + # ) - # with subtest("insert bookmark"): - # machine.succeed( - # ( - # "curl --fail --location http://localhost:8080/api/bookmarks " - # "-X POST -H 'X-Session-Id:{}' " - # "-H 'Content-Type:application/json' -d @${insertBookmarkJSON}" - # ).format(session_id) - # ) + # bookmarks = json.loads(bookmarks_json)["bookmarks"] + # if len(bookmarks) != 1: + # raise Exception("Shiori didn't save the bookmark") - # with subtest("get inserted bookmark"): - # bookmarks_json = 
machine.succeed( - # ( - # "curl --fail --location http://localhost:8080/api/bookmarks " - # "-H 'X-Session-Id:{}'" - # ).format(session_id) - # ) - - # bookmarks = json.loads(bookmarks_json)["bookmarks"] - # if len(bookmarks) != 1: - # raise Exception("Shiori didn't save the bookmark") - - # bookmark = bookmarks[0] - # if ( - # bookmark["url"] != "${insertBookmark.url}" - # or bookmark["title"] != "${insertBookmark.title}" - # ): - # raise Exception("Inserted bookmark doesn't have same URL or title") - ''; - } -) + # bookmark = bookmarks[0] + # if ( + # bookmark["url"] != "${insertBookmark.url}" + # or bookmark["title"] != "${insertBookmark.title}" + # ): + # raise Exception("Inserted bookmark doesn't have same URL or title") + ''; +} diff --git a/nixos/tests/silverbullet.nix b/nixos/tests/silverbullet.nix index a14e59cccf48..af2c6d5ec2a7 100644 --- a/nixos/tests/silverbullet.nix +++ b/nixos/tests/silverbullet.nix @@ -1,59 +1,57 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "silverbullet"; - meta.maintainers = with lib.maintainers; [ aorith ]; +{ lib, ... }: +{ + name = "silverbullet"; + meta.maintainers = with lib.maintainers; [ aorith ]; - nodes.simple = - { ... }: - { - services.silverbullet.enable = true; + nodes.simple = + { ... }: + { + services.silverbullet.enable = true; + }; + + nodes.configured = + { pkgs, ... }: + { + users.users.test.isNormalUser = true; + users.groups.test = { }; + + services.silverbullet = { + enable = true; + package = pkgs.silverbullet; + listenPort = 3001; + listenAddress = "localhost"; + spaceDir = "/home/test/silverbullet"; + user = "test"; + group = "test"; + envFile = pkgs.writeText "silverbullet.env" '' + SB_USER=user:password + SB_AUTH_TOKEN=test + ''; + extraArgs = [ + "--reindex" + "--db /home/test/silverbullet/custom.db" + ]; }; + }; - nodes.configured = - { pkgs, ... }: - { - users.users.test.isNormalUser = true; - users.groups.test = { }; + testScript = + { nodes, ... }: + '' + PORT = ${builtins.toString nodes.simple.services.silverbullet.listenPort} + ADDRESS = "${nodes.simple.services.silverbullet.listenAddress}" + SPACEDIR = "${nodes.simple.services.silverbullet.spaceDir}" + simple.wait_for_unit("silverbullet.service") + simple.wait_for_open_port(PORT) + simple.succeed(f"curl --max-time 5 -s -v -o /dev/null --fail http://{ADDRESS}:{PORT}/") + simple.succeed(f"test -d '{SPACEDIR}'") - services.silverbullet = { - enable = true; - package = pkgs.silverbullet; - listenPort = 3001; - listenAddress = "localhost"; - spaceDir = "/home/test/silverbullet"; - user = "test"; - group = "test"; - envFile = pkgs.writeText "silverbullet.env" '' - SB_USER=user:password - SB_AUTH_TOKEN=test - ''; - extraArgs = [ - "--reindex" - "--db /home/test/silverbullet/custom.db" - ]; - }; - }; - - testScript = - { nodes, ... 
}: - '' - PORT = ${builtins.toString nodes.simple.services.silverbullet.listenPort} - ADDRESS = "${nodes.simple.services.silverbullet.listenAddress}" - SPACEDIR = "${nodes.simple.services.silverbullet.spaceDir}" - simple.wait_for_unit("silverbullet.service") - simple.wait_for_open_port(PORT) - simple.succeed(f"curl --max-time 5 -s -v -o /dev/null --fail http://{ADDRESS}:{PORT}/") - simple.succeed(f"test -d '{SPACEDIR}'") - - PORT = ${builtins.toString nodes.configured.services.silverbullet.listenPort} - ADDRESS = "${nodes.configured.services.silverbullet.listenAddress}" - SPACEDIR = "${nodes.configured.services.silverbullet.spaceDir}" - configured.wait_for_unit("silverbullet.service") - configured.wait_for_open_port(PORT) - assert int(configured.succeed(f"curl --max-time 5 -s -o /dev/null -w '%{{http_code}}' -XPUT -d 'test' --fail http://{ADDRESS}:{PORT}/test.md -H'Authorization: Bearer test'")) == 200 - assert int(configured.fail(f"curl --max-time 5 -s -o /dev/null -w '%{{http_code}}' -XPUT -d 'test' --fail http://{ADDRESS}:{PORT}/test.md -H'Authorization: Bearer wrong'")) == 401 - configured.succeed(f"test -d '{SPACEDIR}'") - ''; - } -) + PORT = ${builtins.toString nodes.configured.services.silverbullet.listenPort} + ADDRESS = "${nodes.configured.services.silverbullet.listenAddress}" + SPACEDIR = "${nodes.configured.services.silverbullet.spaceDir}" + configured.wait_for_unit("silverbullet.service") + configured.wait_for_open_port(PORT) + assert int(configured.succeed(f"curl --max-time 5 -s -o /dev/null -w '%{{http_code}}' -XPUT -d 'test' --fail http://{ADDRESS}:{PORT}/test.md -H'Authorization: Bearer test'")) == 200 + assert int(configured.fail(f"curl --max-time 5 -s -o /dev/null -w '%{{http_code}}' -XPUT -d 'test' --fail http://{ADDRESS}:{PORT}/test.md -H'Authorization: Bearer wrong'")) == 401 + configured.succeed(f"test -d '{SPACEDIR}'") + ''; +} diff --git a/nixos/tests/simple.nix b/nixos/tests/simple.nix index ab9cf1d9768f..3695f3ff9038 100644 --- a/nixos/tests/simple.nix +++ b/nixos/tests/simple.nix @@ -1,21 +1,19 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "simple"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; +{ pkgs, ... }: +{ + name = "simple"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; + + nodes.machine = + { ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; }; - nodes.machine = - { ... }: - { - imports = [ ../modules/profiles/minimal.nix ]; - }; - - testScript = '' - start_all() - machine.wait_for_unit("multi-user.target") - machine.shutdown() - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("multi-user.target") + machine.shutdown() + ''; +} diff --git a/nixos/tests/sing-box.nix b/nixos/tests/sing-box.nix index a8a287586af2..6d105253f29c 100644 --- a/nixos/tests/sing-box.nix +++ b/nixos/tests/sing-box.nix @@ -1,548 +1,546 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let - wg-keys = import ./wireguard/snakeoil-keys.nix; +{ lib, pkgs, ... 
}: +let + wg-keys = import ./wireguard/snakeoil-keys.nix; - target_host = "acme.test"; - server_host = "sing-box.test"; + target_host = "acme.test"; + server_host = "sing-box.test"; - hosts = { - "${target_host}" = "1.1.1.1"; - "${server_host}" = "1.1.1.2"; - }; - hostsEntries = lib.mapAttrs' (k: v: { - name = v; - value = lib.singleton k; - }) hosts; + hosts = { + "${target_host}" = "1.1.1.1"; + "${server_host}" = "1.1.1.2"; + }; + hostsEntries = lib.mapAttrs' (k: v: { + name = v; + value = lib.singleton k; + }) hosts; - vmessPort = 1080; - vmessUUID = "bf000d23-0752-40b4-affe-68f7707a9661"; - vmessInbound = { - type = "vmess"; - tag = "inbound:vmess"; - listen = "0.0.0.0"; - listen_port = vmessPort; - users = [ - { - name = "sekai"; - uuid = vmessUUID; - alterId = 0; - } - ]; - }; - vmessOutbound = { - type = "vmess"; - tag = "outbound:vmess"; - server = server_host; - server_port = vmessPort; - uuid = vmessUUID; - security = "auto"; - alter_id = 0; - }; + vmessPort = 1080; + vmessUUID = "bf000d23-0752-40b4-affe-68f7707a9661"; + vmessInbound = { + type = "vmess"; + tag = "inbound:vmess"; + listen = "0.0.0.0"; + listen_port = vmessPort; + users = [ + { + name = "sekai"; + uuid = vmessUUID; + alterId = 0; + } + ]; + }; + vmessOutbound = { + type = "vmess"; + tag = "outbound:vmess"; + server = server_host; + server_port = vmessPort; + uuid = vmessUUID; + security = "auto"; + alter_id = 0; + }; - tunInbound = { - type = "tun"; - tag = "inbound:tun"; - interface_name = "tun0"; - address = [ - "172.16.0.1/30" - "fd00::1/126" - ]; - auto_route = true; - iproute2_table_index = 2024; - iproute2_rule_index = 9001; - route_address = [ - "${hosts."${target_host}"}/32" - ]; - route_exclude_address = [ - "${hosts."${server_host}"}/32" - ]; - strict_route = false; - sniff = true; - sniff_override_destination = false; - }; + tunInbound = { + type = "tun"; + tag = "inbound:tun"; + interface_name = "tun0"; + address = [ + "172.16.0.1/30" + "fd00::1/126" + ]; + auto_route = true; + iproute2_table_index = 2024; + iproute2_rule_index = 9001; + route_address = [ + "${hosts."${target_host}"}/32" + ]; + route_exclude_address = [ + "${hosts."${server_host}"}/32" + ]; + strict_route = false; + sniff = true; + sniff_override_destination = false; + }; - tproxyPort = 1081; - tproxyPost = pkgs.writeShellApplication { - name = "exe"; - runtimeInputs = with pkgs; [ - iproute2 - iptables - ]; - text = '' - ip route add local default dev lo table 100 - ip rule add fwmark 1 table 100 + tproxyPort = 1081; + tproxyPost = pkgs.writeShellApplication { + name = "exe"; + runtimeInputs = with pkgs; [ + iproute2 + iptables + ]; + text = '' + ip route add local default dev lo table 100 + ip rule add fwmark 1 table 100 - iptables -t mangle -N SING_BOX - iptables -t mangle -A SING_BOX -d 100.64.0.0/10 -j RETURN - iptables -t mangle -A SING_BOX -d 127.0.0.0/8 -j RETURN - iptables -t mangle -A SING_BOX -d 169.254.0.0/16 -j RETURN - iptables -t mangle -A SING_BOX -d 172.16.0.0/12 -j RETURN - iptables -t mangle -A SING_BOX -d 192.0.0.0/24 -j RETURN - iptables -t mangle -A SING_BOX -d 224.0.0.0/4 -j RETURN - iptables -t mangle -A SING_BOX -d 240.0.0.0/4 -j RETURN - iptables -t mangle -A SING_BOX -d 255.255.255.255/32 -j RETURN + iptables -t mangle -N SING_BOX + iptables -t mangle -A SING_BOX -d 100.64.0.0/10 -j RETURN + iptables -t mangle -A SING_BOX -d 127.0.0.0/8 -j RETURN + iptables -t mangle -A SING_BOX -d 169.254.0.0/16 -j RETURN + iptables -t mangle -A SING_BOX -d 172.16.0.0/12 -j RETURN + iptables -t mangle -A SING_BOX -d 
192.0.0.0/24 -j RETURN + iptables -t mangle -A SING_BOX -d 224.0.0.0/4 -j RETURN + iptables -t mangle -A SING_BOX -d 240.0.0.0/4 -j RETURN + iptables -t mangle -A SING_BOX -d 255.255.255.255/32 -j RETURN - iptables -t mangle -A SING_BOX -d ${hosts."${server_host}"}/32 -p tcp -j RETURN - iptables -t mangle -A SING_BOX -d ${hosts."${server_host}"}/32 -p udp -j RETURN + iptables -t mangle -A SING_BOX -d ${hosts."${server_host}"}/32 -p tcp -j RETURN + iptables -t mangle -A SING_BOX -d ${hosts."${server_host}"}/32 -p udp -j RETURN - iptables -t mangle -A SING_BOX -d ${hosts."${target_host}"}/32 -p tcp -j TPROXY --on-port ${toString tproxyPort} --tproxy-mark 1 - iptables -t mangle -A SING_BOX -d ${hosts."${target_host}"}/32 -p udp -j TPROXY --on-port ${toString tproxyPort} --tproxy-mark 1 - iptables -t mangle -A PREROUTING -j SING_BOX + iptables -t mangle -A SING_BOX -d ${hosts."${target_host}"}/32 -p tcp -j TPROXY --on-port ${toString tproxyPort} --tproxy-mark 1 + iptables -t mangle -A SING_BOX -d ${hosts."${target_host}"}/32 -p udp -j TPROXY --on-port ${toString tproxyPort} --tproxy-mark 1 + iptables -t mangle -A PREROUTING -j SING_BOX - iptables -t mangle -N SING_BOX_SELF - iptables -t mangle -A SING_BOX_SELF -d 100.64.0.0/10 -j RETURN - iptables -t mangle -A SING_BOX_SELF -d 127.0.0.0/8 -j RETURN - iptables -t mangle -A SING_BOX_SELF -d 169.254.0.0/16 -j RETURN - iptables -t mangle -A SING_BOX_SELF -d 172.16.0.0/12 -j RETURN - iptables -t mangle -A SING_BOX_SELF -d 192.0.0.0/24 -j RETURN - iptables -t mangle -A SING_BOX_SELF -d 224.0.0.0/4 -j RETURN - iptables -t mangle -A SING_BOX_SELF -d 240.0.0.0/4 -j RETURN - iptables -t mangle -A SING_BOX_SELF -d 255.255.255.255/32 -j RETURN - iptables -t mangle -A SING_BOX_SELF -j RETURN -m mark --mark 1234 + iptables -t mangle -N SING_BOX_SELF + iptables -t mangle -A SING_BOX_SELF -d 100.64.0.0/10 -j RETURN + iptables -t mangle -A SING_BOX_SELF -d 127.0.0.0/8 -j RETURN + iptables -t mangle -A SING_BOX_SELF -d 169.254.0.0/16 -j RETURN + iptables -t mangle -A SING_BOX_SELF -d 172.16.0.0/12 -j RETURN + iptables -t mangle -A SING_BOX_SELF -d 192.0.0.0/24 -j RETURN + iptables -t mangle -A SING_BOX_SELF -d 224.0.0.0/4 -j RETURN + iptables -t mangle -A SING_BOX_SELF -d 240.0.0.0/4 -j RETURN + iptables -t mangle -A SING_BOX_SELF -d 255.255.255.255/32 -j RETURN + iptables -t mangle -A SING_BOX_SELF -j RETURN -m mark --mark 1234 - iptables -t mangle -A SING_BOX_SELF -d ${hosts."${server_host}"}/32 -p tcp -j RETURN - iptables -t mangle -A SING_BOX_SELF -d ${hosts."${server_host}"}/32 -p udp -j RETURN - iptables -t mangle -A SING_BOX_SELF -p tcp -j MARK --set-mark 1 - iptables -t mangle -A SING_BOX_SELF -p udp -j MARK --set-mark 1 - iptables -t mangle -A OUTPUT -j SING_BOX_SELF - ''; - }; - in - { + iptables -t mangle -A SING_BOX_SELF -d ${hosts."${server_host}"}/32 -p tcp -j RETURN + iptables -t mangle -A SING_BOX_SELF -d ${hosts."${server_host}"}/32 -p udp -j RETURN + iptables -t mangle -A SING_BOX_SELF -p tcp -j MARK --set-mark 1 + iptables -t mangle -A SING_BOX_SELF -p udp -j MARK --set-mark 1 + iptables -t mangle -A OUTPUT -j SING_BOX_SELF + ''; + }; +in +{ - name = "sing-box"; + name = "sing-box"; - meta = { - maintainers = with lib.maintainers; [ nickcao ]; - }; + meta = { + maintainers = with lib.maintainers; [ nickcao ]; + }; - nodes = { - target = - { pkgs, ... 
}: - { - networking = { - firewall.enable = false; - hosts = hostsEntries; - useDHCP = false; - interfaces.eth1 = { - ipv4.addresses = [ - { - address = hosts."${target_host}"; - prefixLength = 24; - } - ]; - }; - }; - - services.dnsmasq.enable = true; - - services.nginx = { - enable = true; - package = pkgs.nginxQuic; - - virtualHosts."${target_host}" = { - onlySSL = true; - sslCertificate = ./common/acme/server/acme.test.cert.pem; - sslCertificateKey = ./common/acme/server/acme.test.key.pem; - http2 = true; - http3 = true; - http3_hq = false; - quic = true; - reuseport = true; - locations."/" = { - extraConfig = '' - default_type text/plain; - return 200 "$server_protocol $remote_addr"; - allow ${hosts."${server_host}"}/32; - deny all; - ''; - }; - }; - }; - }; - - server = - { pkgs, ... }: - { - boot.kernel.sysctl = { - "net.ipv4.conf.all.forwarding" = 1; - }; - - networking = { - firewall.enable = false; - hosts = hostsEntries; - useDHCP = false; - interfaces.eth1 = { - ipv4.addresses = [ - { - address = hosts."${server_host}"; - prefixLength = 24; - } - ]; - }; - }; - - systemd.network.wait-online.ignoredInterfaces = [ "wg0" ]; - - networking.wg-quick.interfaces.wg0 = { - address = [ - "10.23.42.1/24" + nodes = { + target = + { pkgs, ... }: + { + networking = { + firewall.enable = false; + hosts = hostsEntries; + useDHCP = false; + interfaces.eth1 = { + ipv4.addresses = [ + { + address = hosts."${target_host}"; + prefixLength = 24; + } ]; - listenPort = 2408; - mtu = 1500; + }; + }; - inherit (wg-keys.peer0) privateKey; + services.dnsmasq.enable = true; - peers = lib.singleton { - allowedIPs = [ - "10.23.42.2/32" - ]; + services.nginx = { + enable = true; + package = pkgs.nginxQuic; - inherit (wg-keys.peer1) publicKey; + virtualHosts."${target_host}" = { + onlySSL = true; + sslCertificate = ./common/acme/server/acme.test.cert.pem; + sslCertificateKey = ./common/acme/server/acme.test.key.pem; + http2 = true; + http3 = true; + http3_hq = false; + quic = true; + reuseport = true; + locations."/" = { + extraConfig = '' + default_type text/plain; + return 200 "$server_protocol $remote_addr"; + allow ${hosts."${server_host}"}/32; + deny all; + ''; }; + }; + }; + }; - postUp = '' - ${pkgs.iptables}/bin/iptables -A FORWARD -i wg0 -j ACCEPT - ${pkgs.iptables}/bin/iptables -t nat -A POSTROUTING -s 10.23.42.0/24 -o eth1 -j MASQUERADE - ''; + server = + { pkgs, ... }: + { + boot.kernel.sysctl = { + "net.ipv4.conf.all.forwarding" = 1; + }; + + networking = { + firewall.enable = false; + hosts = hostsEntries; + useDHCP = false; + interfaces.eth1 = { + ipv4.addresses = [ + { + address = hosts."${server_host}"; + prefixLength = 24; + } + ]; + }; + }; + + systemd.network.wait-online.ignoredInterfaces = [ "wg0" ]; + + networking.wg-quick.interfaces.wg0 = { + address = [ + "10.23.42.1/24" + ]; + listenPort = 2408; + mtu = 1500; + + inherit (wg-keys.peer0) privateKey; + + peers = lib.singleton { + allowedIPs = [ + "10.23.42.2/32" + ]; + + inherit (wg-keys.peer1) publicKey; }; - services.sing-box = { - enable = true; - settings = { - inbounds = [ - vmessInbound - ]; - outbounds = [ + postUp = '' + ${pkgs.iptables}/bin/iptables -A FORWARD -i wg0 -j ACCEPT + ${pkgs.iptables}/bin/iptables -t nat -A POSTROUTING -s 10.23.42.0/24 -o eth1 -j MASQUERADE + ''; + }; + + services.sing-box = { + enable = true; + settings = { + inbounds = [ + vmessInbound + ]; + outbounds = [ + { + type = "direct"; + tag = "outbound:direct"; + } + ]; + }; + }; + }; + + tun = + { pkgs, ... 
}: + { + networking = { + firewall.enable = false; + hosts = hostsEntries; + useDHCP = false; + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "1.1.1.3"; + prefixLength = 24; + } + ]; + }; + }; + + security.pki.certificates = [ + (builtins.readFile ./common/acme/server/ca.cert.pem) + ]; + + environment.systemPackages = [ + pkgs.curlHTTP3 + pkgs.iproute2 + ]; + + services.sing-box = { + enable = true; + settings = { + inbounds = [ + tunInbound + ]; + outbounds = [ + { + type = "block"; + tag = "outbound:block"; + } + { + type = "direct"; + tag = "outbound:direct"; + } + vmessOutbound + ]; + route = { + final = "outbound:block"; + rules = [ { - type = "direct"; - tag = "outbound:direct"; + inbound = [ + "inbound:tun" + ]; + outbound = "outbound:vmess"; } ]; }; }; }; + }; - tun = - { pkgs, ... }: - { - networking = { - firewall.enable = false; - hosts = hostsEntries; - useDHCP = false; - interfaces.eth1 = { - ipv4.addresses = [ + wireguard = + { pkgs, ... }: + { + networking = { + firewall.enable = false; + hosts = hostsEntries; + useDHCP = false; + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "1.1.1.4"; + prefixLength = 24; + } + ]; + }; + }; + + security.pki.certificates = [ + (builtins.readFile ./common/acme/server/ca.cert.pem) + ]; + + environment.systemPackages = [ + pkgs.curlHTTP3 + pkgs.iproute2 + ]; + + services.sing-box = { + enable = true; + settings = { + outbounds = [ + { + type = "block"; + tag = "outbound:block"; + } + { + type = "direct"; + tag = "outbound:direct"; + } + { + detour = "outbound:direct"; + type = "wireguard"; + tag = "outbound:wireguard"; + interface_name = "wg0"; + local_address = [ "10.23.42.2/32" ]; + mtu = 1280; + private_key = wg-keys.peer1.privateKey; + peer_public_key = wg-keys.peer0.publicKey; + server = server_host; + server_port = 2408; + system_interface = true; + } + ]; + route = { + final = "outbound:block"; + }; + }; + }; + }; + + tproxy = + { pkgs, ... }: + { + networking = { + firewall.enable = false; + hosts = hostsEntries; + useDHCP = false; + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "1.1.1.5"; + prefixLength = 24; + } + ]; + }; + }; + + security.pki.certificates = [ + (builtins.readFile ./common/acme/server/ca.cert.pem) + ]; + + environment.systemPackages = [ pkgs.curlHTTP3 ]; + + systemd.services.sing-box.serviceConfig.ExecStartPost = [ + "+${tproxyPost}/bin/exe" + ]; + + services.sing-box = { + enable = true; + settings = { + inbounds = [ + { + tag = "inbound:tproxy"; + type = "tproxy"; + listen = "0.0.0.0"; + listen_port = tproxyPort; + udp_fragment = true; + sniff = true; + sniff_override_destination = false; + } + ]; + outbounds = [ + { + type = "block"; + tag = "outbound:block"; + } + { + type = "direct"; + tag = "outbound:direct"; + } + vmessOutbound + ]; + route = { + final = "outbound:block"; + rules = [ { - address = "1.1.1.3"; - prefixLength = 24; + inbound = [ + "inbound:tproxy" + ]; + outbound = "outbound:vmess"; } ]; }; }; + }; + }; - security.pki.certificates = [ - (builtins.readFile ./common/acme/server/ca.cert.pem) - ]; + fakeip = + { pkgs, ... 
}: + { + networking = { + firewall.enable = false; + hosts = hostsEntries; + useDHCP = false; + interfaces.eth1 = { + ipv4.addresses = [ + { + address = "1.1.1.6"; + prefixLength = 24; + } + ]; + }; + }; - environment.systemPackages = [ - pkgs.curlHTTP3 - pkgs.iproute2 - ]; + environment.systemPackages = [ pkgs.dnsutils ]; - services.sing-box = { - enable = true; - settings = { - inbounds = [ - tunInbound - ]; - outbounds = [ - { - type = "block"; - tag = "outbound:block"; - } - { - type = "direct"; - tag = "outbound:direct"; - } - vmessOutbound - ]; - route = { - final = "outbound:block"; - rules = [ - { - inbound = [ - "inbound:tun" - ]; - outbound = "outbound:vmess"; - } - ]; + services.sing-box = { + enable = true; + settings = { + dns = { + final = "dns:default"; + independent_cache = true; + fakeip = { + enabled = true; + "inet4_range" = "198.18.0.0/16"; }; - }; - }; - }; - - wireguard = - { pkgs, ... }: - { - networking = { - firewall.enable = false; - hosts = hostsEntries; - useDHCP = false; - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "1.1.1.4"; - prefixLength = 24; - } - ]; - }; - }; - - security.pki.certificates = [ - (builtins.readFile ./common/acme/server/ca.cert.pem) - ]; - - environment.systemPackages = [ - pkgs.curlHTTP3 - pkgs.iproute2 - ]; - - services.sing-box = { - enable = true; - settings = { - outbounds = [ - { - type = "block"; - tag = "outbound:block"; - } - { - type = "direct"; - tag = "outbound:direct"; - } + servers = [ { detour = "outbound:direct"; - type = "wireguard"; - tag = "outbound:wireguard"; - interface_name = "wg0"; - local_address = [ "10.23.42.2/32" ]; - mtu = 1280; - private_key = wg-keys.peer1.privateKey; - peer_public_key = wg-keys.peer0.publicKey; - server = server_host; - server_port = 2408; - system_interface = true; + tag = "dns:default"; + address = hosts."${target_host}"; + } + { + tag = "dns:fakeip"; + address = "fakeip"; } ]; - route = { - final = "outbound:block"; - }; - }; - }; - }; - - tproxy = - { pkgs, ... }: - { - networking = { - firewall.enable = false; - hosts = hostsEntries; - useDHCP = false; - interfaces.eth1 = { - ipv4.addresses = [ + rules = [ { - address = "1.1.1.5"; - prefixLength = 24; + outbound = [ "any" ]; + server = "dns:default"; + } + { + query_type = [ + "A" + "AAAA" + ]; + server = "dns:fakeip"; + } ]; }; - }; - - security.pki.certificates = [ - (builtins.readFile ./common/acme/server/ca.cert.pem) - ]; - - environment.systemPackages = [ pkgs.curlHTTP3 ]; - - systemd.services.sing-box.serviceConfig.ExecStartPost = [ - "+${tproxyPost}/bin/exe" - ]; - - services.sing-box = { - enable = true; - settings = { - inbounds = [ + inbounds = [ + tunInbound + ]; + outbounds = [ + { + type = "block"; + tag = "outbound:block"; + } + { + type = "direct"; + tag = "outbound:direct"; + } + { + type = "dns"; + tag = "outbound:dns"; + } + ]; + route = { + final = "outbound:direct"; + rules = [ { - tag = "inbound:tproxy"; - type = "tproxy"; - listen = "0.0.0.0"; - listen_port = tproxyPort; - udp_fragment = true; - sniff = true; - sniff_override_destination = false; + protocol = "dns"; + outbound = "outbound:dns"; } ]; - outbounds = [ - { - type = "block"; - tag = "outbound:block"; - } - { - type = "direct"; - tag = "outbound:direct"; - } - vmessOutbound - ]; - route = { - final = "outbound:block"; - rules = [ - { - inbound = [ - "inbound:tproxy" - ]; - outbound = "outbound:vmess"; - } - ]; - }; - }; - }; - }; - - fakeip = - { pkgs, ... 
}: - { - networking = { - firewall.enable = false; - hosts = hostsEntries; - useDHCP = false; - interfaces.eth1 = { - ipv4.addresses = [ - { - address = "1.1.1.6"; - prefixLength = 24; - } - ]; - }; - }; - - environment.systemPackages = [ pkgs.dnsutils ]; - - services.sing-box = { - enable = true; - settings = { - dns = { - final = "dns:default"; - independent_cache = true; - fakeip = { - enabled = true; - "inet4_range" = "198.18.0.0/16"; - }; - servers = [ - { - detour = "outbound:direct"; - tag = "dns:default"; - address = hosts."${target_host}"; - } - { - tag = "dns:fakeip"; - address = "fakeip"; - } - ]; - rules = [ - { - outbound = [ "any" ]; - server = "dns:default"; - } - { - query_type = [ - "A" - "AAAA" - ]; - server = "dns:fakeip"; - - } - ]; - }; - inbounds = [ - tunInbound - ]; - outbounds = [ - { - type = "block"; - tag = "outbound:block"; - } - { - type = "direct"; - tag = "outbound:direct"; - } - { - type = "dns"; - tag = "outbound:dns"; - } - ]; - route = { - final = "outbound:direct"; - rules = [ - { - protocol = "dns"; - outbound = "outbound:dns"; - } - ]; - }; }; }; }; - }; + }; + }; - testScript = '' - target.wait_for_unit("nginx.service") - target.wait_for_open_port(443) - target.wait_for_unit("dnsmasq.service") - target.wait_for_open_port(53) + testScript = '' + target.wait_for_unit("nginx.service") + target.wait_for_open_port(443) + target.wait_for_unit("dnsmasq.service") + target.wait_for_open_port(53) - server.wait_for_unit("sing-box.service") - server.wait_for_open_port(1080) - server.wait_for_unit("wg-quick-wg0.service") - server.wait_for_file("/sys/class/net/wg0") + server.wait_for_unit("sing-box.service") + server.wait_for_open_port(1080) + server.wait_for_unit("wg-quick-wg0.service") + server.wait_for_file("/sys/class/net/wg0") - def test_curl(machine, extra_args=""): - assert ( - machine.succeed(f"curl --fail --max-time 10 --http2 https://${target_host} {extra_args}") - == "HTTP/2.0 ${hosts.${server_host}}" - ) - assert ( - machine.succeed(f"curl --fail --max-time 10 --http3-only https://${target_host} {extra_args}") - == "HTTP/3.0 ${hosts.${server_host}}" - ) + def test_curl(machine, extra_args=""): + assert ( + machine.succeed(f"curl --fail --max-time 10 --http2 https://${target_host} {extra_args}") + == "HTTP/2.0 ${hosts.${server_host}}" + ) + assert ( + machine.succeed(f"curl --fail --max-time 10 --http3-only https://${target_host} {extra_args}") + == "HTTP/3.0 ${hosts.${server_host}}" + ) - with subtest("tun"): - tun.wait_for_unit("sing-box.service") - tun.wait_for_unit("sys-devices-virtual-net-${tunInbound.interface_name}.device") - tun.wait_until_succeeds("ip route get ${hosts."${target_host}"} | grep 'dev ${tunInbound.interface_name}'") - tun.succeed("ip addr show ${tunInbound.interface_name}") - tun.succeed("ip route show table ${toString tunInbound.iproute2_table_index} | grep ${tunInbound.interface_name}") - assert ( - tun.succeed("ip rule list table ${toString tunInbound.iproute2_table_index} | sort | head -1 | awk -F: '{print $1}' | tr -d '\n'") - == "${toString tunInbound.iproute2_rule_index}" - ) - test_curl(tun) + with subtest("tun"): + tun.wait_for_unit("sing-box.service") + tun.wait_for_unit("sys-devices-virtual-net-${tunInbound.interface_name}.device") + tun.wait_until_succeeds("ip route get ${hosts."${target_host}"} | grep 'dev ${tunInbound.interface_name}'") + tun.succeed("ip addr show ${tunInbound.interface_name}") + tun.succeed("ip route show table ${toString tunInbound.iproute2_table_index} | grep ${tunInbound.interface_name}") + 
assert ( + tun.succeed("ip rule list table ${toString tunInbound.iproute2_table_index} | sort | head -1 | awk -F: '{print $1}' | tr -d '\n'") + == "${toString tunInbound.iproute2_rule_index}" + ) + test_curl(tun) - with subtest("wireguard"): - wireguard.wait_for_unit("sing-box.service") - wireguard.wait_for_unit("sys-devices-virtual-net-wg0.device") - wireguard.succeed("ip addr show wg0") - test_curl(wireguard, "--interface wg0") + with subtest("wireguard"): + wireguard.wait_for_unit("sing-box.service") + wireguard.wait_for_unit("sys-devices-virtual-net-wg0.device") + wireguard.succeed("ip addr show wg0") + test_curl(wireguard, "--interface wg0") - with subtest("tproxy"): - tproxy.wait_for_unit("sing-box.service") - test_curl(tproxy) + with subtest("tproxy"): + tproxy.wait_for_unit("sing-box.service") + test_curl(tproxy) - with subtest("fakeip"): - fakeip.wait_for_unit("sing-box.service") - fakeip.wait_for_unit("sys-devices-virtual-net-${tunInbound.interface_name}.device") - fakeip.wait_until_succeeds("ip route get ${hosts."${target_host}"} | grep 'dev ${tunInbound.interface_name}'") - fakeip.succeed("dig +short A ${target_host} @${target_host} | grep '^198.18.'") - ''; + with subtest("fakeip"): + fakeip.wait_for_unit("sing-box.service") + fakeip.wait_for_unit("sys-devices-virtual-net-${tunInbound.interface_name}.device") + fakeip.wait_until_succeeds("ip route get ${hosts."${target_host}"} | grep 'dev ${tunInbound.interface_name}'") + fakeip.succeed("dig +short A ${target_host} @${target_host} | grep '^198.18.'") + ''; - } -) +} diff --git a/nixos/tests/slimserver.nix b/nixos/tests/slimserver.nix index b9821a7b1c33..2c18d0183319 100644 --- a/nixos/tests/slimserver.nix +++ b/nixos/tests/slimserver.nix @@ -1,49 +1,47 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "slimserver"; - meta.maintainers = with pkgs.lib.maintainers; [ adamcstephens ]; +{ pkgs, ... }: +{ + name = "slimserver"; + meta.maintainers = with pkgs.lib.maintainers; [ adamcstephens ]; - nodes.machine = - { ... }: - { - services.slimserver.enable = true; - services.squeezelite = { - enable = true; - extraArguments = "-s 127.0.0.1 -d slimproto=info"; - }; - boot.kernelModules = [ "snd-dummy" ]; + nodes.machine = + { ... 
}: + { + services.slimserver.enable = true; + services.squeezelite = { + enable = true; + extraArguments = "-s 127.0.0.1 -d slimproto=info"; }; + boot.kernelModules = [ "snd-dummy" ]; + }; - testScript = # python - '' - import json - rpc_get_player = { - "id": 1, - "method": "slim.request", - "params":[0,["player", "id", "0", "?"]] - } + testScript = # python + '' + import json + rpc_get_player = { + "id": 1, + "method": "slim.request", + "params":[0,["player", "id", "0", "?"]] + } - with subtest("slimserver is started"): - machine.wait_for_unit("slimserver.service") - # give slimserver a moment to report errors - machine.sleep(2) - machine.wait_until_succeeds("journalctl -u slimserver.service | grep 'Starting Lyrion Music'", timeout=120) - machine.wait_for_open_port(9000) + with subtest("slimserver is started"): + machine.wait_for_unit("slimserver.service") + # give slimserver a moment to report errors + machine.sleep(2) + machine.wait_until_succeeds("journalctl -u slimserver.service | grep 'Starting Lyrion Music'", timeout=120) + machine.wait_for_open_port(9000) - with subtest('slimserver module errors are not reported'): - machine.fail("journalctl -u slimserver.service | grep 'throw_exception'") - machine.fail("journalctl -u slimserver.service | grep 'not installed'") - machine.fail("journalctl -u slimserver.service | grep 'not found'") - machine.fail("journalctl -u slimserver.service | grep 'The following CPAN modules were found but cannot work with Logitech Media Server'") - machine.fail("journalctl -u slimserver.service | grep 'please use the buildme.sh'") + with subtest('slimserver module errors are not reported'): + machine.fail("journalctl -u slimserver.service | grep 'throw_exception'") + machine.fail("journalctl -u slimserver.service | grep 'not installed'") + machine.fail("journalctl -u slimserver.service | grep 'not found'") + machine.fail("journalctl -u slimserver.service | grep 'The following CPAN modules were found but cannot work with Logitech Media Server'") + machine.fail("journalctl -u slimserver.service | grep 'please use the buildme.sh'") - with subtest("squeezelite player successfully connects to slimserver"): - machine.wait_for_unit("squeezelite.service") - machine.wait_until_succeeds("journalctl -u squeezelite.service | grep -E 'slimproto:[0-9]+ connected'", timeout=120) - player_mac = machine.wait_until_succeeds("journalctl -eu squeezelite.service | grep -E 'sendHELO:[0-9]+ mac:'", timeout=120).strip().split(" ")[-1] - player_id = machine.succeed(f"curl http://localhost:9000/jsonrpc.js -g -X POST -d '{json.dumps(rpc_get_player)}'") - assert player_mac == json.loads(player_id)["result"]["_id"], "squeezelite player not found" - ''; - } -) + with subtest("squeezelite player successfully connects to slimserver"): + machine.wait_for_unit("squeezelite.service") + machine.wait_until_succeeds("journalctl -u squeezelite.service | grep -E 'slimproto:[0-9]+ connected'", timeout=120) + player_mac = machine.wait_until_succeeds("journalctl -eu squeezelite.service | grep -E 'sendHELO:[0-9]+ mac:'", timeout=120).strip().split(" ")[-1] + player_id = machine.succeed(f"curl http://localhost:9000/jsonrpc.js -g -X POST -d '{json.dumps(rpc_get_player)}'") + assert player_mac == json.loads(player_id)["result"]["_id"], "squeezelite player not found" + ''; +} diff --git a/nixos/tests/slurm.nix b/nixos/tests/slurm.nix index 02fe38c6ecfe..cac0a671ca8b 100644 --- a/nixos/tests/slurm.nix +++ b/nixos/tests/slurm.nix @@ -1,177 +1,175 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... 
}: - let - slurmconfig = { - services.slurm = { - controlMachine = "control"; - nodeName = [ "node[1-3] CPUs=1 State=UNKNOWN" ]; - partitionName = [ "debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP" ]; - extraConfig = '' - AccountingStorageHost=dbd - AccountingStorageType=accounting_storage/slurmdbd - ''; - }; - environment.systemPackages = [ mpitest ]; - networking.firewall.enable = false; - systemd.tmpfiles.rules = [ - "f /etc/munge/munge.key 0400 munge munge - mungeverryweakkeybuteasytointegratoinatest" - ]; +{ lib, pkgs, ... }: +let + slurmconfig = { + services.slurm = { + controlMachine = "control"; + nodeName = [ "node[1-3] CPUs=1 State=UNKNOWN" ]; + partitionName = [ "debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP" ]; + extraConfig = '' + AccountingStorageHost=dbd + AccountingStorageType=accounting_storage/slurmdbd + ''; + }; + environment.systemPackages = [ mpitest ]; + networking.firewall.enable = false; + systemd.tmpfiles.rules = [ + "f /etc/munge/munge.key 0400 munge munge - mungeverryweakkeybuteasytointegratoinatest" + ]; + }; + + mpitest = + let + mpitestC = pkgs.writeText "mpitest.c" '' + #include + #include + #include + + int + main (int argc, char *argv[]) + { + int rank, size, length; + char name[512]; + + MPI_Init (&argc, &argv); + MPI_Comm_rank (MPI_COMM_WORLD, &rank); + MPI_Comm_size (MPI_COMM_WORLD, &size); + MPI_Get_processor_name (name, &length); + + if ( rank == 0 ) printf("size=%d\n", size); + + printf ("%s: hello world from process %d of %d\n", name, rank, size); + + MPI_Finalize (); + + return EXIT_SUCCESS; + } + ''; + in + pkgs.runCommand "mpitest" { } '' + mkdir -p $out/bin + ${lib.getDev pkgs.mpi}/bin/mpicc ${mpitestC} -o $out/bin/mpitest + ''; +in +{ + name = "slurm"; + + meta.maintainers = [ lib.maintainers.markuskowa ]; + + nodes = + let + computeNode = + { ... }: + { + imports = [ slurmconfig ]; + # TODO slurmd port and slurmctld port should be configurations and + # automatically allowed by the firewall. + services.slurm = { + client.enable = true; + }; + }; + in + { + + control = + { ... }: + { + imports = [ slurmconfig ]; + services.slurm = { + server.enable = true; + }; + }; + + submit = + { ... }: + { + imports = [ slurmconfig ]; + services.slurm = { + enableStools = true; + }; + }; + + dbd = + { pkgs, ... 
}: + let + passFile = pkgs.writeText "dbdpassword" "password123"; + in + { + networking.firewall.enable = false; + systemd.tmpfiles.rules = [ + "f /etc/munge/munge.key 0400 munge munge - mungeverryweakkeybuteasytointegratoinatest" + ]; + services.slurm.dbdserver = { + enable = true; + storagePassFile = "${passFile}"; + }; + services.mysql = { + enable = true; + package = pkgs.mariadb; + initialScript = pkgs.writeText "mysql-init.sql" '' + CREATE USER 'slurm'@'localhost' IDENTIFIED BY 'password123'; + GRANT ALL PRIVILEGES ON slurm_acct_db.* TO 'slurm'@'localhost'; + ''; + ensureDatabases = [ "slurm_acct_db" ]; + ensureUsers = [ + { + ensurePermissions = { + "slurm_acct_db.*" = "ALL PRIVILEGES"; + }; + name = "slurm"; + } + ]; + settings.mysqld = { + # recommendations from: https://slurm.schedmd.com/accounting.html#mysql-configuration + innodb_buffer_pool_size = "1024M"; + innodb_log_file_size = "64M"; + innodb_lock_wait_timeout = 900; + }; + }; + }; + + node1 = computeNode; + node2 = computeNode; + node3 = computeNode; }; - mpitest = - let - mpitestC = pkgs.writeText "mpitest.c" '' - #include - #include - #include + testScript = '' + start_all() - int - main (int argc, char *argv[]) - { - int rank, size, length; - char name[512]; + # Make sure DBD is up after DB initialzation + with subtest("can_start_slurmdbd"): + dbd.succeed("systemctl restart slurmdbd") + dbd.wait_for_unit("slurmdbd.service") + dbd.wait_for_open_port(6819) - MPI_Init (&argc, &argv); - MPI_Comm_rank (MPI_COMM_WORLD, &rank); - MPI_Comm_size (MPI_COMM_WORLD, &size); - MPI_Get_processor_name (name, &length); + # there needs to be an entry for the current + # cluster in the database before slurmctld is restarted + with subtest("add_account"): + control.succeed("sacctmgr -i add cluster default") + # check for cluster entry + control.succeed("sacctmgr list cluster | awk '{ print $1 }' | grep default") - if ( rank == 0 ) printf("size=%d\n", size); + with subtest("can_start_slurmctld"): + control.succeed("systemctl restart slurmctld") + control.wait_for_unit("slurmctld.service") - printf ("%s: hello world from process %d of %d\n", name, rank, size); + with subtest("can_start_slurmd"): + for node in [node1, node2, node3]: + node.succeed("systemctl restart slurmd.service") + node.wait_for_unit("slurmd") - MPI_Finalize (); + # Test that the cluster works and can distribute jobs; - return EXIT_SUCCESS; - } - ''; - in - pkgs.runCommand "mpitest" { } '' - mkdir -p $out/bin - ${lib.getDev pkgs.mpi}/bin/mpicc ${mpitestC} -o $out/bin/mpitest - ''; - in - { - name = "slurm"; + with subtest("run_distributed_command"): + # Run `hostname` on 3 nodes of the partition (so on all the 3 nodes). + # The output must contain the 3 different names + submit.succeed("srun -N 3 hostname | sort | uniq | wc -l | xargs test 3 -eq") - meta.maintainers = [ lib.maintainers.markuskowa ]; + with subtest("check_slurm_dbd"): + # find the srun job from above in the database + control.succeed("sleep 5") + control.succeed("sacct | grep hostname") - nodes = - let - computeNode = - { ... }: - { - imports = [ slurmconfig ]; - # TODO slurmd port and slurmctld port should be configurations and - # automatically allowed by the firewall. - services.slurm = { - client.enable = true; - }; - }; - in - { - - control = - { ... }: - { - imports = [ slurmconfig ]; - services.slurm = { - server.enable = true; - }; - }; - - submit = - { ... }: - { - imports = [ slurmconfig ]; - services.slurm = { - enableStools = true; - }; - }; - - dbd = - { pkgs, ... 
}: - let - passFile = pkgs.writeText "dbdpassword" "password123"; - in - { - networking.firewall.enable = false; - systemd.tmpfiles.rules = [ - "f /etc/munge/munge.key 0400 munge munge - mungeverryweakkeybuteasytointegratoinatest" - ]; - services.slurm.dbdserver = { - enable = true; - storagePassFile = "${passFile}"; - }; - services.mysql = { - enable = true; - package = pkgs.mariadb; - initialScript = pkgs.writeText "mysql-init.sql" '' - CREATE USER 'slurm'@'localhost' IDENTIFIED BY 'password123'; - GRANT ALL PRIVILEGES ON slurm_acct_db.* TO 'slurm'@'localhost'; - ''; - ensureDatabases = [ "slurm_acct_db" ]; - ensureUsers = [ - { - ensurePermissions = { - "slurm_acct_db.*" = "ALL PRIVILEGES"; - }; - name = "slurm"; - } - ]; - settings.mysqld = { - # recommendations from: https://slurm.schedmd.com/accounting.html#mysql-configuration - innodb_buffer_pool_size = "1024M"; - innodb_log_file_size = "64M"; - innodb_lock_wait_timeout = 900; - }; - }; - }; - - node1 = computeNode; - node2 = computeNode; - node3 = computeNode; - }; - - testScript = '' - start_all() - - # Make sure DBD is up after DB initialzation - with subtest("can_start_slurmdbd"): - dbd.succeed("systemctl restart slurmdbd") - dbd.wait_for_unit("slurmdbd.service") - dbd.wait_for_open_port(6819) - - # there needs to be an entry for the current - # cluster in the database before slurmctld is restarted - with subtest("add_account"): - control.succeed("sacctmgr -i add cluster default") - # check for cluster entry - control.succeed("sacctmgr list cluster | awk '{ print $1 }' | grep default") - - with subtest("can_start_slurmctld"): - control.succeed("systemctl restart slurmctld") - control.wait_for_unit("slurmctld.service") - - with subtest("can_start_slurmd"): - for node in [node1, node2, node3]: - node.succeed("systemctl restart slurmd.service") - node.wait_for_unit("slurmd") - - # Test that the cluster works and can distribute jobs; - - with subtest("run_distributed_command"): - # Run `hostname` on 3 nodes of the partition (so on all the 3 nodes). - # The output must contain the 3 different names - submit.succeed("srun -N 3 hostname | sort | uniq | wc -l | xargs test 3 -eq") - - with subtest("check_slurm_dbd"): - # find the srun job from above in the database - control.succeed("sleep 5") - control.succeed("sacct | grep hostname") - - with subtest("run_PMIx_mpitest"): - submit.succeed("srun -N 3 --mpi=pmix mpitest | grep size=3") - ''; - } -) + with subtest("run_PMIx_mpitest"): + submit.succeed("srun -N 3 --mpi=pmix mpitest | grep size=3") + ''; +} diff --git a/nixos/tests/smokeping.nix b/nixos/tests/smokeping.nix index 9aa25ee49d6d..ec359176fa6b 100644 --- a/nixos/tests/smokeping.nix +++ b/nixos/tests/smokeping.nix @@ -1,45 +1,43 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "smokeping"; - meta = with pkgs.lib.maintainers; { - maintainers = [ cransom ]; - }; +{ pkgs, ... }: +{ + name = "smokeping"; + meta = with pkgs.lib.maintainers; { + maintainers = [ cransom ]; + }; - nodes = { - sm = - { ... }: - { - networking.domain = "example.com"; # FQDN: sm.example.com - services.smokeping = { - enable = true; - mailHost = "127.0.0.2"; - probeConfig = '' - + FPing - binary = /run/wrappers/bin/fping - offset = 0% - ''; - }; + nodes = { + sm = + { ... 
}: + { + networking.domain = "example.com"; # FQDN: sm.example.com + services.smokeping = { + enable = true; + mailHost = "127.0.0.2"; + probeConfig = '' + + FPing + binary = /run/wrappers/bin/fping + offset = 0% + ''; }; - }; + }; + }; - testScript = '' - start_all() - sm.wait_for_unit("smokeping") - sm.wait_for_unit("nginx") - sm.wait_for_file("/var/lib/smokeping/data/Local/LocalMachine.rrd") - sm.succeed("curl -s -f localhost/smokeping.fcgi?target=Local") - # Check that there's a helpful page without explicit path as well. - sm.succeed("curl -s -f localhost") - sm.succeed("ls /var/lib/smokeping/cache/Local/LocalMachine_mini.png") - sm.succeed("ls /var/lib/smokeping/cache/index.html") + testScript = '' + start_all() + sm.wait_for_unit("smokeping") + sm.wait_for_unit("nginx") + sm.wait_for_file("/var/lib/smokeping/data/Local/LocalMachine.rrd") + sm.succeed("curl -s -f localhost/smokeping.fcgi?target=Local") + # Check that there's a helpful page without explicit path as well. + sm.succeed("curl -s -f localhost") + sm.succeed("ls /var/lib/smokeping/cache/Local/LocalMachine_mini.png") + sm.succeed("ls /var/lib/smokeping/cache/index.html") - # stop and start the service like nixos-rebuild would do - # see https://github.com/NixOS/nixpkgs/issues/265953) - sm.succeed("systemctl stop smokeping") - sm.succeed("systemctl start smokeping") - # ensure all services restarted properly - sm.succeed("systemctl --failed | grep -q '0 loaded units listed'") - ''; - } -) + # stop and start the service like nixos-rebuild would do + # see https://github.com/NixOS/nixpkgs/issues/265953) + sm.succeed("systemctl stop smokeping") + sm.succeed("systemctl start smokeping") + # ensure all services restarted properly + sm.succeed("systemctl --failed | grep -q '0 loaded units listed'") + ''; +} diff --git a/nixos/tests/snapper.nix b/nixos/tests/snapper.nix index 683d2f58ea3c..4a03b85cc71d 100644 --- a/nixos/tests/snapper.nix +++ b/nixos/tests/snapper.nix @@ -1,45 +1,43 @@ -import ./make-test-python.nix ( - { ... }: - { - name = "snapper"; +{ ... }: +{ + name = "snapper"; - nodes.machine = - { pkgs, lib, ... }: - { - boot.initrd.postDeviceCommands = '' - ${pkgs.btrfs-progs}/bin/mkfs.btrfs -f -L aux /dev/vdb - ''; - - virtualisation.emptyDiskImages = [ 4096 ]; - - virtualisation.fileSystems = { - "/home" = { - device = "/dev/disk/by-label/aux"; - fsType = "btrfs"; - }; - }; - services.snapper.configs.home.SUBVOLUME = "/home"; - services.snapper.filters = "/nix"; - }; - - testScript = - { nodes, ... }: - let - inherit (nodes.machine.services.snapper) snapshotRootOnBoot; - in - '' - machine.succeed("btrfs subvolume create /home/.snapshots") - machine.succeed("snapper -c home list") - machine.succeed("snapper -c home create --description empty") - machine.succeed("echo test > /home/file") - machine.succeed("snapper -c home create --description file") - machine.succeed("snapper -c home status 1..2") - machine.succeed("snapper -c home undochange 1..2") - machine.fail("ls /home/file") - machine.succeed("snapper -c home delete 2") - machine.succeed("systemctl --wait start snapper-timeline.service") - machine.succeed("systemctl --wait start snapper-cleanup.service") - machine.${if snapshotRootOnBoot then "succeed" else "fail"}("systemctl cat snapper-boot.service") + nodes.machine = + { pkgs, lib, ... 
}: + { + boot.initrd.postDeviceCommands = '' + ${pkgs.btrfs-progs}/bin/mkfs.btrfs -f -L aux /dev/vdb ''; - } -) + + virtualisation.emptyDiskImages = [ 4096 ]; + + virtualisation.fileSystems = { + "/home" = { + device = "/dev/disk/by-label/aux"; + fsType = "btrfs"; + }; + }; + services.snapper.configs.home.SUBVOLUME = "/home"; + services.snapper.filters = "/nix"; + }; + + testScript = + { nodes, ... }: + let + inherit (nodes.machine.services.snapper) snapshotRootOnBoot; + in + '' + machine.succeed("btrfs subvolume create /home/.snapshots") + machine.succeed("snapper -c home list") + machine.succeed("snapper -c home create --description empty") + machine.succeed("echo test > /home/file") + machine.succeed("snapper -c home create --description file") + machine.succeed("snapper -c home status 1..2") + machine.succeed("snapper -c home undochange 1..2") + machine.fail("ls /home/file") + machine.succeed("snapper -c home delete 2") + machine.succeed("systemctl --wait start snapper-timeline.service") + machine.succeed("systemctl --wait start snapper-cleanup.service") + machine.${if snapshotRootOnBoot then "succeed" else "fail"}("systemctl cat snapper-boot.service") + ''; +} diff --git a/nixos/tests/snmpd.nix b/nixos/tests/snmpd.nix index 24d414e03d4d..619c08426df9 100644 --- a/nixos/tests/snmpd.nix +++ b/nixos/tests/snmpd.nix @@ -1,26 +1,24 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "snmpd"; +{ pkgs, lib, ... }: +{ + name = "snmpd"; - nodes.snmpd = { - environment.systemPackages = with pkgs; [ - net-snmp - ]; + nodes.snmpd = { + environment.systemPackages = with pkgs; [ + net-snmp + ]; - services.snmpd = { - enable = true; - configText = '' - rocommunity public - ''; - }; + services.snmpd = { + enable = true; + configText = '' + rocommunity public + ''; }; + }; - testScript = '' - start_all(); - machine.wait_for_unit("snmpd.service") - machine.succeed("snmpwalk -v 2c -c public localhost | grep SNMPv2-MIB::sysName.0"); - ''; + testScript = '' + start_all(); + machine.wait_for_unit("snmpd.service") + machine.succeed("snmpwalk -v 2c -c public localhost | grep SNMPv2-MIB::sysName.0"); + ''; - } -) +} diff --git a/nixos/tests/soapui.nix b/nixos/tests/soapui.nix index 834369f9dcc7..9906678e560f 100644 --- a/nixos/tests/soapui.nix +++ b/nixos/tests/soapui.nix @@ -1,29 +1,27 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "soapui"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; +{ pkgs, ... }: +{ + name = "soapui"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + + environment.systemPackages = [ pkgs.soapui ]; }; - nodes.machine = - { config, pkgs, ... }: - { - imports = [ - ./common/x11.nix - ]; - - services.xserver.enable = true; - - environment.systemPackages = [ pkgs.soapui ]; - }; - - testScript = '' - machine.wait_for_x() - machine.succeed("soapui >&2 &") - machine.wait_for_window(r"SoapUI \d+\.\d+\.\d+") - machine.sleep(1) - machine.screenshot("soapui") - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.succeed("soapui >&2 &") + machine.wait_for_window(r"SoapUI \d+\.\d+\.\d+") + machine.sleep(1) + machine.screenshot("soapui") + ''; +} diff --git a/nixos/tests/soft-serve.nix b/nixos/tests/soft-serve.nix index 1fa365c93ef1..a9bed17a1976 100644 --- a/nixos/tests/soft-serve.nix +++ b/nixos/tests/soft-serve.nix @@ -1,110 +1,108 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... 
}: - let - inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey; - sshPort = 8231; - httpPort = 8232; - statsPort = 8233; - gitPort = 8418; - in - { - name = "soft-serve"; - meta.maintainers = with lib.maintainers; [ dadada ]; - nodes = { - client = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ - curl - git - openssh - ]; - environment.etc.sshKey = { - source = snakeOilPrivateKey; - mode = "0600"; +{ pkgs, lib, ... }: +let + inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey; + sshPort = 8231; + httpPort = 8232; + statsPort = 8233; + gitPort = 8418; +in +{ + name = "soft-serve"; + meta.maintainers = with lib.maintainers; [ dadada ]; + nodes = { + client = + { pkgs, ... }: + { + environment.systemPackages = with pkgs; [ + curl + git + openssh + ]; + environment.etc.sshKey = { + source = snakeOilPrivateKey; + mode = "0600"; + }; + }; + + server = + { config, ... }: + { + services.soft-serve = { + enable = true; + settings = { + name = "TestServer"; + ssh.listen_addr = ":${toString sshPort}"; + git.listen_addr = ":${toString gitPort}"; + http.listen_addr = ":${toString httpPort}"; + stats.listen_addr = ":${toString statsPort}"; + initial_admin_keys = [ snakeOilPublicKey ]; }; }; + networking.firewall.allowedTCPPorts = [ + sshPort + httpPort + statsPort + ]; + }; + }; - server = - { config, ... }: - { - services.soft-serve = { - enable = true; - settings = { - name = "TestServer"; - ssh.listen_addr = ":${toString sshPort}"; - git.listen_addr = ":${toString gitPort}"; - http.listen_addr = ":${toString httpPort}"; - stats.listen_addr = ":${toString statsPort}"; - initial_admin_keys = [ snakeOilPublicKey ]; - }; - }; - networking.firewall.allowedTCPPorts = [ - sshPort - httpPort - statsPort - ]; - }; - }; + testScript = + { ... }: + '' + SSH_PORT = ${toString sshPort} + HTTP_PORT = ${toString httpPort} + STATS_PORT = ${toString statsPort} + KEY = "${snakeOilPublicKey}" + SSH_KEY = "/etc/sshKey" + SSH_COMMAND = f"ssh -p {SSH_PORT} -i {SSH_KEY} -o StrictHostKeyChecking=no" + TEST_DIR = "/tmp/test" + GIT = f"git -C {TEST_DIR}" - testScript = - { ... 
}: - '' - SSH_PORT = ${toString sshPort} - HTTP_PORT = ${toString httpPort} - STATS_PORT = ${toString statsPort} - KEY = "${snakeOilPublicKey}" - SSH_KEY = "/etc/sshKey" - SSH_COMMAND = f"ssh -p {SSH_PORT} -i {SSH_KEY} -o StrictHostKeyChecking=no" - TEST_DIR = "/tmp/test" - GIT = f"git -C {TEST_DIR}" + for machine in client, server: + machine.wait_for_unit("network.target") - for machine in client, server: - machine.wait_for_unit("network.target") + server.wait_for_unit("soft-serve.service") + server.wait_for_open_port(SSH_PORT) - server.wait_for_unit("soft-serve.service") - server.wait_for_open_port(SSH_PORT) + with subtest("Get info"): + status, test = client.execute(f"{SSH_COMMAND} server info") + if status != 0: + raise Exception("Failed to get SSH info") + key = " ".join(KEY.split(" ")[0:2]) + if not key in test: + raise Exception("Admin key must be configured correctly") - with subtest("Get info"): - status, test = client.execute(f"{SSH_COMMAND} server info") - if status != 0: - raise Exception("Failed to get SSH info") - key = " ".join(KEY.split(" ")[0:2]) - if not key in test: - raise Exception("Admin key must be configured correctly") + with subtest("Create user"): + client.succeed(f"{SSH_COMMAND} server user create beatrice") + client.succeed(f"{SSH_COMMAND} server user info beatrice") - with subtest("Create user"): - client.succeed(f"{SSH_COMMAND} server user create beatrice") - client.succeed(f"{SSH_COMMAND} server user info beatrice") + with subtest("Create repo"): + client.succeed(f"git init {TEST_DIR}") + client.succeed(f"{GIT} config --global user.email you@example.com") + client.succeed(f"touch {TEST_DIR}/foo") + client.succeed(f"{GIT} add foo") + client.succeed(f"{GIT} commit --allow-empty -m test") + client.succeed(f"{GIT} remote add origin git@server:test") + client.succeed(f"GIT_SSH_COMMAND='{SSH_COMMAND}' {GIT} push -u origin master") + client.execute("rm -r /tmp/test") - with subtest("Create repo"): - client.succeed(f"git init {TEST_DIR}") - client.succeed(f"{GIT} config --global user.email you@example.com") - client.succeed(f"touch {TEST_DIR}/foo") - client.succeed(f"{GIT} add foo") - client.succeed(f"{GIT} commit --allow-empty -m test") - client.succeed(f"{GIT} remote add origin git@server:test") - client.succeed(f"GIT_SSH_COMMAND='{SSH_COMMAND}' {GIT} push -u origin master") - client.execute("rm -r /tmp/test") + server.wait_for_open_port(HTTP_PORT) - server.wait_for_open_port(HTTP_PORT) + with subtest("Clone over HTTP"): + client.succeed(f"curl --connect-timeout 10 http://server:{HTTP_PORT}/") + client.succeed(f"git clone http://server:{HTTP_PORT}/test /tmp/test") + client.execute("rm -r /tmp/test") - with subtest("Clone over HTTP"): - client.succeed(f"curl --connect-timeout 10 http://server:{HTTP_PORT}/") - client.succeed(f"git clone http://server:{HTTP_PORT}/test /tmp/test") - client.execute("rm -r /tmp/test") + with subtest("Clone over SSH"): + client.succeed(f"GIT_SSH_COMMAND='{SSH_COMMAND}' git clone git@server:test /tmp/test") + client.execute("rm -r /tmp/test") - with subtest("Clone over SSH"): - client.succeed(f"GIT_SSH_COMMAND='{SSH_COMMAND}' git clone git@server:test /tmp/test") - client.execute("rm -r /tmp/test") - - with subtest("Get stats over HTTP"): - server.wait_for_open_port(STATS_PORT) - status, test = client.execute(f"curl --connect-timeout 10 http://server:{STATS_PORT}/metrics") - if status != 0: - raise Exception("Failed to get metrics from status port") - if not "go_gc_duration_seconds_count" in test: - raise Exception("Metrics did not 
contain key 'go_gc_duration_seconds_count'") - ''; - } -) + with subtest("Get stats over HTTP"): + server.wait_for_open_port(STATS_PORT) + status, test = client.execute(f"curl --connect-timeout 10 http://server:{STATS_PORT}/metrics") + if status != 0: + raise Exception("Failed to get metrics from status port") + if not "go_gc_duration_seconds_count" in test: + raise Exception("Metrics did not contain key 'go_gc_duration_seconds_count'") + ''; +} diff --git a/nixos/tests/sogo.nix b/nixos/tests/sogo.nix index a3ad3bd5b7ac..38e13a506b69 100644 --- a/nixos/tests/sogo.nix +++ b/nixos/tests/sogo.nix @@ -1,65 +1,63 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "sogo"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; +{ pkgs, ... }: +{ + name = "sogo"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; - nodes = { - sogo = - { config, pkgs, ... }: - { - services.nginx.enable = true; + nodes = { + sogo = + { config, pkgs, ... }: + { + services.nginx.enable = true; - services.mysql = { - enable = true; - package = pkgs.mariadb; - ensureDatabases = [ "sogo" ]; - ensureUsers = [ - { - name = "sogo"; - ensurePermissions = { - "sogo.*" = "ALL PRIVILEGES"; - }; - } - ]; - }; - - services.sogo = { - enable = true; - timezone = "Europe/Berlin"; - extraConfig = '' - WOWorkersCount = 1; - - SOGoUserSources = ( - { - type = sql; - userPasswordAlgorithm = md5; - viewURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_users"; - canAuthenticate = YES; - id = users; - } - ); - - SOGoProfileURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_user_profile"; - OCSFolderInfoURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_folder_info"; - OCSSessionsFolderURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_sessions_folder"; - OCSEMailAlarmsFolderURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_alarms_folder"; - OCSStoreURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_store"; - OCSAclURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_acl"; - OCSCacheFolderURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_cache_folder"; - ''; - }; + services.mysql = { + enable = true; + package = pkgs.mariadb; + ensureDatabases = [ "sogo" ]; + ensureUsers = [ + { + name = "sogo"; + ensurePermissions = { + "sogo.*" = "ALL PRIVILEGES"; + }; + } + ]; }; - }; - testScript = '' - start_all() - sogo.wait_for_unit("multi-user.target") - sogo.wait_for_open_port(20000) - sogo.wait_for_open_port(80) - sogo.succeed("curl -sSfL http://sogo/SOGo") - ''; - } -) + services.sogo = { + enable = true; + timezone = "Europe/Berlin"; + extraConfig = '' + WOWorkersCount = 1; + + SOGoUserSources = ( + { + type = sql; + userPasswordAlgorithm = md5; + viewURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_users"; + canAuthenticate = YES; + id = users; + } + ); + + SOGoProfileURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_user_profile"; + OCSFolderInfoURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_folder_info"; + OCSSessionsFolderURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_sessions_folder"; + OCSEMailAlarmsFolderURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_alarms_folder"; + OCSStoreURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_store"; + OCSAclURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_acl"; + OCSCacheFolderURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_cache_folder"; + ''; + }; + }; + }; + + testScript = '' + start_all() + 
sogo.wait_for_unit("multi-user.target") + sogo.wait_for_open_port(20000) + sogo.wait_for_open_port(80) + sogo.succeed("curl -sSfL http://sogo/SOGo") + ''; +} diff --git a/nixos/tests/soju.nix b/nixos/tests/soju.nix index 71f457b14204..53acae2a57c7 100644 --- a/nixos/tests/soju.nix +++ b/nixos/tests/soju.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - certs = import ./common/acme/server/snakeoil-certs.nix; - domain = certs.domain; +{ pkgs, lib, ... }: +let + certs = import ./common/acme/server/snakeoil-certs.nix; + domain = certs.domain; - user = "testuser"; - pass = "hunter2"; - in - { - name = "soju"; - meta.maintainers = [ ]; + user = "testuser"; + pass = "hunter2"; +in +{ + name = "soju"; + meta.maintainers = [ ]; - nodes.machine = - { ... }: - { - services.soju = { - enable = true; - adminSocket.enable = true; - hostName = domain; - tlsCertificate = certs.${domain}.cert; - tlsCertificateKey = certs.${domain}.key; - }; + nodes.machine = + { ... }: + { + services.soju = { + enable = true; + adminSocket.enable = true; + hostName = domain; + tlsCertificate = certs.${domain}.cert; + tlsCertificateKey = certs.${domain}.key; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("soju") - machine.wait_for_file("/run/soju/admin") + machine.wait_for_unit("soju") + machine.wait_for_file("/run/soju/admin") - machine.succeed("sojuctl user create -username ${user} -password ${pass}") - ''; - } -) + machine.succeed("sojuctl user create -username ${user} -password ${pass}") + ''; +} diff --git a/nixos/tests/solanum.nix b/nixos/tests/solanum.nix index be3f4bfd45ae..a2c3013125e8 100644 --- a/nixos/tests/solanum.nix +++ b/nixos/tests/solanum.nix @@ -9,100 +9,98 @@ let iiDir = "/tmp/irc"; in -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "solanum"; - nodes = - { - "${server}" = { - networking.firewall.allowedTCPPorts = [ ircPort ]; - services.solanum = { - enable = true; - motd = '' - The default MOTD doesn't contain the word "nixos" in it. - This one does. - ''; - }; +{ pkgs, lib, ... }: +{ + name = "solanum"; + nodes = + { + "${server}" = { + networking.firewall.allowedTCPPorts = [ ircPort ]; + services.solanum = { + enable = true; + motd = '' + The default MOTD doesn't contain the word "nixos" in it. + This one does. 
+ ''; }; - } - // lib.listToAttrs ( - builtins.map ( - client: - lib.nameValuePair client { - imports = [ - ./common/user-account.nix - ]; + }; + } + // lib.listToAttrs ( + builtins.map ( + client: + lib.nameValuePair client { + imports = [ + ./common/user-account.nix + ]; - systemd.services.ii = { - requires = [ "network.target" ]; - wantedBy = [ "default.target" ]; + systemd.services.ii = { + requires = [ "network.target" ]; + wantedBy = [ "default.target" ]; - serviceConfig = { - Type = "simple"; - ExecPreStartPre = "mkdir -p ${iiDir}"; - ExecStart = '' - ${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir} - ''; - User = "alice"; - }; + serviceConfig = { + Type = "simple"; + ExecPreStartPre = "mkdir -p ${iiDir}"; + ExecStart = '' + ${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir} + ''; + User = "alice"; }; - } - ) clients - ); + }; + } + ) clients + ); - testScript = - let - msg = client: "Hello, my name is ${client}"; - clientScript = - client: - [ - '' - ${client}.wait_for_unit("network.target") - ${client}.systemctl("start ii") - ${client}.wait_for_unit("ii") - ${client}.wait_for_file("${iiDir}/${server}/out") - '' - # look for the custom text in the MOTD. - '' - ${client}.wait_until_succeeds("grep 'nixos' ${iiDir}/${server}/out") - '' - # wait until first PING from server arrives before joining, - # so we don't try it too early - '' - ${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out") - '' - # join ${channel} - '' - ${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in") - ${client}.wait_for_file("${iiDir}/${server}/#${channel}/in") - '' - # send a greeting - '' - ${client}.succeed( - "echo '${msg client}' > ${iiDir}/${server}/#${channel}/in" - ) - '' - # check that all greetings arrived on all clients - ] - ++ builtins.map (other: '' + testScript = + let + msg = client: "Hello, my name is ${client}"; + clientScript = + client: + [ + '' + ${client}.wait_for_unit("network.target") + ${client}.systemctl("start ii") + ${client}.wait_for_unit("ii") + ${client}.wait_for_file("${iiDir}/${server}/out") + '' + # look for the custom text in the MOTD. 
+ '' + ${client}.wait_until_succeeds("grep 'nixos' ${iiDir}/${server}/out") + '' + # wait until first PING from server arrives before joining, + # so we don't try it too early + '' + ${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out") + '' + # join ${channel} + '' + ${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in") + ${client}.wait_for_file("${iiDir}/${server}/#${channel}/in") + '' + # send a greeting + '' ${client}.succeed( - "grep '${msg other}$' ${iiDir}/${server}/#${channel}/out" + "echo '${msg client}' > ${iiDir}/${server}/#${channel}/in" ) - '') clients; + '' + # check that all greetings arrived on all clients + ] + ++ builtins.map (other: '' + ${client}.succeed( + "grep '${msg other}$' ${iiDir}/${server}/#${channel}/out" + ) + '') clients; - # foldl', but requires a non-empty list instead of a start value - reduce = f: list: builtins.foldl' f (builtins.head list) (builtins.tail list); - in - '' - start_all() - ${server}.systemctl("status solanum") - ${server}.wait_for_open_port(${toString ircPort}) + # foldl', but requires a non-empty list instead of a start value + reduce = f: list: builtins.foldl' f (builtins.head list) (builtins.tail list); + in + '' + start_all() + ${server}.systemctl("status solanum") + ${server}.wait_for_open_port(${toString ircPort}) - # run clientScript for all clients so that every list - # entry is executed by every client before advancing - # to the next one. - '' - + lib.concatStrings (reduce (lib.zipListsWith (cs: c: cs + c)) (builtins.map clientScript clients)); - } -) + # run clientScript for all clients so that every list + # entry is executed by every client before advancing + # to the next one. + '' + + lib.concatStrings (reduce (lib.zipListsWith (cs: c: cs + c)) (builtins.map clientScript clients)); +} diff --git a/nixos/tests/sonarr.nix b/nixos/tests/sonarr.nix index e9d6bdf62594..44dcaf3c893a 100644 --- a/nixos/tests/sonarr.nix +++ b/nixos/tests/sonarr.nix @@ -1,20 +1,18 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "sonarr"; - meta.maintainers = with lib.maintainers; [ etu ]; +{ + name = "sonarr"; + meta.maintainers = with lib.maintainers; [ etu ]; - nodes.machine = - { pkgs, ... }: - { - services.sonarr.enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.sonarr.enable = true; + }; - testScript = '' - machine.wait_for_unit("sonarr.service") - machine.wait_for_open_port(8989) - machine.succeed("curl --fail http://localhost:8989/") - ''; - } -) + testScript = '' + machine.wait_for_unit("sonarr.service") + machine.wait_for_open_port(8989) + machine.succeed("curl --fail http://localhost:8989/") + ''; +} diff --git a/nixos/tests/sonic-server.nix b/nixos/tests/sonic-server.nix index bef0316d66f0..40f9802ec391 100644 --- a/nixos/tests/sonic-server.nix +++ b/nixos/tests/sonic-server.nix @@ -1,27 +1,25 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "sonic-server"; +{ pkgs, lib, ... }: +{ + name = "sonic-server"; - meta = { - maintainers = with lib.maintainers; [ anthonyroussel ]; + meta = { + maintainers = with lib.maintainers; [ anthonyroussel ]; + }; + + nodes.machine = + { pkgs, ... }: + { + services.sonic-server.enable = true; }; - nodes.machine = - { pkgs, ... 
}: - { - services.sonic-server.enable = true; - }; + testScript = '' + machine.start() - testScript = '' - machine.start() + machine.wait_for_unit("sonic-server.service") + machine.wait_for_open_port(1491) - machine.wait_for_unit("sonic-server.service") - machine.wait_for_open_port(1491) - - with subtest("Check control mode"): - result = machine.succeed('(echo START control; sleep 1; echo PING; echo QUIT) | nc localhost 1491').splitlines() - assert result[2] == "PONG", f"expected 'PONG', got '{result[2]}'" - ''; - } -) + with subtest("Check control mode"): + result = machine.succeed('(echo START control; sleep 1; echo PING; echo QUIT) | nc localhost 1491').splitlines() + assert result[2] == "PONG", f"expected 'PONG', got '{result[2]}'" + ''; +} diff --git a/nixos/tests/spacecookie.nix b/nixos/tests/spacecookie.nix index 696cdee902c4..79d348e33ff3 100644 --- a/nixos/tests/spacecookie.nix +++ b/nixos/tests/spacecookie.nix @@ -5,55 +5,53 @@ let fileContent = "Hello Gopher!\n"; fileName = "file.txt"; in -import ./make-test-python.nix ( - { ... }: - { - name = "spacecookie"; - nodes = { - ${gopherHost} = { - systemd.services.spacecookie = { - preStart = '' - mkdir -p ${gopherRoot}/directory - printf "%s" "${fileContent}" > ${gopherRoot}/${fileName} - ''; - }; - - services.spacecookie = { - enable = true; - openFirewall = true; - settings = { - root = gopherRoot; - hostname = gopherHost; - }; - }; +{ ... }: +{ + name = "spacecookie"; + nodes = { + ${gopherHost} = { + systemd.services.spacecookie = { + preStart = '' + mkdir -p ${gopherRoot}/directory + printf "%s" "${fileContent}" > ${gopherRoot}/${fileName} + ''; }; - ${gopherClient} = { }; + services.spacecookie = { + enable = true; + openFirewall = true; + settings = { + root = gopherRoot; + hostname = gopherHost; + }; + }; }; - testScript = '' - start_all() + ${gopherClient} = { }; + }; - # with daemon type notify, the unit being started - # should also mean the port is open - ${gopherHost}.wait_for_unit("spacecookie.service") - ${gopherClient}.wait_for_unit("network.target") + testScript = '' + start_all() - fileResponse = ${gopherClient}.succeed("curl -f -s gopher://${gopherHost}/0/${fileName}") + # with daemon type notify, the unit being started + # should also mean the port is open + ${gopherHost}.wait_for_unit("spacecookie.service") + ${gopherClient}.wait_for_unit("network.target") - # the file response should return our created file exactly - if not (fileResponse == "${builtins.replaceStrings [ "\n" ] [ "\\n" ] fileContent}"): - raise Exception("Unexpected file response") + fileResponse = ${gopherClient}.succeed("curl -f -s gopher://${gopherHost}/0/${fileName}") - # sanity check on the directory listing: we serve a directory and a file - # via gopher, so the directory listing should have exactly two entries, - # one with gopher file type 0 (file) and one with file type 1 (directory). 
- dirResponse = ${gopherClient}.succeed("curl -f -s gopher://${gopherHost}") - dirEntries = [l[0] for l in dirResponse.split("\n") if len(l) > 0] - dirEntries.sort() + # the file response should return our created file exactly + if not (fileResponse == "${builtins.replaceStrings [ "\n" ] [ "\\n" ] fileContent}"): + raise Exception("Unexpected file response") - if not (["0", "1"] == dirEntries): - raise Exception("Unexpected directory response") - ''; - } -) + # sanity check on the directory listing: we serve a directory and a file + # via gopher, so the directory listing should have exactly two entries, + # one with gopher file type 0 (file) and one with file type 1 (directory). + dirResponse = ${gopherClient}.succeed("curl -f -s gopher://${gopherHost}") + dirEntries = [l[0] for l in dirResponse.split("\n") if len(l) > 0] + dirEntries.sort() + + if not (["0", "1"] == dirEntries): + raise Exception("Unexpected directory response") + ''; +} diff --git a/nixos/tests/sqlite3-to-mysql.nix b/nixos/tests/sqlite3-to-mysql.nix index 25ffa13ba4f5..4c1a40f74148 100644 --- a/nixos/tests/sqlite3-to-mysql.nix +++ b/nixos/tests/sqlite3-to-mysql.nix @@ -1,70 +1,68 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - /* - This test suite replaces the typical pytestCheckHook function in - sqlite3-to-mysql due to the need of a running mysql instance. - */ +/* + This test suite replaces the typical pytestCheckHook function in + sqlite3-to-mysql due to the need of a running mysql instance. +*/ - { - name = "sqlite3-to-mysql"; - meta.maintainers = with lib.maintainers; [ gador ]; +{ + name = "sqlite3-to-mysql"; + meta.maintainers = with lib.maintainers; [ gador ]; - nodes.machine = - { pkgs, ... }: - { - environment.systemPackages = with pkgs; [ - sqlite3-to-mysql - # create one coherent python environment - (python3.withPackages ( - ps: - sqlite3-to-mysql.propagatedBuildInputs - ++ [ - python3Packages.pytest - python3Packages.pytest-mock - python3Packages.pytest-timeout - python3Packages.factory-boy - python3Packages.docker # only needed so import does not fail - sqlite3-to-mysql - ] - )) - ]; - services.mysql = { - package = pkgs.mariadb; - enable = true; - # from https://github.com/techouse/sqlite3-to-mysql/blob/master/tests/conftest.py - # and https://github.com/techouse/sqlite3-to-mysql/blob/master/.github/workflows/test.yml - initialScript = pkgs.writeText "mysql-init.sql" '' - create database test_db DEFAULT CHARACTER SET utf8mb4; - create user tester identified by 'testpass'; - grant all on test_db.* to tester; - create user tester@localhost identified by 'testpass'; - grant all on test_db.* to tester@localhost; - ''; - settings = { - mysqld = { - character-set-server = "utf8mb4"; - collation-server = "utf8mb4_unicode_ci"; - log_warnings = 1; - }; + nodes.machine = + { pkgs, ... 
}: + { + environment.systemPackages = with pkgs; [ + sqlite3-to-mysql + # create one coherent python environment + (python3.withPackages ( + ps: + sqlite3-to-mysql.propagatedBuildInputs + ++ [ + python3Packages.pytest + python3Packages.pytest-mock + python3Packages.pytest-timeout + python3Packages.factory-boy + python3Packages.docker # only needed so import does not fail + sqlite3-to-mysql + ] + )) + ]; + services.mysql = { + package = pkgs.mariadb; + enable = true; + # from https://github.com/techouse/sqlite3-to-mysql/blob/master/tests/conftest.py + # and https://github.com/techouse/sqlite3-to-mysql/blob/master/.github/workflows/test.yml + initialScript = pkgs.writeText "mysql-init.sql" '' + create database test_db DEFAULT CHARACTER SET utf8mb4; + create user tester identified by 'testpass'; + grant all on test_db.* to tester; + create user tester@localhost identified by 'testpass'; + grant all on test_db.* to tester@localhost; + ''; + settings = { + mysqld = { + character-set-server = "utf8mb4"; + collation-server = "utf8mb4_unicode_ci"; + log_warnings = 1; }; }; }; + }; - testScript = '' - machine.wait_for_unit("mysql") + testScript = '' + machine.wait_for_unit("mysql") - machine.succeed( - "sqlite3mysql --version | grep ${pkgs.sqlite3-to-mysql.version}" - ) + machine.succeed( + "sqlite3mysql --version | grep ${pkgs.sqlite3-to-mysql.version}" + ) - # invalid_database_name: assert '1045 (28000): Access denied' in "1044 (42000): Access denied [...] - # invalid_database_user: does not return non-zero exit for some reason - # test_version: has problems importing sqlite3_to_mysql and determining the version - machine.succeed( - "cd ${pkgs.sqlite3-to-mysql.src} \ - && pytest -v --no-docker -k \"not test_invalid_database_name and not test_invalid_database_user and not test_version\"" - ) - ''; - } -) + # invalid_database_name: assert '1045 (28000): Access denied' in "1044 (42000): Access denied [...] + # invalid_database_user: does not return non-zero exit for some reason + # test_version: has problems importing sqlite3_to_mysql and determining the version + machine.succeed( + "cd ${pkgs.sqlite3-to-mysql.src} \ + && pytest -v --no-docker -k \"not test_invalid_database_name and not test_invalid_database_user and not test_version\"" + ) + ''; +} diff --git a/nixos/tests/squid.nix b/nixos/tests/squid.nix index 6518158b0e49..7c38b05475c1 100644 --- a/nixos/tests/squid.nix +++ b/nixos/tests/squid.nix @@ -2,187 +2,185 @@ # - "external" -- i.e. the internet, where the proxy and server communicate # - "internal" -- i.e. an office LAN, where the client and proxy communicat -import ./make-test-python.nix ( - { - pkgs, - lib, - ... - }: - # VLANS: - # 1 -- simulates the internal network - # 2 -- simulates the external network - let - commonConfig = { - # Disable eth0 autoconfiguration - networking.useDHCP = false; +{ + pkgs, + lib, + ... 
+}: +# VLANS: +# 1 -- simulates the internal network +# 2 -- simulates the external network +let + commonConfig = { + # Disable eth0 autoconfiguration + networking.useDHCP = false; - environment.systemPackages = [ - (pkgs.writeScriptBin "check-connection" '' - #!/usr/bin/env bash + environment.systemPackages = [ + (pkgs.writeScriptBin "check-connection" '' + #!/usr/bin/env bash - set -e + set -e - if [[ "$2" == "" || "$1" == "--help" || "$1" == "-h" ]]; - then - echo "check-connection <[expect-success|expect-failure]>" - exit 1 - fi + if [[ "$2" == "" || "$1" == "--help" || "$1" == "-h" ]]; + then + echo "check-connection <[expect-success|expect-failure]>" + exit 1 + fi - ADDRESS="$1" + ADDRESS="$1" - function test_icmp() { timeout 3 ping -c 1 "$ADDRESS"; } + function test_icmp() { timeout 3 ping -c 1 "$ADDRESS"; } - if [[ "$2" == "expect-success" ]]; - then - test_icmp - else - ! test_icmp - fi - '') + if [[ "$2" == "expect-success" ]]; + then + test_icmp + else + ! test_icmp + fi + '') + ]; + }; +in +{ + name = "squid"; + meta = with pkgs.lib.maintainers; { + maintainers = [ cobalt ]; + }; + + nodes = { + client = + { ... }: + lib.mkMerge [ + commonConfig + { + virtualisation.vlans = [ 1 ]; + networking.firewall.enable = true; + + # NOTE: the client doesn't need a HTTP server, this is here to allow a validation of the proxy acl + networking.firewall.allowedTCPPorts = [ 80 ]; + + services.nginx = { + enable = true; + + virtualHosts."server" = { + root = "/etc"; + locations."/".index = "hostname"; + listen = [ + { + addr = "0.0.0.0"; + port = 80; + } + ]; + }; + }; + } ]; - }; - in - { - name = "squid"; - meta = with pkgs.lib.maintainers; { - maintainers = [ cobalt ]; - }; - nodes = { - client = - { ... }: - lib.mkMerge [ - commonConfig - { - virtualisation.vlans = [ 1 ]; - networking.firewall.enable = true; - - # NOTE: the client doesn't need a HTTP server, this is here to allow a validation of the proxy acl - networking.firewall.allowedTCPPorts = [ 80 ]; - - services.nginx = { - enable = true; - - virtualHosts."server" = { - root = "/etc"; - locations."/".index = "hostname"; - listen = [ - { - addr = "0.0.0.0"; - port = 80; - } - ]; - }; - }; - } - ]; - - proxy = - { config, nodes, ... }: - let - clientIp = (pkgs.lib.head nodes.client.networking.interfaces.eth1.ipv4.addresses).address; - serverIp = (pkgs.lib.head nodes.server.networking.interfaces.eth1.ipv4.addresses).address; - in - lib.mkMerge [ - commonConfig - { - nixpkgs.config.permittedInsecurePackages = [ "squid-7.0.1" ]; - - virtualisation.vlans = [ - 1 - 2 - ]; - networking.firewall.enable = true; - networking.firewall.allowedTCPPorts = [ config.services.squid.proxyPort ]; - - services.squid = { - enable = true; - - extraConfig = '' - acl client src ${clientIp} - acl server dst ${serverIp} - http_access allow client server - http_access deny all - ''; - }; - } - ]; - - server = - { ... }: - lib.mkMerge [ - commonConfig - { - virtualisation.vlans = [ 2 ]; - networking.firewall.enable = true; - networking.firewall.allowedTCPPorts = [ 80 ]; - - services.nginx = { - enable = true; - - virtualHosts."server" = { - root = "/etc"; - locations."/".index = "hostname"; - listen = [ - { - addr = "0.0.0.0"; - port = 80; - } - ]; - }; - }; - } - ]; - }; - - testScript = - { nodes, ... }: + proxy = + { config, nodes, ... 
}: let clientIp = (pkgs.lib.head nodes.client.networking.interfaces.eth1.ipv4.addresses).address; serverIp = (pkgs.lib.head nodes.server.networking.interfaces.eth1.ipv4.addresses).address; - proxyExternalIp = (pkgs.lib.head nodes.proxy.networking.interfaces.eth2.ipv4.addresses).address; - proxyInternalIp = (pkgs.lib.head nodes.proxy.networking.interfaces.eth1.ipv4.addresses).address; in - '' - client.start() - proxy.start() - server.start() + lib.mkMerge [ + commonConfig + { + nixpkgs.config.permittedInsecurePackages = [ "squid-7.0.1" ]; - proxy.wait_for_unit("network.target") - proxy.wait_for_unit("squid.service") - client.wait_for_unit("network.target") - server.wait_for_unit("network.target") - server.wait_for_unit("nginx.service") + virtualisation.vlans = [ + 1 + 2 + ]; + networking.firewall.enable = true; + networking.firewall.allowedTCPPorts = [ config.services.squid.proxyPort ]; - # Topology checks. - with subtest("proxy connectivity"): - ## The proxy should have direct access to the server and client - proxy.succeed("check-connection ${serverIp} expect-success") - proxy.succeed("check-connection ${clientIp} expect-success") + services.squid = { + enable = true; - with subtest("server connectivity"): - ## The server should have direct access to the proxy - server.succeed("check-connection ${proxyExternalIp} expect-success") - ## ... and not have access to the client - server.succeed("check-connection ${clientIp} expect-failure") + extraConfig = '' + acl client src ${clientIp} + acl server dst ${serverIp} + http_access allow client server + http_access deny all + ''; + }; + } + ]; - with subtest("client connectivity"): - # The client should be also able to connect to the proxy - client.succeed("check-connection ${proxyInternalIp} expect-success") - # but not the client to the server - client.succeed("check-connection ${serverIp} expect-failure") + server = + { ... }: + lib.mkMerge [ + commonConfig + { + virtualisation.vlans = [ 2 ]; + networking.firewall.enable = true; + networking.firewall.allowedTCPPorts = [ 80 ]; - with subtest("HTTP"): - # the client cannot reach the server directly over HTTP - client.fail('[[ `timeout 3 curl --fail-with-body http://${serverIp}` ]]') - # ... but can with the proxy - client.succeed('[[ `timeout 3 curl --fail-with-body --proxy http://${proxyInternalIp}:3128 http://${serverIp}` == "server" ]]') - # and cannot from the server (with a 4xx error code) and ... - server.fail('[[ `timeout 3 curl --fail-with-body --proxy http://${proxyExternalIp}:3128 http://${clientIp}` == "client" ]]') - # .. not the client hostname - server.fail('[[ `timeout 3 curl --proxy http://${proxyExternalIp}:3128 http://${clientIp}` == "client" ]]') - # with an explicit deny message (no --fail because we want to parse the returned message) - server.succeed('[[ `timeout 3 curl --proxy http://${proxyExternalIp}:3128 http://${clientIp}` == *"ERR_ACCESS_DENIED"* ]]') - ''; - } -) + services.nginx = { + enable = true; + + virtualHosts."server" = { + root = "/etc"; + locations."/".index = "hostname"; + listen = [ + { + addr = "0.0.0.0"; + port = 80; + } + ]; + }; + }; + } + ]; + }; + + testScript = + { nodes, ... 
}: + let + clientIp = (pkgs.lib.head nodes.client.networking.interfaces.eth1.ipv4.addresses).address; + serverIp = (pkgs.lib.head nodes.server.networking.interfaces.eth1.ipv4.addresses).address; + proxyExternalIp = (pkgs.lib.head nodes.proxy.networking.interfaces.eth2.ipv4.addresses).address; + proxyInternalIp = (pkgs.lib.head nodes.proxy.networking.interfaces.eth1.ipv4.addresses).address; + in + '' + client.start() + proxy.start() + server.start() + + proxy.wait_for_unit("network.target") + proxy.wait_for_unit("squid.service") + client.wait_for_unit("network.target") + server.wait_for_unit("network.target") + server.wait_for_unit("nginx.service") + + # Topology checks. + with subtest("proxy connectivity"): + ## The proxy should have direct access to the server and client + proxy.succeed("check-connection ${serverIp} expect-success") + proxy.succeed("check-connection ${clientIp} expect-success") + + with subtest("server connectivity"): + ## The server should have direct access to the proxy + server.succeed("check-connection ${proxyExternalIp} expect-success") + ## ... and not have access to the client + server.succeed("check-connection ${clientIp} expect-failure") + + with subtest("client connectivity"): + # The client should be also able to connect to the proxy + client.succeed("check-connection ${proxyInternalIp} expect-success") + # but not the client to the server + client.succeed("check-connection ${serverIp} expect-failure") + + with subtest("HTTP"): + # the client cannot reach the server directly over HTTP + client.fail('[[ `timeout 3 curl --fail-with-body http://${serverIp}` ]]') + # ... but can with the proxy + client.succeed('[[ `timeout 3 curl --fail-with-body --proxy http://${proxyInternalIp}:3128 http://${serverIp}` == "server" ]]') + # and cannot from the server (with a 4xx error code) and ... + server.fail('[[ `timeout 3 curl --fail-with-body --proxy http://${proxyExternalIp}:3128 http://${clientIp}` == "client" ]]') + # .. not the client hostname + server.fail('[[ `timeout 3 curl --proxy http://${proxyExternalIp}:3128 http://${clientIp}` == "client" ]]') + # with an explicit deny message (no --fail because we want to parse the returned message) + server.succeed('[[ `timeout 3 curl --proxy http://${proxyExternalIp}:3128 http://${clientIp}` == *"ERR_ACCESS_DENIED"* ]]') + ''; +} diff --git a/nixos/tests/ssh-agent-auth.nix b/nixos/tests/ssh-agent-auth.nix index 4bfe453586f9..58ef908fd518 100644 --- a/nixos/tests/ssh-agent-auth.nix +++ b/nixos/tests/ssh-agent-auth.nix @@ -1,65 +1,63 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let - inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey; - in - { - name = "ssh-agent-auth"; - meta.maintainers = with lib.maintainers; [ nicoo ]; +{ lib, pkgs, ... }: +let + inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey; +in +{ + name = "ssh-agent-auth"; + meta.maintainers = with lib.maintainers; [ nicoo ]; - nodes = - let - nodeConfig = - n: - { ... }: - { - users.users = { - admin = { - isNormalUser = true; - extraGroups = [ "wheel" ]; - openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - }; - foo.isNormalUser = true; + nodes = + let + nodeConfig = + n: + { ... 
}: + { + users.users = { + admin = { + isNormalUser = true; + extraGroups = [ "wheel" ]; + openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; }; - - security.pam.sshAgentAuth = { - # Must be specified, as nixpkgs CI expects everything to eval without warning - authorizedKeysFiles = [ "/etc/ssh/authorized_keys.d/%u" ]; - enable = true; - }; - security.${lib.replaceStrings [ "_" ] [ "-" ] n} = { - enable = true; - wheelNeedsPassword = true; # We are checking `pam_ssh_agent_auth(8)` works for a sudoer - }; - - # Necessary for pam_ssh_agent_auth >_>' - services.openssh.enable = true; + foo.isNormalUser = true; }; - in - lib.genAttrs [ "sudo" "sudo_rs" ] nodeConfig; - testScript = - let - privateKeyPath = "/home/admin/.ssh/id_ecdsa"; - userScript = pkgs.writeShellScript "test-script" '' - set -e - ssh-add -q ${privateKeyPath} + security.pam.sshAgentAuth = { + # Must be specified, as nixpkgs CI expects everything to eval without warning + authorizedKeysFiles = [ "/etc/ssh/authorized_keys.d/%u" ]; + enable = true; + }; + security.${lib.replaceStrings [ "_" ] [ "-" ] n} = { + enable = true; + wheelNeedsPassword = true; # We are checking `pam_ssh_agent_auth(8)` works for a sudoer + }; - # faketty needed to ensure `sudo` doesn't write to the controlling PTY, - # which would break the test-driver's line-oriented protocol. - ${lib.getExe pkgs.faketty} sudo -u foo -- id -un - ''; - in - '' - for vm in (sudo, sudo_rs): - sudo_impl = vm.name.replace("_", "-") - with subtest(f"wheel user can auth with ssh-agent for {sudo_impl}"): - vm.copy_from_host("${snakeOilPrivateKey}", "${privateKeyPath}") - vm.succeed("chmod -R 0700 /home/admin") - vm.succeed("chown -R admin:users /home/admin") + # Necessary for pam_ssh_agent_auth >_>' + services.openssh.enable = true; + }; + in + lib.genAttrs [ "sudo" "sudo_rs" ] nodeConfig; - # Run `userScript` in an environment with an SSH-agent available - assert vm.succeed("sudo -u admin -- ssh-agent ${userScript} 2>&1").strip() == "foo" + testScript = + let + privateKeyPath = "/home/admin/.ssh/id_ecdsa"; + userScript = pkgs.writeShellScript "test-script" '' + set -e + ssh-add -q ${privateKeyPath} + + # faketty needed to ensure `sudo` doesn't write to the controlling PTY, + # which would break the test-driver's line-oriented protocol. + ${lib.getExe pkgs.faketty} sudo -u foo -- id -un ''; - } -) + in + '' + for vm in (sudo, sudo_rs): + sudo_impl = vm.name.replace("_", "-") + with subtest(f"wheel user can auth with ssh-agent for {sudo_impl}"): + vm.copy_from_host("${snakeOilPrivateKey}", "${privateKeyPath}") + vm.succeed("chmod -R 0700 /home/admin") + vm.succeed("chown -R admin:users /home/admin") + + # Run `userScript` in an environment with an SSH-agent available + assert vm.succeed("sudo -u admin -- ssh-agent ${userScript} 2>&1").strip() == "foo" + ''; +} diff --git a/nixos/tests/ssh-audit.nix b/nixos/tests/ssh-audit.nix index 3e0c33c6850f..88232a9010d3 100644 --- a/nixos/tests/ssh-audit.nix +++ b/nixos/tests/ssh-audit.nix @@ -1,106 +1,104 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - sshKeys = import (pkgs.path + "/nixos/tests/ssh-keys.nix") pkgs; - sshUsername = "any-user"; - serverName = "server"; - clientName = "client"; - sshAuditPort = 2222; - in - { - name = "ssh"; +{ pkgs, ... 
}: +let + sshKeys = import (pkgs.path + "/nixos/tests/ssh-keys.nix") pkgs; + sshUsername = "any-user"; + serverName = "server"; + clientName = "client"; + sshAuditPort = 2222; +in +{ + name = "ssh"; - nodes = { - "${serverName}" = { - networking.firewall.allowedTCPPorts = [ - sshAuditPort + nodes = { + "${serverName}" = { + networking.firewall.allowedTCPPorts = [ + sshAuditPort + ]; + services.openssh.enable = true; + users.users."${sshUsername}" = { + isNormalUser = true; + openssh.authorizedKeys.keys = [ + sshKeys.snakeOilPublicKey ]; - services.openssh.enable = true; - users.users."${sshUsername}" = { - isNormalUser = true; - openssh.authorizedKeys.keys = [ - sshKeys.snakeOilPublicKey - ]; - }; - }; - "${clientName}" = { - programs.ssh = { - ciphers = [ - "aes128-ctr" - "aes128-gcm@openssh.com" - "aes192-ctr" - "aes256-ctr" - "aes256-gcm@openssh.com" - "chacha20-poly1305@openssh.com" - ]; - extraConfig = '' - IdentitiesOnly yes - ''; - hostKeyAlgorithms = [ - "rsa-sha2-256" - "rsa-sha2-256-cert-v01@openssh.com" - "rsa-sha2-512" - "rsa-sha2-512-cert-v01@openssh.com" - "sk-ssh-ed25519-cert-v01@openssh.com" - "sk-ssh-ed25519@openssh.com" - "ssh-ed25519" - "ssh-ed25519-cert-v01@openssh.com" - ]; - kexAlgorithms = [ - "curve25519-sha256" - "curve25519-sha256@libssh.org" - "diffie-hellman-group-exchange-sha256" - "diffie-hellman-group16-sha512" - "diffie-hellman-group18-sha512" - "sntrup761x25519-sha512@openssh.com" - ]; - macs = [ - "hmac-sha2-256-etm@openssh.com" - "hmac-sha2-512-etm@openssh.com" - "umac-128-etm@openssh.com" - ]; - }; }; }; + "${clientName}" = { + programs.ssh = { + ciphers = [ + "aes128-ctr" + "aes128-gcm@openssh.com" + "aes192-ctr" + "aes256-ctr" + "aes256-gcm@openssh.com" + "chacha20-poly1305@openssh.com" + ]; + extraConfig = '' + IdentitiesOnly yes + ''; + hostKeyAlgorithms = [ + "rsa-sha2-256" + "rsa-sha2-256-cert-v01@openssh.com" + "rsa-sha2-512" + "rsa-sha2-512-cert-v01@openssh.com" + "sk-ssh-ed25519-cert-v01@openssh.com" + "sk-ssh-ed25519@openssh.com" + "ssh-ed25519" + "ssh-ed25519-cert-v01@openssh.com" + ]; + kexAlgorithms = [ + "curve25519-sha256" + "curve25519-sha256@libssh.org" + "diffie-hellman-group-exchange-sha256" + "diffie-hellman-group16-sha512" + "diffie-hellman-group18-sha512" + "sntrup761x25519-sha512@openssh.com" + ]; + macs = [ + "hmac-sha2-256-etm@openssh.com" + "hmac-sha2-512-etm@openssh.com" + "umac-128-etm@openssh.com" + ]; + }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - ${serverName}.wait_for_open_port(22) + ${serverName}.wait_for_open_port(22) - # Should pass SSH server audit - ${serverName}.succeed("${pkgs.ssh-audit}/bin/ssh-audit 127.0.0.1") + # Should pass SSH server audit + ${serverName}.succeed("${pkgs.ssh-audit}/bin/ssh-audit 127.0.0.1") - # Wait for client to be able to connect to the server - ${clientName}.systemctl("start network-online.target") - ${clientName}.wait_for_unit("network-online.target") + # Wait for client to be able to connect to the server + ${clientName}.systemctl("start network-online.target") + ${clientName}.wait_for_unit("network-online.target") - # Set up trusted private key - ${clientName}.succeed("cat ${sshKeys.snakeOilPrivateKey} > privkey.snakeoil") - ${clientName}.succeed("chmod 600 privkey.snakeoil") + # Set up trusted private key + ${clientName}.succeed("cat ${sshKeys.snakeOilPrivateKey} > privkey.snakeoil") + ${clientName}.succeed("chmod 600 privkey.snakeoil") - # Fail fast and disable interactivity - ssh_options = "-o BatchMode=yes -o ConnectTimeout=1 -o 
StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + # Fail fast and disable interactivity + ssh_options = "-o BatchMode=yes -o ConnectTimeout=1 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" - # Should deny root user - ${clientName}.fail(f"ssh {ssh_options} root@${serverName} true") + # Should deny root user + ${clientName}.fail(f"ssh {ssh_options} root@${serverName} true") - # Should deny non-root user password login - ${clientName}.fail(f"ssh {ssh_options} -o PasswordAuthentication=yes ${sshUsername}@${serverName} true") + # Should deny non-root user password login + ${clientName}.fail(f"ssh {ssh_options} -o PasswordAuthentication=yes ${sshUsername}@${serverName} true") - # Should allow non-root user certificate login - ${clientName}.succeed(f"ssh {ssh_options} -i privkey.snakeoil ${sshUsername}@${serverName} true") + # Should allow non-root user certificate login + ${clientName}.succeed(f"ssh {ssh_options} -i privkey.snakeoil ${sshUsername}@${serverName} true") - # Should pass SSH client audit - service_name = "ssh-audit.service" - ${serverName}.succeed(f"systemd-run --unit={service_name} ${pkgs.ssh-audit}/bin/ssh-audit --client-audit --port=${toString sshAuditPort}") - ${clientName}.sleep(5) # We can't use wait_for_open_port because ssh-audit exits as soon as anything talks to it - ${clientName}.execute( - f"ssh {ssh_options} -i privkey.snakeoil -p ${toString sshAuditPort} ${sshUsername}@${serverName} true", - check_return=False, - timeout=10 - ) - ${serverName}.succeed(f"exit $(systemctl show --property=ExecMainStatus --value {service_name})") - ''; - } -) + # Should pass SSH client audit + service_name = "ssh-audit.service" + ${serverName}.succeed(f"systemd-run --unit={service_name} ${pkgs.ssh-audit}/bin/ssh-audit --client-audit --port=${toString sshAuditPort}") + ${clientName}.sleep(5) # We can't use wait_for_open_port because ssh-audit exits as soon as anything talks to it + ${clientName}.execute( + f"ssh {ssh_options} -i privkey.snakeoil -p ${toString sshAuditPort} ${sshUsername}@${serverName} true", + check_return=False, + timeout=10 + ) + ${serverName}.succeed(f"exit $(systemctl show --property=ExecMainStatus --value {service_name})") + ''; +} diff --git a/nixos/tests/stalwart-mail.nix b/nixos/tests/stalwart-mail.nix index 38b47cb88f90..cfc0d3bb1c5b 100644 --- a/nixos/tests/stalwart-mail.nix +++ b/nixos/tests/stalwart-mail.nix @@ -7,125 +7,123 @@ let domain = certs.domain; in -import ./make-test-python.nix ( - { lib, ... }: - { - name = "stalwart-mail"; +{ lib, ... }: +{ + name = "stalwart-mail"; - nodes.main = - { pkgs, ... }: - { - security.pki.certificateFiles = [ certs.ca.cert ]; + nodes.main = + { pkgs, ... 
}: + { + security.pki.certificateFiles = [ certs.ca.cert ]; - services.stalwart-mail = { - enable = true; - settings = { - server.hostname = domain; + services.stalwart-mail = { + enable = true; + settings = { + server.hostname = domain; - certificate."snakeoil" = { - cert = "%{file:${certs.${domain}.cert}}%"; - private-key = "%{file:${certs.${domain}.key}}%"; + certificate."snakeoil" = { + cert = "%{file:${certs.${domain}.cert}}%"; + private-key = "%{file:${certs.${domain}.key}}%"; + }; + + server.tls = { + certificate = "snakeoil"; + enable = true; + implicit = false; + }; + + server.listener = { + "smtp-submission" = { + bind = [ "[::]:587" ]; + protocol = "smtp"; }; - server.tls = { - certificate = "snakeoil"; - enable = true; - implicit = false; - }; - - server.listener = { - "smtp-submission" = { - bind = [ "[::]:587" ]; - protocol = "smtp"; - }; - - "imap" = { - bind = [ "[::]:143" ]; - protocol = "imap"; - }; - }; - - session.auth.mechanisms = "[plain]"; - session.auth.directory = "'in-memory'"; - storage.directory = "in-memory"; - - session.rcpt.directory = "'in-memory'"; - queue.outbound.next-hop = "'local'"; - - directory."in-memory" = { - type = "memory"; - principals = [ - { - class = "individual"; - name = "alice"; - secret = "foobar"; - email = [ "alice@${domain}" ]; - } - { - class = "individual"; - name = "bob"; - secret = "foobar"; - email = [ "bob@${domain}" ]; - } - ]; + "imap" = { + bind = [ "[::]:143" ]; + protocol = "imap"; }; }; + + session.auth.mechanisms = "[plain]"; + session.auth.directory = "'in-memory'"; + storage.directory = "in-memory"; + + session.rcpt.directory = "'in-memory'"; + queue.outbound.next-hop = "'local'"; + + directory."in-memory" = { + type = "memory"; + principals = [ + { + class = "individual"; + name = "alice"; + secret = "foobar"; + email = [ "alice@${domain}" ]; + } + { + class = "individual"; + name = "bob"; + secret = "foobar"; + email = [ "bob@${domain}" ]; + } + ]; + }; }; - - environment.systemPackages = [ - (pkgs.writers.writePython3Bin "test-smtp-submission" { } '' - from smtplib import SMTP - - with SMTP('localhost', 587) as smtp: - smtp.starttls() - smtp.login('alice', 'foobar') - smtp.sendmail( - 'alice@${domain}', - 'bob@${domain}', - """ - From: alice@${domain} - To: bob@${domain} - Subject: Some test message - - This is a test message. - """.strip() - ) - '') - - (pkgs.writers.writePython3Bin "test-imap-read" { } '' - from imaplib import IMAP4 - - with IMAP4('localhost') as imap: - imap.starttls() - status, [caps] = imap.login('bob', 'foobar') - assert status == 'OK' - imap.select() - status, [ref] = imap.search(None, 'ALL') - assert status == 'OK' - [msgId] = ref.split() - status, msg = imap.fetch(msgId, 'BODY[TEXT]') - assert status == 'OK' - assert msg[0][1].strip() == b'This is a test message.' - '') - ]; }; - testScript = # python - '' - main.wait_for_unit("stalwart-mail.service") - main.wait_for_open_port(587) - main.wait_for_open_port(143) + environment.systemPackages = [ + (pkgs.writers.writePython3Bin "test-smtp-submission" { } '' + from smtplib import SMTP - main.succeed("test-smtp-submission") - main.succeed("test-imap-read") - ''; + with SMTP('localhost', 587) as smtp: + smtp.starttls() + smtp.login('alice', 'foobar') + smtp.sendmail( + 'alice@${domain}', + 'bob@${domain}', + """ + From: alice@${domain} + To: bob@${domain} + Subject: Some test message - meta = { - maintainers = with lib.maintainers; [ - happysalada - euxane - onny + This is a test message. 
+ """.strip() + ) + '') + + (pkgs.writers.writePython3Bin "test-imap-read" { } '' + from imaplib import IMAP4 + + with IMAP4('localhost') as imap: + imap.starttls() + status, [caps] = imap.login('bob', 'foobar') + assert status == 'OK' + imap.select() + status, [ref] = imap.search(None, 'ALL') + assert status == 'OK' + [msgId] = ref.split() + status, msg = imap.fetch(msgId, 'BODY[TEXT]') + assert status == 'OK' + assert msg[0][1].strip() == b'This is a test message.' + '') ]; }; - } -) + + testScript = # python + '' + main.wait_for_unit("stalwart-mail.service") + main.wait_for_open_port(587) + main.wait_for_open_port(143) + + main.succeed("test-smtp-submission") + main.succeed("test-imap-read") + ''; + + meta = { + maintainers = with lib.maintainers; [ + happysalada + euxane + onny + ]; + }; +} diff --git a/nixos/tests/strongswan-swanctl.nix b/nixos/tests/strongswan-swanctl.nix index a36757bf9eaa..e5ef5f1d5ccf 100644 --- a/nixos/tests/strongswan-swanctl.nix +++ b/nixos/tests/strongswan-swanctl.nix @@ -16,150 +16,148 @@ # See the NixOS manual for how to run this test: # https://nixos.org/nixos/manual/index.html#sec-running-nixos-tests-interactively -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - allowESP = "iptables --insert INPUT --protocol ESP --jump ACCEPT"; +let + allowESP = "iptables --insert INPUT --protocol ESP --jump ACCEPT"; - # Shared VPN settings: - vlan0 = "192.168.0.0/24"; - carolIp = "192.168.1.2"; - moonIp = "192.168.1.3"; - version = 2; - secret = "0sFpZAZqEN6Ti9sqt4ZP5EWcqx"; - esp_proposals = [ "aes128gcm128-x25519" ]; - proposals = [ "aes128-sha256-x25519" ]; - in - { - name = "strongswan-swanctl"; - meta.maintainers = with pkgs.lib.maintainers; [ basvandijk ]; - nodes = { + # Shared VPN settings: + vlan0 = "192.168.0.0/24"; + carolIp = "192.168.1.2"; + moonIp = "192.168.1.3"; + version = 2; + secret = "0sFpZAZqEN6Ti9sqt4ZP5EWcqx"; + esp_proposals = [ "aes128gcm128-x25519" ]; + proposals = [ "aes128-sha256-x25519" ]; +in +{ + name = "strongswan-swanctl"; + meta.maintainers = with pkgs.lib.maintainers; [ basvandijk ]; + nodes = { - alice = - { ... }: - { - virtualisation.vlans = [ 0 ]; - networking = { - dhcpcd.enable = false; - defaultGateway = "192.168.0.3"; - }; + alice = + { ... }: + { + virtualisation.vlans = [ 0 ]; + networking = { + dhcpcd.enable = false; + defaultGateway = "192.168.0.3"; }; + }; - moon = - { config, ... }: - let - strongswan = config.services.strongswan-swanctl.package; - in - { - virtualisation.vlans = [ - 0 - 1 - ]; - networking = { - dhcpcd.enable = false; - firewall = { - allowedUDPPorts = [ - 4500 - 500 - ]; - extraCommands = allowESP; - }; - nat = { - enable = true; - internalIPs = [ vlan0 ]; - internalInterfaces = [ "eth1" ]; - externalIP = moonIp; - externalInterface = "eth2"; - }; + moon = + { config, ... 
}: + let + strongswan = config.services.strongswan-swanctl.package; + in + { + virtualisation.vlans = [ + 0 + 1 + ]; + networking = { + dhcpcd.enable = false; + firewall = { + allowedUDPPorts = [ + 4500 + 500 + ]; + extraCommands = allowESP; }; - environment.systemPackages = [ strongswan ]; - services.strongswan-swanctl = { + nat = { enable = true; - swanctl = { - connections = { - rw = { - local_addrs = [ moonIp ]; - local.main = { - auth = "psk"; - }; - remote.main = { - auth = "psk"; - }; - children = { - net = { - local_ts = [ vlan0 ]; - updown = "${strongswan}/libexec/ipsec/_updown iptables"; - inherit esp_proposals; - }; - }; - inherit version; - inherit proposals; + internalIPs = [ vlan0 ]; + internalInterfaces = [ "eth1" ]; + externalIP = moonIp; + externalInterface = "eth2"; + }; + }; + environment.systemPackages = [ strongswan ]; + services.strongswan-swanctl = { + enable = true; + swanctl = { + connections = { + rw = { + local_addrs = [ moonIp ]; + local.main = { + auth = "psk"; }; + remote.main = { + auth = "psk"; + }; + children = { + net = { + local_ts = [ vlan0 ]; + updown = "${strongswan}/libexec/ipsec/_updown iptables"; + inherit esp_proposals; + }; + }; + inherit version; + inherit proposals; }; - secrets = { - ike.carol = { - id.main = carolIp; - inherit secret; - }; + }; + secrets = { + ike.carol = { + id.main = carolIp; + inherit secret; }; }; }; }; + }; - carol = - { config, ... }: - let - strongswan = config.services.strongswan-swanctl.package; - in - { - virtualisation.vlans = [ 1 ]; - networking = { - dhcpcd.enable = false; - firewall.extraCommands = allowESP; - }; - environment.systemPackages = [ strongswan ]; - services.strongswan-swanctl = { - enable = true; - swanctl = { - connections = { - home = { - local_addrs = [ carolIp ]; - remote_addrs = [ moonIp ]; - local.main = { - auth = "psk"; - id = carolIp; - }; - remote.main = { - auth = "psk"; - id = moonIp; - }; - children = { - home = { - remote_ts = [ vlan0 ]; - start_action = "trap"; - updown = "${strongswan}/libexec/ipsec/_updown iptables"; - inherit esp_proposals; - }; - }; - inherit version; - inherit proposals; + carol = + { config, ... }: + let + strongswan = config.services.strongswan-swanctl.package; + in + { + virtualisation.vlans = [ 1 ]; + networking = { + dhcpcd.enable = false; + firewall.extraCommands = allowESP; + }; + environment.systemPackages = [ strongswan ]; + services.strongswan-swanctl = { + enable = true; + swanctl = { + connections = { + home = { + local_addrs = [ carolIp ]; + remote_addrs = [ moonIp ]; + local.main = { + auth = "psk"; + id = carolIp; }; + remote.main = { + auth = "psk"; + id = moonIp; + }; + children = { + home = { + remote_ts = [ vlan0 ]; + start_action = "trap"; + updown = "${strongswan}/libexec/ipsec/_updown iptables"; + inherit esp_proposals; + }; + }; + inherit version; + inherit proposals; }; - secrets = { - ike.moon = { - id.main = moonIp; - inherit secret; - }; + }; + secrets = { + ike.moon = { + id.main = moonIp; + inherit secret; }; }; }; }; + }; - }; - testScript = '' - start_all() - carol.wait_until_succeeds("ping -c 1 alice") - ''; - } -) + }; + testScript = '' + start_all() + carol.wait_until_succeeds("ping -c 1 alice") + ''; +} diff --git a/nixos/tests/sudo.nix b/nixos/tests/sudo.nix index 77accf8e82fb..13246be95289 100644 --- a/nixos/tests/sudo.nix +++ b/nixos/tests/sudo.nix @@ -3,181 +3,179 @@ let password = "helloworld"; in -import ./make-test-python.nix ( - { lib, pkgs, ... 
}: - { - name = "sudo"; - meta.maintainers = pkgs.sudo.meta.maintainers; +{ lib, pkgs, ... }: +{ + name = "sudo"; + meta.maintainers = pkgs.sudo.meta.maintainers; - nodes.machine = - { lib, ... }: - { - users.groups = { - foobar = { }; - barfoo = { }; - baz = { - gid = 1337; - }; + nodes.machine = + { lib, ... }: + { + users.groups = { + foobar = { }; + barfoo = { }; + baz = { + gid = 1337; }; - users.users = { - test0 = { - isNormalUser = true; - extraGroups = [ "wheel" ]; - }; - test1 = { - isNormalUser = true; - password = password; - }; - test2 = { - isNormalUser = true; - extraGroups = [ "foobar" ]; - password = password; - }; - test3 = { - isNormalUser = true; - extraGroups = [ "barfoo" ]; - }; - test4 = { - isNormalUser = true; - extraGroups = [ "baz" ]; - }; - test5 = { - isNormalUser = true; - }; + }; + users.users = { + test0 = { + isNormalUser = true; + extraGroups = [ "wheel" ]; }; - - security.sudo = { - # Explicitly _not_ defining 'enable = true;' here, to check that sudo is enabled by default - - wheelNeedsPassword = false; - - extraConfig = '' - Defaults lecture="never" - ''; - - extraRules = [ - # SUDOERS SYNTAX CHECK (Test whether the module produces a valid output; - # errors being detected by the visudo checks. - - # These should not create any entries - { - users = [ "notest1" ]; - commands = [ ]; - } - { - commands = [ - { - command = "ALL"; - options = [ ]; - } - ]; - } - - # Test defining commands with the options syntax, though not setting any options - { - users = [ "notest2" ]; - commands = [ - { - command = "ALL"; - options = [ ]; - } - ]; - } - - # CONFIGURATION FOR TEST CASES - { - users = [ "test1" ]; - groups = [ "foobar" ]; - commands = [ "ALL" ]; - } - { - groups = [ - "barfoo" - 1337 - ]; - commands = [ - { - command = "ALL"; - options = [ - "NOPASSWD" - "NOSETENV" - ]; - } - ]; - } - { - users = [ "test5" ]; - commands = [ - { - command = "ALL"; - options = [ - "NOPASSWD" - "SETENV" - ]; - } - ]; - runAs = "test1:barfoo"; - } - ]; + test1 = { + isNormalUser = true; + password = password; + }; + test2 = { + isNormalUser = true; + extraGroups = [ "foobar" ]; + password = password; + }; + test3 = { + isNormalUser = true; + extraGroups = [ "barfoo" ]; + }; + test4 = { + isNormalUser = true; + extraGroups = [ "baz" ]; + }; + test5 = { + isNormalUser = true; }; }; - nodes.strict = - { ... }: - { - users.users = { - admin = { - isNormalUser = true; - extraGroups = [ "wheel" ]; - }; - noadmin = { - isNormalUser = true; - }; - }; + security.sudo = { + # Explicitly _not_ defining 'enable = true;' here, to check that sudo is enabled by default - security.sudo = { - enable = true; - wheelNeedsPassword = false; - execWheelOnly = true; + wheelNeedsPassword = false; + + extraConfig = '' + Defaults lecture="never" + ''; + + extraRules = [ + # SUDOERS SYNTAX CHECK (Test whether the module produces a valid output; + # errors being detected by the visudo checks. 
+ + # These should not create any entries + { + users = [ "notest1" ]; + commands = [ ]; + } + { + commands = [ + { + command = "ALL"; + options = [ ]; + } + ]; + } + + # Test defining commands with the options syntax, though not setting any options + { + users = [ "notest2" ]; + commands = [ + { + command = "ALL"; + options = [ ]; + } + ]; + } + + # CONFIGURATION FOR TEST CASES + { + users = [ "test1" ]; + groups = [ "foobar" ]; + commands = [ "ALL" ]; + } + { + groups = [ + "barfoo" + 1337 + ]; + commands = [ + { + command = "ALL"; + options = [ + "NOPASSWD" + "NOSETENV" + ]; + } + ]; + } + { + users = [ "test5" ]; + commands = [ + { + command = "ALL"; + options = [ + "NOPASSWD" + "SETENV" + ]; + } + ]; + runAs = "test1:barfoo"; + } + ]; + }; + }; + + nodes.strict = + { ... }: + { + users.users = { + admin = { + isNormalUser = true; + extraGroups = [ "wheel" ]; + }; + noadmin = { + isNormalUser = true; }; }; - testScript = '' - with subtest("users in wheel group should have passwordless sudo"): - machine.succeed('su - test0 -c "sudo -u root true"') + security.sudo = { + enable = true; + wheelNeedsPassword = false; + execWheelOnly = true; + }; + }; - with subtest("test1 user should have sudo with password"): - machine.succeed('su - test1 -c "echo ${password} | sudo -S -u root true"') + testScript = '' + with subtest("users in wheel group should have passwordless sudo"): + machine.succeed('su - test0 -c "sudo -u root true"') - with subtest("test1 user should not be able to use sudo without password"): - machine.fail('su - test1 -c "sudo -n -u root true"') + with subtest("test1 user should have sudo with password"): + machine.succeed('su - test1 -c "echo ${password} | sudo -S -u root true"') - with subtest("users in group 'foobar' should be able to use sudo with password"): - machine.succeed('su - test2 -c "echo ${password} | sudo -S -u root true"') + with subtest("test1 user should not be able to use sudo without password"): + machine.fail('su - test1 -c "sudo -n -u root true"') - with subtest("users in group 'barfoo' should be able to use sudo without password"): - machine.succeed("sudo -u test3 sudo -n -u root true") + with subtest("users in group 'foobar' should be able to use sudo with password"): + machine.succeed('su - test2 -c "echo ${password} | sudo -S -u root true"') - with subtest("users in group 'baz' (GID 1337)"): - machine.succeed("sudo -u test4 sudo -n -u root echo true") + with subtest("users in group 'barfoo' should be able to use sudo without password"): + machine.succeed("sudo -u test3 sudo -n -u root true") - with subtest("test5 user should be able to run commands under test1"): - machine.succeed("sudo -u test5 sudo -n -u test1 true") + with subtest("users in group 'baz' (GID 1337)"): + machine.succeed("sudo -u test4 sudo -n -u root echo true") - with subtest("test5 user should not be able to run commands under root"): - machine.fail("sudo -u test5 sudo -n -u root true") + with subtest("test5 user should be able to run commands under test1"): + machine.succeed("sudo -u test5 sudo -n -u test1 true") - with subtest("test5 user should be able to keep their environment"): - machine.succeed("sudo -u test5 sudo -n -E -u test1 true") + with subtest("test5 user should not be able to run commands under root"): + machine.fail("sudo -u test5 sudo -n -u root true") - with subtest("users in group 'barfoo' should not be able to keep their environment"): - machine.fail("sudo -u test3 sudo -n -E -u root true") + with subtest("test5 user should be able to keep their environment"): + 
machine.succeed("sudo -u test5 sudo -n -E -u test1 true") - with subtest("users in wheel should be able to run sudo despite execWheelOnly"): - strict.succeed('su - admin -c "sudo -u root true"') + with subtest("users in group 'barfoo' should not be able to keep their environment"): + machine.fail("sudo -u test3 sudo -n -E -u root true") - with subtest("non-wheel users should be unable to run sudo thanks to execWheelOnly"): - strict.fail('su - noadmin -c "sudo --help"') - ''; - } -) + with subtest("users in wheel should be able to run sudo despite execWheelOnly"): + strict.succeed('su - admin -c "sudo -u root true"') + + with subtest("non-wheel users should be unable to run sudo thanks to execWheelOnly"): + strict.fail('su - noadmin -c "sudo --help"') + ''; +} diff --git a/nixos/tests/sunshine.nix b/nixos/tests/sunshine.nix index d492a85f90d7..176b7aa9f307 100644 --- a/nixos/tests/sunshine.nix +++ b/nixos/tests/sunshine.nix @@ -1,81 +1,79 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "sunshine"; - meta = { - # test is flaky on aarch64 - broken = pkgs.stdenv.hostPlatform.isAarch64; - maintainers = [ lib.maintainers.devusb ]; - timeout = 600; +{ pkgs, lib, ... }: +{ + name = "sunshine"; + meta = { + # test is flaky on aarch64 + broken = pkgs.stdenv.hostPlatform.isAarch64; + maintainers = [ lib.maintainers.devusb ]; + timeout = 600; + }; + + nodes.sunshine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.sunshine = { + enable = true; + openFirewall = true; + settings = { + capture = "x11"; + encoder = "software"; + output_name = 0; + }; + }; + + environment.systemPackages = with pkgs; [ + gxmessage + ]; + }; - nodes.sunshine = - { config, pkgs, ... }: - { - imports = [ - ./common/x11.nix - ]; + nodes.moonlight = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; - services.sunshine = { - enable = true; - openFirewall = true; - settings = { - capture = "x11"; - encoder = "software"; - output_name = 0; - }; - }; + environment.systemPackages = with pkgs; [ + moonlight-qt + ]; - environment.systemPackages = with pkgs; [ - gxmessage - ]; + }; - }; + enableOCR = true; - nodes.moonlight = - { config, pkgs, ... 
}: - { - imports = [ - ./common/x11.nix - ]; + testScript = '' + # start the tests, wait for sunshine to be up + start_all() + sunshine.wait_for_open_port(48010,"localhost") - environment.systemPackages = with pkgs; [ - moonlight-qt - ]; + # set the admin username/password, restart sunshine + sunshine.execute("sunshine --creds sunshine sunshine") + sunshine.systemctl("restart sunshine","root") + sunshine.wait_for_open_port(48010,"localhost") - }; + # initiate pairing from moonlight + moonlight.execute("moonlight pair sunshine --pin 1234 >&2 & disown") + moonlight.wait_for_console_text("Executing request.*pair") - enableOCR = true; + # respond to pairing request from sunshine + sunshine.succeed("curl --fail --insecure -u sunshine:sunshine -d '{\"pin\":\"1234\",\"name\":\"1234\"}' https://localhost:47990/api/pin") - testScript = '' - # start the tests, wait for sunshine to be up - start_all() - sunshine.wait_for_open_port(48010,"localhost") + # wait until pairing is complete + moonlight.wait_for_console_text("Executing request.*phrase=pairchallenge") - # set the admin username/password, restart sunshine - sunshine.execute("sunshine --creds sunshine sunshine") - sunshine.systemctl("restart sunshine","root") - sunshine.wait_for_open_port(48010,"localhost") + # hide icewm panel + sunshine.send_key("ctrl-alt-h") + # put words on the sunshine screen for moonlight to see + sunshine.execute("gxmessage ' ABC' -center -font 'consolas 100' -fg '#FFFFFF' -bg '#000000' -borderless -geometry '2000x2000' -buttons \"\" >&2 & disown") - # initiate pairing from moonlight - moonlight.execute("moonlight pair sunshine --pin 1234 >&2 & disown") - moonlight.wait_for_console_text("Executing request.*pair") - - # respond to pairing request from sunshine - sunshine.succeed("curl --fail --insecure -u sunshine:sunshine -d '{\"pin\":\"1234\",\"name\":\"1234\"}' https://localhost:47990/api/pin") - - # wait until pairing is complete - moonlight.wait_for_console_text("Executing request.*phrase=pairchallenge") - - # hide icewm panel - sunshine.send_key("ctrl-alt-h") - # put words on the sunshine screen for moonlight to see - sunshine.execute("gxmessage ' ABC' -center -font 'consolas 100' -fg '#FFFFFF' -bg '#000000' -borderless -geometry '2000x2000' -buttons \"\" >&2 & disown") - - # connect to sunshine from moonlight and look for the words - moonlight.execute("moonlight --video-decoder software stream sunshine 'Desktop' >&2 & disown") - moonlight.wait_for_console_text("Dropping window event during flush") - moonlight.wait_for_text("ABC") - ''; - } -) + # connect to sunshine from moonlight and look for the words + moonlight.execute("moonlight --video-decoder software stream sunshine 'Desktop' >&2 & disown") + moonlight.wait_for_console_text("Dropping window event during flush") + moonlight.wait_for_text("ABC") + ''; +} diff --git a/nixos/tests/suricata.nix b/nixos/tests/suricata.nix index 9634f4af25a0..c79d0799551b 100644 --- a/nixos/tests/suricata.nix +++ b/nixos/tests/suricata.nix @@ -1,81 +1,79 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "suricata"; - meta.maintainers = with lib.maintainers; [ felbinger ]; +{ lib, pkgs, ... 
}: +{ + name = "suricata"; + meta.maintainers = with lib.maintainers; [ felbinger ]; - nodes = { - ids = { - networking.interfaces.eth1 = { - useDHCP = false; - ipv4.addresses = [ - { - address = "192.168.1.2"; - prefixLength = 24; - } - ]; - }; - - # disable suricata-update because this requires an Internet connection - systemd.services.suricata-update.enable = false; - - # install suricata package to make suricatasc program available - environment.systemPackages = with pkgs; [ suricata ]; - - services.suricata = { - enable = true; - settings = { - vars.address-groups.HOME_NET = "192.168.1.0/24"; - unix-command.enabled = true; - outputs = [ { fast.enabled = true; } ]; - af-packet = [ { interface = "eth1"; } ]; - classification-file = "${pkgs.suricata}/etc/suricata/classification.config"; - }; - }; - - # create suricata.rules with the rule to detect the output of the id command - systemd.tmpfiles.rules = [ - ''f /var/lib/suricata/rules/suricata.rules 644 suricata suricata 0 alert ip any any -> any any (msg:"GPL ATTACK_RESPONSE id check returned root"; content:"uid=0|28|root|29|"; classtype:bad-unknown; sid:2100498; rev:7; metadata:created_at 2010_09_23, updated_at 2019_07_26;)'' + nodes = { + ids = { + networking.interfaces.eth1 = { + useDHCP = false; + ipv4.addresses = [ + { + address = "192.168.1.2"; + prefixLength = 24; + } ]; }; - helper = { - imports = [ ../modules/profiles/minimal.nix ]; - networking.interfaces.eth1 = { - useDHCP = false; - ipv4.addresses = [ - { - address = "192.168.1.1"; - prefixLength = 24; - } - ]; - }; + # disable suricata-update because this requires an Internet connection + systemd.services.suricata-update.enable = false; - services.nginx = { - enable = true; - virtualHosts."localhost".locations = { - "/id/".return = "200 'uid=0(root) gid=0(root) groups=0(root)'"; - }; + # install suricata package to make suricatasc program available + environment.systemPackages = with pkgs; [ suricata ]; + + services.suricata = { + enable = true; + settings = { + vars.address-groups.HOME_NET = "192.168.1.0/24"; + unix-command.enabled = true; + outputs = [ { fast.enabled = true; } ]; + af-packet = [ { interface = "eth1"; } ]; + classification-file = "${pkgs.suricata}/etc/suricata/classification.config"; }; - networking.firewall.allowedTCPPorts = [ 80 ]; }; + + # create suricata.rules with the rule to detect the output of the id command + systemd.tmpfiles.rules = [ + ''f /var/lib/suricata/rules/suricata.rules 644 suricata suricata 0 alert ip any any -> any any (msg:"GPL ATTACK_RESPONSE id check returned root"; content:"uid=0|28|root|29|"; classtype:bad-unknown; sid:2100498; rev:7; metadata:created_at 2010_09_23, updated_at 2019_07_26;)'' + ]; }; + helper = { + imports = [ ../modules/profiles/minimal.nix ]; - testScript = '' - start_all() + networking.interfaces.eth1 = { + useDHCP = false; + ipv4.addresses = [ + { + address = "192.168.1.1"; + prefixLength = 24; + } + ]; + }; - # check that configuration has been applied correctly with suricatasc - with subtest("suricata configuration test"): - ids.wait_for_unit("suricata.service") - assert '1' in ids.succeed("suricatasc -c 'iface-list' | ${pkgs.jq}/bin/jq .message.count") + services.nginx = { + enable = true; + virtualHosts."localhost".locations = { + "/id/".return = "200 'uid=0(root) gid=0(root) groups=0(root)'"; + }; + }; + networking.firewall.allowedTCPPorts = [ 80 ]; + }; + }; - # test detection of events based on a static ruleset (output of id command) - with subtest("suricata rule test"): - 
helper.wait_for_unit("nginx.service") - ids.wait_for_unit("suricata.service") + testScript = '' + start_all() - ids.succeed("curl http://192.168.1.1/id/") - assert "id check returned root [**] [Classification: Potentially Bad Traffic]" in ids.succeed("tail -n 1 /var/log/suricata/fast.log"), "Suricata didn't detect the output of id comment" - ''; - } -) + # check that configuration has been applied correctly with suricatasc + with subtest("suricata configuration test"): + ids.wait_for_unit("suricata.service") + assert '1' in ids.succeed("suricatasc -c 'iface-list' | ${pkgs.jq}/bin/jq .message.count") + + # test detection of events based on a static ruleset (output of id command) + with subtest("suricata rule test"): + helper.wait_for_unit("nginx.service") + ids.wait_for_unit("suricata.service") + + ids.succeed("curl http://192.168.1.1/id/") + assert "id check returned root [**] [Classification: Potentially Bad Traffic]" in ids.succeed("tail -n 1 /var/log/suricata/fast.log"), "Suricata didn't detect the output of id comment" + ''; +} diff --git a/nixos/tests/swap-file-btrfs.nix b/nixos/tests/swap-file-btrfs.nix index eaea9ad00125..d074a781ce0a 100644 --- a/nixos/tests/swap-file-btrfs.nix +++ b/nixos/tests/swap-file-btrfs.nix @@ -1,52 +1,50 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "swap-file-btrfs"; +{ lib, ... }: +{ + name = "swap-file-btrfs"; - meta.maintainers = with lib.maintainers; [ oxalica ]; + meta.maintainers = with lib.maintainers; [ oxalica ]; - nodes.machine = - { pkgs, ... }: - { - virtualisation.useDefaultFilesystems = false; + nodes.machine = + { pkgs, ... }: + { + virtualisation.useDefaultFilesystems = false; - virtualisation.rootDevice = "/dev/vda"; + virtualisation.rootDevice = "/dev/vda"; - boot.initrd.postDeviceCommands = '' - ${pkgs.btrfs-progs}/bin/mkfs.btrfs --label root /dev/vda - ''; + boot.initrd.postDeviceCommands = '' + ${pkgs.btrfs-progs}/bin/mkfs.btrfs --label root /dev/vda + ''; - virtualisation.fileSystems = { - "/" = { - device = "/dev/disk/by-label/root"; - fsType = "btrfs"; - }; + virtualisation.fileSystems = { + "/" = { + device = "/dev/disk/by-label/root"; + fsType = "btrfs"; }; - - swapDevices = [ - { - device = "/var/swapfile"; - size = 1; # 1MiB. - } - ]; }; - testScript = '' - machine.wait_for_unit('var-swapfile.swap') - # Ensure the swap file creation script ran to completion without failing when creating the swap file - machine.fail("systemctl is-failed --quiet mkswap-var-swapfile.service") - machine.succeed("stat --file-system --format=%T /var/swapfile | grep btrfs") - # First run. Auto creation. - machine.succeed("swapon --show | grep /var/swapfile") + swapDevices = [ + { + device = "/var/swapfile"; + size = 1; # 1MiB. + } + ]; + }; - machine.shutdown() - machine.start() + testScript = '' + machine.wait_for_unit('var-swapfile.swap') + # Ensure the swap file creation script ran to completion without failing when creating the swap file + machine.fail("systemctl is-failed --quiet mkswap-var-swapfile.service") + machine.succeed("stat --file-system --format=%T /var/swapfile | grep btrfs") + # First run. Auto creation. + machine.succeed("swapon --show | grep /var/swapfile") - # Second run. Use it as-is. 
- machine.wait_for_unit('var-swapfile.swap') - # Ensure the swap file creation script ran to completion without failing when the swap file already exists - machine.fail("systemctl is-failed --quiet mkswap-var-swapfile.service") - machine.succeed("swapon --show | grep /var/swapfile") - ''; - } -) + machine.shutdown() + machine.start() + + # Second run. Use it as-is. + machine.wait_for_unit('var-swapfile.swap') + # Ensure the swap file creation script ran to completion without failing when the swap file already exists + machine.fail("systemctl is-failed --quiet mkswap-var-swapfile.service") + machine.succeed("swapon --show | grep /var/swapfile") + ''; +} diff --git a/nixos/tests/swap-partition.nix b/nixos/tests/swap-partition.nix index db04bbf559b5..d6310be61202 100644 --- a/nixos/tests/swap-partition.nix +++ b/nixos/tests/swap-partition.nix @@ -1,55 +1,53 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "swap-partition"; +{ lib, pkgs, ... }: +{ + name = "swap-partition"; - nodes.machine = - { - config, - pkgs, - lib, - ... - }: - { - virtualisation.useDefaultFilesystems = false; + nodes.machine = + { + config, + pkgs, + lib, + ... + }: + { + virtualisation.useDefaultFilesystems = false; - virtualisation.rootDevice = "/dev/vda1"; + virtualisation.rootDevice = "/dev/vda1"; - boot.initrd.postDeviceCommands = '' - if ! test -b /dev/vda1; then - ${pkgs.parted}/bin/parted --script /dev/vda -- mklabel msdos - ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary 1MiB -250MiB - ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary -250MiB 100% - sync - fi + boot.initrd.postDeviceCommands = '' + if ! test -b /dev/vda1; then + ${pkgs.parted}/bin/parted --script /dev/vda -- mklabel msdos + ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary 1MiB -250MiB + ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary -250MiB 100% + sync + fi - FSTYPE=$(blkid -o value -s TYPE /dev/vda1 || true) - if test -z "$FSTYPE"; then - ${pkgs.e2fsprogs}/bin/mke2fs -t ext4 -L root /dev/vda1 - ${pkgs.util-linux}/bin/mkswap --label swap /dev/vda2 - fi - ''; + FSTYPE=$(blkid -o value -s TYPE /dev/vda1 || true) + if test -z "$FSTYPE"; then + ${pkgs.e2fsprogs}/bin/mke2fs -t ext4 -L root /dev/vda1 + ${pkgs.util-linux}/bin/mkswap --label swap /dev/vda2 + fi + ''; - virtualisation.fileSystems = { - "/" = { - device = "/dev/disk/by-label/root"; - fsType = "ext4"; - }; + virtualisation.fileSystems = { + "/" = { + device = "/dev/disk/by-label/root"; + fsType = "ext4"; }; - - swapDevices = [ - { - device = "/dev/disk/by-label/swap"; - } - ]; }; - testScript = '' - machine.wait_for_unit("multi-user.target") + swapDevices = [ + { + device = "/dev/disk/by-label/swap"; + } + ]; + }; - with subtest("Swap is active"): - # Doesn't matter if the numbers reported by `free` are slightly off due to unit conversions. - machine.succeed("free -h | grep -E 'Swap:\s+2[45][0-9]Mi'") - ''; - } -) + testScript = '' + machine.wait_for_unit("multi-user.target") + + with subtest("Swap is active"): + # Doesn't matter if the numbers reported by `free` are slightly off due to unit conversions. + machine.succeed("free -h | grep -E 'Swap:\s+2[45][0-9]Mi'") + ''; +} diff --git a/nixos/tests/swap-random-encryption.nix b/nixos/tests/swap-random-encryption.nix index c48452412534..0b88c21654ca 100644 --- a/nixos/tests/swap-random-encryption.nix +++ b/nixos/tests/swap-random-encryption.nix @@ -1,87 +1,85 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... 
}: - { - name = "swap-random-encryption"; +{ lib, pkgs, ... }: +{ + name = "swap-random-encryption"; - nodes.machine = - { - config, - pkgs, - lib, - ... - }: - { - environment.systemPackages = [ pkgs.cryptsetup ]; + nodes.machine = + { + config, + pkgs, + lib, + ... + }: + { + environment.systemPackages = [ pkgs.cryptsetup ]; - virtualisation.useDefaultFilesystems = false; + virtualisation.useDefaultFilesystems = false; - virtualisation.rootDevice = "/dev/vda1"; + virtualisation.rootDevice = "/dev/vda1"; - boot.initrd.postDeviceCommands = '' - if ! test -b /dev/vda1; then - ${pkgs.parted}/bin/parted --script /dev/vda -- mklabel msdos - ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary 1MiB -250MiB - ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary -250MiB 100% - sync - fi + boot.initrd.postDeviceCommands = '' + if ! test -b /dev/vda1; then + ${pkgs.parted}/bin/parted --script /dev/vda -- mklabel msdos + ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary 1MiB -250MiB + ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary -250MiB 100% + sync + fi - FSTYPE=$(blkid -o value -s TYPE /dev/vda1 || true) - if test -z "$FSTYPE"; then - ${pkgs.e2fsprogs}/bin/mke2fs -t ext4 -L root /dev/vda1 - fi - ''; + FSTYPE=$(blkid -o value -s TYPE /dev/vda1 || true) + if test -z "$FSTYPE"; then + ${pkgs.e2fsprogs}/bin/mke2fs -t ext4 -L root /dev/vda1 + fi + ''; - virtualisation.fileSystems = { - "/" = { - device = "/dev/disk/by-label/root"; - fsType = "ext4"; - }; + virtualisation.fileSystems = { + "/" = { + device = "/dev/disk/by-label/root"; + fsType = "ext4"; }; - - swapDevices = [ - { - device = "/dev/vda2"; - - randomEncryption = { - enable = true; - cipher = "aes-xts-plain64"; - keySize = 512; - sectorSize = 4096; - }; - } - ]; }; - testScript = '' - machine.wait_for_unit("multi-user.target") + swapDevices = [ + { + device = "/dev/vda2"; - with subtest("Swap is active"): - # Doesn't matter if the numbers reported by `free` are slightly off due to unit conversions. - machine.succeed("free -h | grep -E 'Swap:\s+2[45][0-9]Mi'") + randomEncryption = { + enable = true; + cipher = "aes-xts-plain64"; + keySize = 512; + sectorSize = 4096; + }; + } + ]; + }; - with subtest("Swap device has 4k sector size"): - import json - result = json.loads(machine.succeed("lsblk -Jo PHY-SEC,LOG-SEC /dev/mapper/dev-vda2")) - block_devices = result["blockdevices"] - if len(block_devices) != 1: - raise Exception ("lsblk output did not report exactly one block device") + testScript = '' + machine.wait_for_unit("multi-user.target") - swapDevice = block_devices[0]; - if not (swapDevice["phy-sec"] == 4096 and swapDevice["log-sec"] == 4096): - raise Exception ("swap device does not have the sector size specified in the configuration") + with subtest("Swap is active"): + # Doesn't matter if the numbers reported by `free` are slightly off due to unit conversions. 
+ machine.succeed("free -h | grep -E 'Swap:\s+2[45][0-9]Mi'") - with subtest("Swap encrypt has assigned cipher and keysize"): - import re + with subtest("Swap device has 4k sector size"): + import json + result = json.loads(machine.succeed("lsblk -Jo PHY-SEC,LOG-SEC /dev/mapper/dev-vda2")) + block_devices = result["blockdevices"] + if len(block_devices) != 1: + raise Exception ("lsblk output did not report exactly one block device") - results = machine.succeed("cryptsetup status dev-vda2").splitlines() + swapDevice = block_devices[0]; + if not (swapDevice["phy-sec"] == 4096 and swapDevice["log-sec"] == 4096): + raise Exception ("swap device does not have the sector size specified in the configuration") - cipher_pattern = re.compile(r"\s*cipher:\s+aes-xts-plain64\s*") - if not any(cipher_pattern.fullmatch(line) for line in results): - raise Exception ("swap device encryption does not use the cipher specified in the configuration") + with subtest("Swap encrypt has assigned cipher and keysize"): + import re - key_size_pattern = re.compile(r"\s*keysize:\s+512\s+bits\s*") - if not any(key_size_pattern.fullmatch(line) for line in results): - raise Exception ("swap device encryption does not use the key size specified in the configuration") - ''; - } -) + results = machine.succeed("cryptsetup status dev-vda2").splitlines() + + cipher_pattern = re.compile(r"\s*cipher:\s+aes-xts-plain64\s*") + if not any(cipher_pattern.fullmatch(line) for line in results): + raise Exception ("swap device encryption does not use the cipher specified in the configuration") + + key_size_pattern = re.compile(r"\s*keysize:\s+512\s+bits\s*") + if not any(key_size_pattern.fullmatch(line) for line in results): + raise Exception ("swap device encryption does not use the key size specified in the configuration") + ''; +} diff --git a/nixos/tests/sway.nix b/nixos/tests/sway.nix index 75035c5b9f65..a79e692d24cb 100644 --- a/nixos/tests/sway.nix +++ b/nixos/tests/sway.nix @@ -1,207 +1,205 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "sway"; - meta = { - maintainers = with lib.maintainers; [ - primeos - synthetica - ]; - }; +{ pkgs, lib, ... }: +{ + name = "sway"; + meta = { + maintainers = with lib.maintainers; [ + primeos + synthetica + ]; + }; - # testScriptWithTypes:49: error: Cannot call function of unknown type - # (machine.succeed if succeed else machine.execute)( - # ^ - # Found 1 error in 1 file (checked 1 source file) - skipTypeCheck = true; + # testScriptWithTypes:49: error: Cannot call function of unknown type + # (machine.succeed if succeed else machine.execute)( + # ^ + # Found 1 error in 1 file (checked 1 source file) + skipTypeCheck = true; - nodes.machine = - { config, ... }: - { - # Automatically login on tty1 as a normal user: - imports = [ ./common/user-account.nix ]; - services.getty.autologinUser = "alice"; + nodes.machine = + { config, ... }: + { + # Automatically login on tty1 as a normal user: + imports = [ ./common/user-account.nix ]; + services.getty.autologinUser = "alice"; - environment = { - # For glinfo and wayland-info: - systemPackages = with pkgs; [ - mesa-demos - wayland-utils - alacritty - ]; - # Use a fixed SWAYSOCK path (for swaymsg): - variables = { - "SWAYSOCK" = "/tmp/sway-ipc.sock"; - # TODO: Investigate if we can get hardware acceleration to work (via - # virtio-gpu and Virgil). 
We currently have to use the Pixman software - # renderer since the GLES2 renderer doesn't work inside the VM (even - # with WLR_RENDERER_ALLOW_SOFTWARE): - # "WLR_RENDERER_ALLOW_SOFTWARE" = "1"; - "WLR_RENDERER" = "pixman"; - }; - # For convenience: - shellAliases = { - test-x11 = "glinfo | tee /tmp/test-x11.out && touch /tmp/test-x11-exit-ok"; - test-wayland = "wayland-info | tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok"; - }; - - # To help with OCR: - etc."xdg/foot/foot.ini".text = lib.generators.toINI { } { - main = { - font = "inconsolata:size=14"; - }; - colors = rec { - foreground = "000000"; - background = "ffffff"; - regular2 = foreground; - }; - }; - - etc."gpg-agent.conf".text = '' - pinentry-timeout 86400 - ''; + environment = { + # For glinfo and wayland-info: + systemPackages = with pkgs; [ + mesa-demos + wayland-utils + alacritty + ]; + # Use a fixed SWAYSOCK path (for swaymsg): + variables = { + "SWAYSOCK" = "/tmp/sway-ipc.sock"; + # TODO: Investigate if we can get hardware acceleration to work (via + # virtio-gpu and Virgil). We currently have to use the Pixman software + # renderer since the GLES2 renderer doesn't work inside the VM (even + # with WLR_RENDERER_ALLOW_SOFTWARE): + # "WLR_RENDERER_ALLOW_SOFTWARE" = "1"; + "WLR_RENDERER" = "pixman"; + }; + # For convenience: + shellAliases = { + test-x11 = "glinfo | tee /tmp/test-x11.out && touch /tmp/test-x11-exit-ok"; + test-wayland = "wayland-info | tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok"; }; - fonts.packages = [ pkgs.inconsolata ]; + # To help with OCR: + etc."xdg/foot/foot.ini".text = lib.generators.toINI { } { + main = { + font = "inconsolata:size=14"; + }; + colors = rec { + foreground = "000000"; + background = "ffffff"; + regular2 = foreground; + }; + }; - # Automatically configure and start Sway when logging in on tty1: - programs.bash.loginShellInit = '' - if [ "$(tty)" = "/dev/tty1" ]; then - set -e - - mkdir -p ~/.config/sway - sed s/Mod4/Mod1/ /etc/sway/config > ~/.config/sway/config - - sway --validate - sway && touch /tmp/sway-exit-ok - fi + etc."gpg-agent.conf".text = '' + pinentry-timeout 86400 ''; - - programs.sway.enable = true; - - # To test pinentry via gpg-agent: - programs.gnupg.agent.enable = true; - - # Need to switch to a different GPU driver than the default one (-vga std) so that Sway can launch: - virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; }; - testScript = - { nodes, ... }: - '' - import shlex - import json + fonts.packages = [ pkgs.inconsolata ]; - q = shlex.quote - NODE_GROUPS = ["nodes", "floating_nodes"] + # Automatically configure and start Sway when logging in on tty1: + programs.bash.loginShellInit = '' + if [ "$(tty)" = "/dev/tty1" ]; then + set -e + mkdir -p ~/.config/sway + sed s/Mod4/Mod1/ /etc/sway/config > ~/.config/sway/config - def swaymsg(command: str = "", succeed=True, type="command"): - assert command != "" or type != "command", "Must specify command or type" - shell = q(f"swaymsg -t {q(type)} -- {q(command)}") - with machine.nested( - f"sending swaymsg {shell!r}" + " (allowed to fail)" * (not succeed) - ): - ret = (machine.succeed if succeed else machine.execute)( - f"su - alice -c {shell}" - ) - - # execute also returns a status code, but disregard. 
- if not succeed: - _, ret = ret - - if not succeed and not ret: - return None - - parsed = json.loads(ret) - return parsed - - - def walk(tree): - yield tree - for group in NODE_GROUPS: - for node in tree.get(group, []): - yield from walk(node) - - - def wait_for_window(pattern): - def func(last_chance): - nodes = (node["name"] for node in walk(swaymsg(type="get_tree"))) - - if last_chance: - nodes = list(nodes) - machine.log(f"Last call! Current list of windows: {nodes}") - - return any(pattern in name for name in nodes) - - retry(func) - - start_all() - machine.wait_for_unit("multi-user.target") - - # To check the version: - print(machine.succeed("sway --version")) - - # Wait for Sway to complete startup: - machine.wait_for_file("/run/user/1000/wayland-1") - machine.wait_for_file("/tmp/sway-ipc.sock") - - # Test XWayland (foot does not support X): - swaymsg("exec WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY= alacritty") - wait_for_window("alice@machine") - machine.send_chars("test-x11\n") - machine.wait_for_file("/tmp/test-x11-exit-ok") - print(machine.succeed("cat /tmp/test-x11.out")) - machine.copy_from_vm("/tmp/test-x11.out") - machine.screenshot("alacritty_glinfo") - machine.succeed("pkill alacritty") - - # Start a terminal (foot) on workspace 3: - machine.send_key("alt-3") - machine.sleep(3) - machine.send_key("alt-ret") - wait_for_window("alice@machine") - machine.send_chars("test-wayland\n") - machine.wait_for_file("/tmp/test-wayland-exit-ok") - print(machine.succeed("cat /tmp/test-wayland.out")) - machine.copy_from_vm("/tmp/test-wayland.out") - machine.screenshot("foot_wayland_info") - machine.send_key("alt-shift-q") - machine.wait_until_fails("pgrep foot") - - # Test gpg-agent starting pinentry-gnome3 via D-Bus (tests if - # $WAYLAND_DISPLAY is correctly imported into the D-Bus user env): - swaymsg("exec mkdir -p ~/.gnupg") - swaymsg("exec cp /etc/gpg-agent.conf ~/.gnupg") - - swaymsg("exec DISPLAY=INVALID gpg --no-tty --yes --quick-generate-key test", succeed=False) - machine.wait_until_succeeds("pgrep --exact gpg") - wait_for_window("gpg") - machine.succeed("pgrep --exact gpg") - machine.screenshot("gpg_pinentry") - machine.send_key("alt-shift-q") - machine.wait_until_fails("pgrep --exact gpg") - - # Test swaynag: - def get_height(): - return [node['rect']['height'] for node in walk(swaymsg(type="get_tree")) if node['focused']][0] - - before = get_height() - machine.send_key("alt-shift-e") - retry(lambda _: get_height() < before) - machine.screenshot("sway_exit") - - swaymsg("exec swaylock") - machine.wait_until_succeeds("pgrep -xf swaylock") - machine.sleep(3) - machine.send_chars("${nodes.machine.config.users.users.alice.password}") - machine.send_key("ret") - machine.wait_until_fails("pgrep -xf swaylock") - - # Exit Sway and verify process exit status 0: - swaymsg("exit", succeed=False) - machine.wait_until_fails("pgrep -xf sway") - machine.wait_for_file("/tmp/sway-exit-ok") + sway --validate + sway && touch /tmp/sway-exit-ok + fi ''; - } -) + + programs.sway.enable = true; + + # To test pinentry via gpg-agent: + programs.gnupg.agent.enable = true; + + # Need to switch to a different GPU driver than the default one (-vga std) so that Sway can launch: + virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; + }; + + testScript = + { nodes, ... 
}: + '' + import shlex + import json + + q = shlex.quote + NODE_GROUPS = ["nodes", "floating_nodes"] + + + def swaymsg(command: str = "", succeed=True, type="command"): + assert command != "" or type != "command", "Must specify command or type" + shell = q(f"swaymsg -t {q(type)} -- {q(command)}") + with machine.nested( + f"sending swaymsg {shell!r}" + " (allowed to fail)" * (not succeed) + ): + ret = (machine.succeed if succeed else machine.execute)( + f"su - alice -c {shell}" + ) + + # execute also returns a status code, but disregard. + if not succeed: + _, ret = ret + + if not succeed and not ret: + return None + + parsed = json.loads(ret) + return parsed + + + def walk(tree): + yield tree + for group in NODE_GROUPS: + for node in tree.get(group, []): + yield from walk(node) + + + def wait_for_window(pattern): + def func(last_chance): + nodes = (node["name"] for node in walk(swaymsg(type="get_tree"))) + + if last_chance: + nodes = list(nodes) + machine.log(f"Last call! Current list of windows: {nodes}") + + return any(pattern in name for name in nodes) + + retry(func) + + start_all() + machine.wait_for_unit("multi-user.target") + + # To check the version: + print(machine.succeed("sway --version")) + + # Wait for Sway to complete startup: + machine.wait_for_file("/run/user/1000/wayland-1") + machine.wait_for_file("/tmp/sway-ipc.sock") + + # Test XWayland (foot does not support X): + swaymsg("exec WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY= alacritty") + wait_for_window("alice@machine") + machine.send_chars("test-x11\n") + machine.wait_for_file("/tmp/test-x11-exit-ok") + print(machine.succeed("cat /tmp/test-x11.out")) + machine.copy_from_vm("/tmp/test-x11.out") + machine.screenshot("alacritty_glinfo") + machine.succeed("pkill alacritty") + + # Start a terminal (foot) on workspace 3: + machine.send_key("alt-3") + machine.sleep(3) + machine.send_key("alt-ret") + wait_for_window("alice@machine") + machine.send_chars("test-wayland\n") + machine.wait_for_file("/tmp/test-wayland-exit-ok") + print(machine.succeed("cat /tmp/test-wayland.out")) + machine.copy_from_vm("/tmp/test-wayland.out") + machine.screenshot("foot_wayland_info") + machine.send_key("alt-shift-q") + machine.wait_until_fails("pgrep foot") + + # Test gpg-agent starting pinentry-gnome3 via D-Bus (tests if + # $WAYLAND_DISPLAY is correctly imported into the D-Bus user env): + swaymsg("exec mkdir -p ~/.gnupg") + swaymsg("exec cp /etc/gpg-agent.conf ~/.gnupg") + + swaymsg("exec DISPLAY=INVALID gpg --no-tty --yes --quick-generate-key test", succeed=False) + machine.wait_until_succeeds("pgrep --exact gpg") + wait_for_window("gpg") + machine.succeed("pgrep --exact gpg") + machine.screenshot("gpg_pinentry") + machine.send_key("alt-shift-q") + machine.wait_until_fails("pgrep --exact gpg") + + # Test swaynag: + def get_height(): + return [node['rect']['height'] for node in walk(swaymsg(type="get_tree")) if node['focused']][0] + + before = get_height() + machine.send_key("alt-shift-e") + retry(lambda _: get_height() < before) + machine.screenshot("sway_exit") + + swaymsg("exec swaylock") + machine.wait_until_succeeds("pgrep -xf swaylock") + machine.sleep(3) + machine.send_chars("${nodes.machine.config.users.users.alice.password}") + machine.send_key("ret") + machine.wait_until_fails("pgrep -xf swaylock") + + # Exit Sway and verify process exit status 0: + swaymsg("exit", succeed=False) + machine.wait_until_fails("pgrep -xf sway") + machine.wait_for_file("/tmp/sway-exit-ok") + ''; +} diff --git a/nixos/tests/swayfx.nix b/nixos/tests/swayfx.nix 
index 7fa5c602f520..1f9b19f335f9 100644 --- a/nixos/tests/swayfx.nix +++ b/nixos/tests/swayfx.nix @@ -1,205 +1,203 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "swayfx"; - meta = { - maintainers = with lib.maintainers; [ ]; - }; +{ pkgs, lib, ... }: +{ + name = "swayfx"; + meta = { + maintainers = with lib.maintainers; [ ]; + }; - # testScriptWithTypes:49: error: Cannot call function of unknown type - # (machine.succeed if succeed else machine.execute)( - # ^ - # Found 1 error in 1 file (checked 1 source file) - skipTypeCheck = true; + # testScriptWithTypes:49: error: Cannot call function of unknown type + # (machine.succeed if succeed else machine.execute)( + # ^ + # Found 1 error in 1 file (checked 1 source file) + skipTypeCheck = true; - nodes.machine = { - # Automatically login on tty1 as a normal user: - imports = [ ./common/user-account.nix ]; - services.getty.autologinUser = "alice"; + nodes.machine = { + # Automatically login on tty1 as a normal user: + imports = [ ./common/user-account.nix ]; + services.getty.autologinUser = "alice"; - environment = { - # For glinfo and wayland-info: - systemPackages = with pkgs; [ - mesa-demos - wayland-utils - alacritty - ]; - # Use a fixed SWAYSOCK path (for swaymsg): - variables = { - "SWAYSOCK" = "/tmp/sway-ipc.sock"; - # TODO: Investigate if we can get hardware acceleration to work (via - # virtio-gpu and Virgil). We currently have to use the Pixman software - # renderer since the GLES2 renderer doesn't work inside the VM (even - # with WLR_RENDERER_ALLOW_SOFTWARE): - # "WLR_RENDERER_ALLOW_SOFTWARE" = "1"; - "WLR_RENDERER" = "pixman"; - }; - # For convenience: - shellAliases = { - test-x11 = "glinfo | tee /tmp/test-x11.out && touch /tmp/test-x11-exit-ok"; - test-wayland = "wayland-info | tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok"; - }; - - # To help with OCR: - etc."xdg/foot/foot.ini".text = lib.generators.toINI { } { - main = { - font = "inconsolata:size=14"; - }; - colors = rec { - foreground = "000000"; - background = "ffffff"; - regular2 = foreground; - }; - }; - - etc."gpg-agent.conf".text = '' - pinentry-timeout 86400 - ''; + environment = { + # For glinfo and wayland-info: + systemPackages = with pkgs; [ + mesa-demos + wayland-utils + alacritty + ]; + # Use a fixed SWAYSOCK path (for swaymsg): + variables = { + "SWAYSOCK" = "/tmp/sway-ipc.sock"; + # TODO: Investigate if we can get hardware acceleration to work (via + # virtio-gpu and Virgil). 
We currently have to use the Pixman software + # renderer since the GLES2 renderer doesn't work inside the VM (even + # with WLR_RENDERER_ALLOW_SOFTWARE): + # "WLR_RENDERER_ALLOW_SOFTWARE" = "1"; + "WLR_RENDERER" = "pixman"; + }; + # For convenience: + shellAliases = { + test-x11 = "glinfo | tee /tmp/test-x11.out && touch /tmp/test-x11-exit-ok"; + test-wayland = "wayland-info | tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok"; }; - fonts.packages = [ pkgs.inconsolata ]; - - # Automatically configure and start Sway when logging in on tty1: - programs.bash.loginShellInit = '' - if [ "$(tty)" = "/dev/tty1" ]; then - set -e - - mkdir -p ~/.config/sway - sed s/Mod4/Mod1/ /etc/sway/config > ~/.config/sway/config - - sway --validate - sway && touch /tmp/sway-exit-ok - fi - ''; - - programs.sway = { - enable = true; - package = pkgs.swayfx.override { isNixOS = true; }; + # To help with OCR: + etc."xdg/foot/foot.ini".text = lib.generators.toINI { } { + main = { + font = "inconsolata:size=14"; + }; + colors = rec { + foreground = "000000"; + background = "ffffff"; + regular2 = foreground; + }; }; - # To test pinentry via gpg-agent: - programs.gnupg.agent.enable = true; - - # Need to switch to a different GPU driver than the default one (-vga std) so that Sway can launch: - virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; + etc."gpg-agent.conf".text = '' + pinentry-timeout 86400 + ''; }; - testScript = - { nodes, ... }: - '' - import shlex - import json + fonts.packages = [ pkgs.inconsolata ]; - q = shlex.quote - NODE_GROUPS = ["nodes", "floating_nodes"] + # Automatically configure and start Sway when logging in on tty1: + programs.bash.loginShellInit = '' + if [ "$(tty)" = "/dev/tty1" ]; then + set -e + + mkdir -p ~/.config/sway + sed s/Mod4/Mod1/ /etc/sway/config > ~/.config/sway/config + + sway --validate + sway && touch /tmp/sway-exit-ok + fi + ''; + + programs.sway = { + enable = true; + package = pkgs.swayfx.override { isNixOS = true; }; + }; + + # To test pinentry via gpg-agent: + programs.gnupg.agent.enable = true; + + # Need to switch to a different GPU driver than the default one (-vga std) so that Sway can launch: + virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; + }; + + testScript = + { nodes, ... }: + '' + import shlex + import json + + q = shlex.quote + NODE_GROUPS = ["nodes", "floating_nodes"] - def swaymsg(command: str = "", succeed=True, type="command"): - assert command != "" or type != "command", "Must specify command or type" - shell = q(f"swaymsg -t {q(type)} -- {q(command)}") - with machine.nested( - f"sending swaymsg {shell!r}" + " (allowed to fail)" * (not succeed) - ): - ret = (machine.succeed if succeed else machine.execute)( - f"su - alice -c {shell}" - ) + def swaymsg(command: str = "", succeed=True, type="command"): + assert command != "" or type != "command", "Must specify command or type" + shell = q(f"swaymsg -t {q(type)} -- {q(command)}") + with machine.nested( + f"sending swaymsg {shell!r}" + " (allowed to fail)" * (not succeed) + ): + ret = (machine.succeed if succeed else machine.execute)( + f"su - alice -c {shell}" + ) - # execute also returns a status code, but disregard. - if not succeed: - _, ret = ret + # execute also returns a status code, but disregard. 
+ if not succeed: + _, ret = ret - if not succeed and not ret: - return None + if not succeed and not ret: + return None - parsed = json.loads(ret) - return parsed + parsed = json.loads(ret) + return parsed - def walk(tree): - yield tree - for group in NODE_GROUPS: - for node in tree.get(group, []): - yield from walk(node) + def walk(tree): + yield tree + for group in NODE_GROUPS: + for node in tree.get(group, []): + yield from walk(node) - def wait_for_window(pattern): - def func(last_chance): - nodes = (node["name"] for node in walk(swaymsg(type="get_tree"))) + def wait_for_window(pattern): + def func(last_chance): + nodes = (node["name"] for node in walk(swaymsg(type="get_tree"))) - if last_chance: - nodes = list(nodes) - machine.log(f"Last call! Current list of windows: {nodes}") + if last_chance: + nodes = list(nodes) + machine.log(f"Last call! Current list of windows: {nodes}") - return any(pattern in name for name in nodes) + return any(pattern in name for name in nodes) - retry(func) + retry(func) - start_all() - machine.wait_for_unit("multi-user.target") + start_all() + machine.wait_for_unit("multi-user.target") - # To check the version: - print(machine.succeed("sway --version")) + # To check the version: + print(machine.succeed("sway --version")) - # Wait for Sway to complete startup: - machine.wait_for_file("/run/user/1000/wayland-1") - machine.wait_for_file("/tmp/sway-ipc.sock") + # Wait for Sway to complete startup: + machine.wait_for_file("/run/user/1000/wayland-1") + machine.wait_for_file("/tmp/sway-ipc.sock") - # Test XWayland (foot does not support X): - swaymsg("exec WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY= alacritty") - wait_for_window("alice@machine") - machine.send_chars("test-x11\n") - machine.wait_for_file("/tmp/test-x11-exit-ok") - print(machine.succeed("cat /tmp/test-x11.out")) - machine.copy_from_vm("/tmp/test-x11.out") - machine.screenshot("alacritty_glinfo") - machine.succeed("pkill alacritty") + # Test XWayland (foot does not support X): + swaymsg("exec WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY= alacritty") + wait_for_window("alice@machine") + machine.send_chars("test-x11\n") + machine.wait_for_file("/tmp/test-x11-exit-ok") + print(machine.succeed("cat /tmp/test-x11.out")) + machine.copy_from_vm("/tmp/test-x11.out") + machine.screenshot("alacritty_glinfo") + machine.succeed("pkill alacritty") - # Start a terminal (foot) on workspace 3: - machine.send_key("alt-3") - machine.sleep(3) - machine.send_key("alt-ret") - wait_for_window("alice@machine") - machine.send_chars("test-wayland\n") - machine.wait_for_file("/tmp/test-wayland-exit-ok") - print(machine.succeed("cat /tmp/test-wayland.out")) - machine.copy_from_vm("/tmp/test-wayland.out") - machine.screenshot("foot_wayland_info") - machine.send_key("alt-shift-q") - machine.wait_until_fails("pgrep foot") + # Start a terminal (foot) on workspace 3: + machine.send_key("alt-3") + machine.sleep(3) + machine.send_key("alt-ret") + wait_for_window("alice@machine") + machine.send_chars("test-wayland\n") + machine.wait_for_file("/tmp/test-wayland-exit-ok") + print(machine.succeed("cat /tmp/test-wayland.out")) + machine.copy_from_vm("/tmp/test-wayland.out") + machine.screenshot("foot_wayland_info") + machine.send_key("alt-shift-q") + machine.wait_until_fails("pgrep foot") - # Test gpg-agent starting pinentry-gnome3 via D-Bus (tests if - # $WAYLAND_DISPLAY is correctly imported into the D-Bus user env): - swaymsg("exec mkdir -p ~/.gnupg") - swaymsg("exec cp /etc/gpg-agent.conf ~/.gnupg") + # Test gpg-agent starting 
pinentry-gnome3 via D-Bus (tests if + # $WAYLAND_DISPLAY is correctly imported into the D-Bus user env): + swaymsg("exec mkdir -p ~/.gnupg") + swaymsg("exec cp /etc/gpg-agent.conf ~/.gnupg") - swaymsg("exec DISPLAY=INVALID gpg --no-tty --yes --quick-generate-key test", succeed=False) - machine.wait_until_succeeds("pgrep --exact gpg") - wait_for_window("gpg") - machine.succeed("pgrep --exact gpg") - machine.screenshot("gpg_pinentry") - machine.send_key("alt-shift-q") - machine.wait_until_fails("pgrep --exact gpg") + swaymsg("exec DISPLAY=INVALID gpg --no-tty --yes --quick-generate-key test", succeed=False) + machine.wait_until_succeeds("pgrep --exact gpg") + wait_for_window("gpg") + machine.succeed("pgrep --exact gpg") + machine.screenshot("gpg_pinentry") + machine.send_key("alt-shift-q") + machine.wait_until_fails("pgrep --exact gpg") - # Test swaynag: - def get_height(): - return [node['rect']['height'] for node in walk(swaymsg(type="get_tree")) if node['focused']][0] + # Test swaynag: + def get_height(): + return [node['rect']['height'] for node in walk(swaymsg(type="get_tree")) if node['focused']][0] - before = get_height() - machine.send_key("alt-shift-e") - retry(lambda _: get_height() < before) - machine.screenshot("sway_exit") + before = get_height() + machine.send_key("alt-shift-e") + retry(lambda _: get_height() < before) + machine.screenshot("sway_exit") - swaymsg("exec swaylock") - machine.wait_until_succeeds("pgrep -xf swaylock") - machine.sleep(3) - machine.send_chars("${nodes.machine.users.users.alice.password}") - machine.send_key("ret") - machine.wait_until_fails("pgrep -xf swaylock") + swaymsg("exec swaylock") + machine.wait_until_succeeds("pgrep -xf swaylock") + machine.sleep(3) + machine.send_chars("${nodes.machine.users.users.alice.password}") + machine.send_key("ret") + machine.wait_until_fails("pgrep -xf swaylock") - # Exit Sway and verify process exit status 0: - swaymsg("exit", succeed=False) - machine.wait_until_fails("pgrep -xf sway") - machine.wait_for_file("/tmp/sway-exit-ok") - ''; - } -) + # Exit Sway and verify process exit status 0: + swaymsg("exit", succeed=False) + machine.wait_until_fails("pgrep -xf sway") + machine.wait_for_file("/tmp/sway-exit-ok") + ''; +} diff --git a/nixos/tests/sx.nix b/nixos/tests/sx.nix index 1cdc4858cf00..8fc41621a7c4 100644 --- a/nixos/tests/sx.nix +++ b/nixos/tests/sx.nix @@ -1,63 +1,61 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "sx"; - meta.maintainers = with lib.maintainers; [ - figsoda - thiagokokada - ]; +{ pkgs, lib, ... }: +{ + name = "sx"; + meta.maintainers = with lib.maintainers; [ + figsoda + thiagokokada + ]; - nodes.machine = - { ... }: - { - imports = [ ./common/user-account.nix ]; + nodes.machine = + { ... }: + { + imports = [ ./common/user-account.nix ]; - environment.systemPackages = with pkgs; [ icewm ]; + environment.systemPackages = with pkgs; [ icewm ]; - services.getty.autologinUser = "alice"; + services.getty.autologinUser = "alice"; - services.xserver = { - enable = true; - displayManager.sx.enable = true; - }; - - # Create sxrc file on login and start sx - programs.bash.loginShellInit = - # bash - '' - mkdir -p "$HOME/.config/sx" - echo 'exec icewm' > "$HOME/.config/sx/sxrc" - chmod +x "$HOME/.config/sx/sxrc" - - sx - ''; + services.xserver = { + enable = true; + displayManager.sx.enable = true; }; - testScript = - { nodes, ... 
}: - let - user = nodes.machine.users.users.alice; - in - # python - '' - start_all() + # Create sxrc file on login and start sx + programs.bash.loginShellInit = + # bash + '' + mkdir -p "$HOME/.config/sx" + echo 'exec icewm' > "$HOME/.config/sx/sxrc" + chmod +x "$HOME/.config/sx/sxrc" - machine.wait_for_unit("multi-user.target") + sx + ''; + }; - xauthority = "${user.home}/.local/share/sx/xauthority" - machine.wait_for_file(xauthority) - machine.succeed(f"xauth merge {xauthority}") + testScript = + { nodes, ... }: + let + user = nodes.machine.users.users.alice; + in + # python + '' + start_all() - def icewm_is_visible(_last_try: bool) -> bool: - # sx will set DISPLAY as the TTY number we started, in this case - # TTY1: - # https://github.com/Earnestly/sx/blob/master/sx#L41. - # We can't use `machine.wait_for_window` here since we are running - # X as alice and not root. - return "IceWM" in machine.succeed("DISPLAY=:1 xwininfo -root -tree") + machine.wait_for_unit("multi-user.target") - # Adding a retry logic to increase reliability - retry(icewm_is_visible) - ''; - } -) + xauthority = "${user.home}/.local/share/sx/xauthority" + machine.wait_for_file(xauthority) + machine.succeed(f"xauth merge {xauthority}") + + def icewm_is_visible(_last_try: bool) -> bool: + # sx will set DISPLAY as the TTY number we started, in this case + # TTY1: + # https://github.com/Earnestly/sx/blob/master/sx#L41. + # We can't use `machine.wait_for_window` here since we are running + # X as alice and not root. + return "IceWM" in machine.succeed("DISPLAY=:1 xwininfo -root -tree") + + # Adding a retry logic to increase reliability + retry(icewm_is_visible) + ''; +} diff --git a/nixos/tests/sympa.nix b/nixos/tests/sympa.nix index 57284b75643e..de7e469f74d1 100644 --- a/nixos/tests/sympa.nix +++ b/nixos/tests/sympa.nix @@ -1,38 +1,36 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "sympa"; - meta.maintainers = with lib.maintainers; [ ]; +{ pkgs, lib, ... }: +{ + name = "sympa"; + meta.maintainers = with lib.maintainers; [ ]; - nodes.machine = - { ... }: - { + nodes.machine = + { ... }: + { - services.sympa = { - enable = true; - domains = { - "lists.example.org" = { - webHost = "localhost"; - }; - }; - listMasters = [ "bob@example.org" ]; - web.enable = true; - web.https = false; - database = { - type = "PostgreSQL"; - createLocally = true; + services.sympa = { + enable = true; + domains = { + "lists.example.org" = { + webHost = "localhost"; }; }; + listMasters = [ "bob@example.org" ]; + web.enable = true; + web.https = false; + database = { + type = "PostgreSQL"; + createLocally = true; + }; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("sympa.service") - machine.wait_for_unit("wwsympa.service") - assert "Mailing lists service" in machine.succeed( - "curl --fail --insecure -L http://localhost/" - ) - ''; - } -) + machine.wait_for_unit("sympa.service") + machine.wait_for_unit("wwsympa.service") + assert "Mailing lists service" in machine.succeed( + "curl --fail --insecure -L http://localhost/" + ) + ''; +} diff --git a/nixos/tests/syncthing-init.nix b/nixos/tests/syncthing-init.nix index a4401805dcb2..8424b052f6fc 100644 --- a/nixos/tests/syncthing-init.nix +++ b/nixos/tests/syncthing-init.nix @@ -1,35 +1,33 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let +{ lib, pkgs, ... 
}: +let - testId = "7CFNTQM-IMTJBHJ-3UWRDIU-ZGQJFR6-VCXZ3NB-XUH3KZO-N52ITXR-LAIYUAU"; + testId = "7CFNTQM-IMTJBHJ-3UWRDIU-ZGQJFR6-VCXZ3NB-XUH3KZO-N52ITXR-LAIYUAU"; - in - { - name = "syncthing-init"; - meta.maintainers = with pkgs.lib.maintainers; [ lassulus ]; +in +{ + name = "syncthing-init"; + meta.maintainers = with pkgs.lib.maintainers; [ lassulus ]; - nodes.machine = { - services.syncthing = { - enable = true; - settings.devices.testDevice = { - id = testId; - }; - settings.folders.testFolder = { - path = "/tmp/test"; - devices = [ "testDevice" ]; - }; - settings.gui.user = "guiUser"; + nodes.machine = { + services.syncthing = { + enable = true; + settings.devices.testDevice = { + id = testId; }; + settings.folders.testFolder = { + path = "/tmp/test"; + devices = [ "testDevice" ]; + }; + settings.gui.user = "guiUser"; }; + }; - testScript = '' - machine.wait_for_unit("syncthing-init.service") - config = machine.succeed("cat /var/lib/syncthing/.config/syncthing/config.xml") + testScript = '' + machine.wait_for_unit("syncthing-init.service") + config = machine.succeed("cat /var/lib/syncthing/.config/syncthing/config.xml") - assert "testFolder" in config - assert "${testId}" in config - assert "guiUser" in config - ''; - } -) + assert "testFolder" in config + assert "${testId}" in config + assert "guiUser" in config + ''; +} diff --git a/nixos/tests/syncthing-many-devices.nix b/nixos/tests/syncthing-many-devices.nix index b60569578071..a00026faf299 100644 --- a/nixos/tests/syncthing-many-devices.nix +++ b/nixos/tests/syncthing-many-devices.nix @@ -1,225 +1,223 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - # This nixosTest is supposed to check the following: - # - # - Whether syncthing's API handles multiple requests for many devices, see - # https://github.com/NixOS/nixpkgs/issues/260262 - # - # - Whether syncthing-init.service generated bash script removes devices and - # folders that are not present in the user's configuration, which is partly - # injected into the script. See also: - # https://github.com/NixOS/nixpkgs/issues/259256 - # +# This nixosTest is supposed to check the following: +# +# - Whether syncthing's API handles multiple requests for many devices, see +# https://github.com/NixOS/nixpkgs/issues/260262 +# +# - Whether syncthing-init.service generated bash script removes devices and +# folders that are not present in the user's configuration, which is partly +# injected into the script. See also: +# https://github.com/NixOS/nixpkgs/issues/259256 +# - let - # Just a long path not to copy paste - configPath = "/var/lib/syncthing/.config/syncthing/config.xml"; +let + # Just a long path not to copy paste + configPath = "/var/lib/syncthing/.config/syncthing/config.xml"; - # We will iterate this and more attribute sets defined here, later in the - # testScript. Start with this, and distinguish these settings from other - # settings, as we check these differently with xmllint, due to the ID. 
- settingsWithId = { - devices = { - # All of the device IDs used here were generated by the following command: - # - # (${pkgs.syncthing}/bin/syncthing generate --home /tmp/foo\ - # | grep ID: | sed 's/.*ID: *//') && rm -rf /tmp/foo - # - # See also discussion at: - # https://forum.syncthing.net/t/how-to-generate-dummy-device-ids/20927/8 - test_device1.id = "IVTZ5XF-EF3GKFT-GS4AZLG-IT6H2ZP-6WK75SF-AFXQXJJ-BNRZ4N6-XPDKVAU"; - test_device2.id = "5C35H56-Z2GFF4F-F3IVD4B-GJYVWIE-SMDBJZN-GI66KWP-52JIQGN-4AVLYAM"; - test_device3.id = "XKLSKHE-BZOHV7B-WQZACEF-GTH36NP-6JSBB6L-RXS3M7C-EEVWO2L-C5B4OAJ"; - test_device4.id = "APN5Q7J-35GZETO-5KCLF35-ZA7KBWK-HGWPBNG-FERF24R-UTLGMEX-4VJ6PQX"; - test_device5.id = "D4YXQEE-5MK6LIK-BRU5QWM-ZRXJCK2-N3RQBJE-23JKTQQ-LYGDPHF-RFPZIQX"; - test_device6.id = "TKMCH64-T44VSLI-6FN2YLF-URBZOBR-ATO4DYX-GEDRIII-CSMRQAI-UAQMDQG"; - test_device7.id = "472EEBG-Q4PZCD4-4CX6PGF-XS3FSQ2-UFXBZVB-PGNXWLX-7FKBLER-NJ3EMAR"; - test_device8.id = "HW6KUMK-WTBG24L-2HZQXLO-TGJSG2M-2JG3FHX-5OGYRUJ-T6L5NN7-L364QAZ"; - test_device9.id = "YAE24AP-7LSVY4T-J74ZSEM-A2IK6RB-FGA35TP-AG4CSLU-ED4UYYY-2J2TDQU"; - test_device10.id = "277XFSB-OFMQOBI-3XGNGUE-Y7FWRV3-QQDADIY-QIIPQ26-EOGTYKW-JP2EXAI"; - test_device11.id = "2WWXVTN-Q3QWAAY-XFORMRM-2FDI5XZ-OGN33BD-XOLL42R-DHLT2ML-QYXDQAU"; - }; - # Generates a few folders with IDs and paths as written... - folders = lib.pipe 6 [ - (builtins.genList (x: { - name = "/var/lib/syncthing/test_folder${builtins.toString x}"; - value = { - id = "DontDeleteMe${builtins.toString x}"; - }; - })) - builtins.listToAttrs - ]; + # We will iterate this and more attribute sets defined here, later in the + # testScript. Start with this, and distinguish these settings from other + # settings, as we check these differently with xmllint, due to the ID. + settingsWithId = { + devices = { + # All of the device IDs used here were generated by the following command: + # + # (${pkgs.syncthing}/bin/syncthing generate --home /tmp/foo\ + # | grep ID: | sed 's/.*ID: *//') && rm -rf /tmp/foo + # + # See also discussion at: + # https://forum.syncthing.net/t/how-to-generate-dummy-device-ids/20927/8 + test_device1.id = "IVTZ5XF-EF3GKFT-GS4AZLG-IT6H2ZP-6WK75SF-AFXQXJJ-BNRZ4N6-XPDKVAU"; + test_device2.id = "5C35H56-Z2GFF4F-F3IVD4B-GJYVWIE-SMDBJZN-GI66KWP-52JIQGN-4AVLYAM"; + test_device3.id = "XKLSKHE-BZOHV7B-WQZACEF-GTH36NP-6JSBB6L-RXS3M7C-EEVWO2L-C5B4OAJ"; + test_device4.id = "APN5Q7J-35GZETO-5KCLF35-ZA7KBWK-HGWPBNG-FERF24R-UTLGMEX-4VJ6PQX"; + test_device5.id = "D4YXQEE-5MK6LIK-BRU5QWM-ZRXJCK2-N3RQBJE-23JKTQQ-LYGDPHF-RFPZIQX"; + test_device6.id = "TKMCH64-T44VSLI-6FN2YLF-URBZOBR-ATO4DYX-GEDRIII-CSMRQAI-UAQMDQG"; + test_device7.id = "472EEBG-Q4PZCD4-4CX6PGF-XS3FSQ2-UFXBZVB-PGNXWLX-7FKBLER-NJ3EMAR"; + test_device8.id = "HW6KUMK-WTBG24L-2HZQXLO-TGJSG2M-2JG3FHX-5OGYRUJ-T6L5NN7-L364QAZ"; + test_device9.id = "YAE24AP-7LSVY4T-J74ZSEM-A2IK6RB-FGA35TP-AG4CSLU-ED4UYYY-2J2TDQU"; + test_device10.id = "277XFSB-OFMQOBI-3XGNGUE-Y7FWRV3-QQDADIY-QIIPQ26-EOGTYKW-JP2EXAI"; + test_device11.id = "2WWXVTN-Q3QWAAY-XFORMRM-2FDI5XZ-OGN33BD-XOLL42R-DHLT2ML-QYXDQAU"; }; - # Non default options that we check later if were applied - settingsWithoutId = { - options = { - autoUpgradeIntervalH = 0; - urAccepted = -1; - }; - gui = { - theme = "dark"; - }; + # Generates a few folders with IDs and paths as written... 
+ folders = lib.pipe 6 [ + (builtins.genList (x: { + name = "/var/lib/syncthing/test_folder${builtins.toString x}"; + value = { + id = "DontDeleteMe${builtins.toString x}"; + }; + })) + builtins.listToAttrs + ]; + }; + # Non default options that we check later if were applied + settingsWithoutId = { + options = { + autoUpgradeIntervalH = 0; + urAccepted = -1; }; - # Used later when checking whether settings were set in config.xml: - checkSettingWithId = - { - t, # t for type - id, - not ? false, - }: - '' - print("Searching for a ${t} with id ${id}") - configVal_${t} = machine.succeed( - "${pkgs.libxml2}/bin/xmllint " - "--xpath 'string(//${t}[@id=\"${id}\"]/@id)' ${configPath}" - ) - print("${t}.id = {}".format(configVal_${t})) - assert "${id}" ${if not then "not" else ""} in configVal_${t} - ''; - # Same as checkSettingWithId, but for 'options' and 'gui' - checkSettingWithoutId = - { - t, # t for type - n, # n for name - v, # v for value - not ? false, - }: - '' - print("checking whether setting ${t}.${n} is set to ${v}") - configVal_${t}_${n} = machine.succeed( - "${pkgs.libxml2}/bin/xmllint " - "--xpath 'string(/configuration/${t}/${n})' ${configPath}" - ) - print("${t}.${n} = {}".format(configVal_${t}_${n})) - assert "${v}" ${if not then "not" else ""} in configVal_${t}_${n} - ''; - # Removes duplication a bit to define this function for the IDs to delete - - # we check whether they were added after our script ran, and before the - # systemd unit's bash script ran, and afterwards - whether the systemd unit - # worked. - checkSettingsToDelete = - { - not, - }: - lib.pipe IDsToDelete [ - (lib.mapAttrsToList ( - t: id: - checkSettingWithId { - inherit t id; - inherit not; - } - )) - lib.concatStrings - ]; - # These IDs are added to syncthing using the API, similarly to how the - # generated systemd unit's bash script does it. Only we add it and expect the - # systemd unit bash script to remove them when executed. - IDsToDelete = { - # Also created using the syncthing generate command above - device = "LZ2CTHT-3W2M7BC-CMKDFZL-DLUQJFS-WJR73PA-NZGODWG-DZBHCHI-OXTQXAK"; - # Intentionally this is a substring of the IDs of the 'test_folder's, as - # explained in: https://github.com/NixOS/nixpkgs/issues/259256 - folder = "DeleteMe"; + gui = { + theme = "dark"; }; - addDeviceToDeleteScript = pkgs.writers.writeBash "syncthing-add-device-to-delete.sh" '' - set -euo pipefail - - export RUNTIME_DIRECTORY=/tmp - - curl() { - # get the api key by parsing the config.xml - while - ! ${pkgs.libxml2}/bin/xmllint \ - --xpath 'string(configuration/gui/apikey)' \ - ${configPath} \ - >"$RUNTIME_DIRECTORY/api_key" - do sleep 1; done - - (printf "X-API-Key: "; cat "$RUNTIME_DIRECTORY/api_key") >"$RUNTIME_DIRECTORY/headers" - - ${pkgs.curl}/bin/curl -sSLk -H "@$RUNTIME_DIRECTORY/headers" \ - --retry 1000 --retry-delay 1 --retry-all-errors \ - "$@" - } - curl -d ${lib.escapeShellArg (builtins.toJSON { deviceID = IDsToDelete.device; })} \ - -X POST 127.0.0.1:8384/rest/config/devices - curl -d ${lib.escapeShellArg (builtins.toJSON { id = IDsToDelete.folder; })} \ - -X POST 127.0.0.1:8384/rest/config/folders + }; + # Used later when checking whether settings were set in config.xml: + checkSettingWithId = + { + t, # t for type + id, + not ? 
false, + }: + '' + print("Searching for a ${t} with id ${id}") + configVal_${t} = machine.succeed( + "${pkgs.libxml2}/bin/xmllint " + "--xpath 'string(//${t}[@id=\"${id}\"]/@id)' ${configPath}" + ) + print("${t}.id = {}".format(configVal_${t})) + assert "${id}" ${if not then "not" else ""} in configVal_${t} ''; - in - { - name = "syncthing-many-devices"; - meta.maintainers = with lib.maintainers; [ doronbehar ]; + # Same as checkSettingWithId, but for 'options' and 'gui' + checkSettingWithoutId = + { + t, # t for type + n, # n for name + v, # v for value + not ? false, + }: + '' + print("checking whether setting ${t}.${n} is set to ${v}") + configVal_${t}_${n} = machine.succeed( + "${pkgs.libxml2}/bin/xmllint " + "--xpath 'string(/configuration/${t}/${n})' ${configPath}" + ) + print("${t}.${n} = {}".format(configVal_${t}_${n})) + assert "${v}" ${if not then "not" else ""} in configVal_${t}_${n} + ''; + # Removes duplication a bit to define this function for the IDs to delete - + # we check whether they were added after our script ran, and before the + # systemd unit's bash script ran, and afterwards - whether the systemd unit + # worked. + checkSettingsToDelete = + { + not, + }: + lib.pipe IDsToDelete [ + (lib.mapAttrsToList ( + t: id: + checkSettingWithId { + inherit t id; + inherit not; + } + )) + lib.concatStrings + ]; + # These IDs are added to syncthing using the API, similarly to how the + # generated systemd unit's bash script does it. Only we add it and expect the + # systemd unit bash script to remove them when executed. + IDsToDelete = { + # Also created using the syncthing generate command above + device = "LZ2CTHT-3W2M7BC-CMKDFZL-DLUQJFS-WJR73PA-NZGODWG-DZBHCHI-OXTQXAK"; + # Intentionally this is a substring of the IDs of the 'test_folder's, as + # explained in: https://github.com/NixOS/nixpkgs/issues/259256 + folder = "DeleteMe"; + }; + addDeviceToDeleteScript = pkgs.writers.writeBash "syncthing-add-device-to-delete.sh" '' + set -euo pipefail - nodes.machine = { - services.syncthing = { - enable = true; - overrideDevices = true; - overrideFolders = true; - settings = settingsWithoutId // settingsWithId; - }; + export RUNTIME_DIRECTORY=/tmp + + curl() { + # get the api key by parsing the config.xml + while + ! 
${pkgs.libxml2}/bin/xmllint \ + --xpath 'string(configuration/gui/apikey)' \ + ${configPath} \ + >"$RUNTIME_DIRECTORY/api_key" + do sleep 1; done + + (printf "X-API-Key: "; cat "$RUNTIME_DIRECTORY/api_key") >"$RUNTIME_DIRECTORY/headers" + + ${pkgs.curl}/bin/curl -sSLk -H "@$RUNTIME_DIRECTORY/headers" \ + --retry 1000 --retry-delay 1 --retry-all-errors \ + "$@" + } + curl -d ${lib.escapeShellArg (builtins.toJSON { deviceID = IDsToDelete.device; })} \ + -X POST 127.0.0.1:8384/rest/config/devices + curl -d ${lib.escapeShellArg (builtins.toJSON { id = IDsToDelete.folder; })} \ + -X POST 127.0.0.1:8384/rest/config/folders + ''; +in +{ + name = "syncthing-many-devices"; + meta.maintainers = with lib.maintainers; [ doronbehar ]; + + nodes.machine = { + services.syncthing = { + enable = true; + overrideDevices = true; + overrideFolders = true; + settings = settingsWithoutId // settingsWithId; }; - testScript = - '' - machine.wait_for_unit("syncthing-init.service") - '' - + (lib.pipe settingsWithId [ - # Check that folders and devices were added properly and that all IDs exist - (lib.mapAttrsRecursive ( - path: id: - checkSettingWithId { - # plural -> solitary - t = (lib.removeSuffix "s" (builtins.elemAt path 0)); - inherit id; - } - )) - # Get all the values we applied the above function upon - (lib.collect builtins.isString) - lib.concatStrings - ]) - + (lib.pipe settingsWithoutId [ - # Check that all other syncthing.settings were added properly with correct - # values - (lib.mapAttrsRecursive ( - path: value: - checkSettingWithoutId { - t = (builtins.elemAt path 0); - n = (builtins.elemAt path 1); - v = (builtins.toString value); - } - )) - # Get all the values we applied the above function upon - (lib.collect builtins.isString) - lib.concatStrings - ]) - + '' - # Run the script on the machine - machine.succeed("${addDeviceToDeleteScript}") - '' - + (checkSettingsToDelete { - not = false; - }) - + '' - # Useful for debugging later - machine.copy_from_vm("${configPath}", "before") + }; + testScript = + '' + machine.wait_for_unit("syncthing-init.service") + '' + + (lib.pipe settingsWithId [ + # Check that folders and devices were added properly and that all IDs exist + (lib.mapAttrsRecursive ( + path: id: + checkSettingWithId { + # plural -> solitary + t = (lib.removeSuffix "s" (builtins.elemAt path 0)); + inherit id; + } + )) + # Get all the values we applied the above function upon + (lib.collect builtins.isString) + lib.concatStrings + ]) + + (lib.pipe settingsWithoutId [ + # Check that all other syncthing.settings were added properly with correct + # values + (lib.mapAttrsRecursive ( + path: value: + checkSettingWithoutId { + t = (builtins.elemAt path 0); + n = (builtins.elemAt path 1); + v = (builtins.toString value); + } + )) + # Get all the values we applied the above function upon + (lib.collect builtins.isString) + lib.concatStrings + ]) + + '' + # Run the script on the machine + machine.succeed("${addDeviceToDeleteScript}") + '' + + (checkSettingsToDelete { + not = false; + }) + + '' + # Useful for debugging later + machine.copy_from_vm("${configPath}", "before") - machine.systemctl("restart syncthing-init.service") - machine.wait_for_unit("syncthing-init.service") - '' - + (checkSettingsToDelete { - not = true; - }) - + '' - # Useful for debugging later - machine.copy_from_vm("${configPath}", "after") + machine.systemctl("restart syncthing-init.service") + machine.wait_for_unit("syncthing-init.service") + '' + + (checkSettingsToDelete { + not = true; + }) + + '' + # Useful for debugging 
later + machine.copy_from_vm("${configPath}", "after") - # Copy the systemd unit's bash script, to inspect it for debugging. - mergeScript = machine.succeed( - "systemctl cat syncthing-init.service | " - "${pkgs.initool}/bin/initool g - Service ExecStart --value-only" - ).strip() # strip from new lines - machine.copy_from_vm(mergeScript, "") - ''; - } -) + # Copy the systemd unit's bash script, to inspect it for debugging. + mergeScript = machine.succeed( + "systemctl cat syncthing-init.service | " + "${pkgs.initool}/bin/initool g - Service ExecStart --value-only" + ).strip() # strip from new lines + machine.copy_from_vm(mergeScript, "") + ''; +} diff --git a/nixos/tests/syncthing-no-settings.nix b/nixos/tests/syncthing-no-settings.nix index 904f3eb37356..ee79d389e92a 100644 --- a/nixos/tests/syncthing-no-settings.nix +++ b/nixos/tests/syncthing-no-settings.nix @@ -1,26 +1,24 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "syncthing"; - meta.maintainers = with pkgs.lib.maintainers; [ chkno ]; +{ lib, pkgs, ... }: +{ + name = "syncthing"; + meta.maintainers = with pkgs.lib.maintainers; [ chkno ]; - nodes = { - a = { - environment.systemPackages = with pkgs; [ - curl - libxml2 - syncthing - ]; - services.syncthing = { - enable = true; - }; + nodes = { + a = { + environment.systemPackages = with pkgs; [ + curl + libxml2 + syncthing + ]; + services.syncthing = { + enable = true; }; }; - # Test that indeed a syncthing-init.service systemd service is not created. - # - testScript = # python - '' - a.succeed("systemctl list-unit-files | awk '$1 == \"syncthing-init.service\" {exit 1;}'") - ''; - } -) + }; + # Test that indeed a syncthing-init.service systemd service is not created. + # + testScript = # python + '' + a.succeed("systemctl list-unit-files | awk '$1 == \"syncthing-init.service\" {exit 1;}'") + ''; +} diff --git a/nixos/tests/syncthing-relay.nix b/nixos/tests/syncthing-relay.nix index 9b44155415d2..29793363414e 100644 --- a/nixos/tests/syncthing-relay.nix +++ b/nixos/tests/syncthing-relay.nix @@ -1,29 +1,27 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "syncthing-relay"; - meta.maintainers = with pkgs.lib.maintainers; [ ]; +{ lib, pkgs, ... }: +{ + name = "syncthing-relay"; + meta.maintainers = with pkgs.lib.maintainers; [ ]; - nodes.machine = { - environment.systemPackages = [ pkgs.jq ]; - services.syncthing.relay = { - enable = true; - providedBy = "nixos-test"; - pools = [ ]; # Don't connect to any pool while testing. - port = 12345; - statusPort = 12346; - }; + nodes.machine = { + environment.systemPackages = [ pkgs.jq ]; + services.syncthing.relay = { + enable = true; + providedBy = "nixos-test"; + pools = [ ]; # Don't connect to any pool while testing. 
+ port = 12345; + statusPort = 12346; }; + }; - testScript = '' - machine.wait_for_unit("syncthing-relay.service") - machine.wait_for_open_port(12345) - machine.wait_for_open_port(12346) + testScript = '' + machine.wait_for_unit("syncthing-relay.service") + machine.wait_for_open_port(12345) + machine.wait_for_open_port(12346) - out = machine.succeed( - "curl -sSf http://localhost:12346/status | jq -r '.options.\"provided-by\"'" - ) - assert "nixos-test" in out - ''; - } -) + out = machine.succeed( + "curl -sSf http://localhost:12346/status | jq -r '.options.\"provided-by\"'" + ) + assert "nixos-test" in out + ''; +} diff --git a/nixos/tests/syncthing.nix b/nixos/tests/syncthing.nix index f3e2614a0b83..6ba21fb89fc2 100644 --- a/nixos/tests/syncthing.nix +++ b/nixos/tests/syncthing.nix @@ -1,72 +1,70 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "syncthing"; - meta.maintainers = with pkgs.lib.maintainers; [ chkno ]; +{ lib, pkgs, ... }: +{ + name = "syncthing"; + meta.maintainers = with pkgs.lib.maintainers; [ chkno ]; - nodes = rec { - a = { - environment.systemPackages = with pkgs; [ - curl - libxml2 - syncthing - ]; - services.syncthing = { - enable = true; - openDefaultPorts = true; - }; + nodes = rec { + a = { + environment.systemPackages = with pkgs; [ + curl + libxml2 + syncthing + ]; + services.syncthing = { + enable = true; + openDefaultPorts = true; }; - b = a; }; + b = a; + }; - testScript = '' - import json - import shlex + testScript = '' + import json + import shlex - confdir = "/var/lib/syncthing/.config/syncthing" + confdir = "/var/lib/syncthing/.config/syncthing" - def addPeer(host, name, deviceID): - APIKey = host.succeed( - "xmllint --xpath 'string(configuration/gui/apikey)' %s/config.xml" % confdir - ).strip() - oldConf = host.succeed( - "curl -Ssf -H 'X-API-Key: %s' 127.0.0.1:8384/rest/config" % APIKey - ) - conf = json.loads(oldConf) - conf["devices"].append({"deviceID": deviceID, "id": name}) - conf["folders"].append( - { - "devices": [{"deviceID": deviceID}], - "id": "foo", - "path": "/var/lib/syncthing/foo", - "rescanIntervalS": 1, - } - ) - newConf = json.dumps(conf) - host.succeed( - "curl -Ssf -H 'X-API-Key: %s' 127.0.0.1:8384/rest/config -X PUT -d %s" - % (APIKey, shlex.quote(newConf)) - ) + def addPeer(host, name, deviceID): + APIKey = host.succeed( + "xmllint --xpath 'string(configuration/gui/apikey)' %s/config.xml" % confdir + ).strip() + oldConf = host.succeed( + "curl -Ssf -H 'X-API-Key: %s' 127.0.0.1:8384/rest/config" % APIKey + ) + conf = json.loads(oldConf) + conf["devices"].append({"deviceID": deviceID, "id": name}) + conf["folders"].append( + { + "devices": [{"deviceID": deviceID}], + "id": "foo", + "path": "/var/lib/syncthing/foo", + "rescanIntervalS": 1, + } + ) + newConf = json.dumps(conf) + host.succeed( + "curl -Ssf -H 'X-API-Key: %s' 127.0.0.1:8384/rest/config -X PUT -d %s" + % (APIKey, shlex.quote(newConf)) + ) - start_all() - a.wait_for_unit("syncthing.service") - b.wait_for_unit("syncthing.service") - a.wait_for_open_port(22000) - b.wait_for_open_port(22000) + start_all() + a.wait_for_unit("syncthing.service") + b.wait_for_unit("syncthing.service") + a.wait_for_open_port(22000) + b.wait_for_open_port(22000) - aDeviceID = a.succeed("syncthing -home=%s -device-id" % confdir).strip() - bDeviceID = b.succeed("syncthing -home=%s -device-id" % confdir).strip() - addPeer(a, "b", bDeviceID) - addPeer(b, "a", aDeviceID) + aDeviceID = a.succeed("syncthing -home=%s -device-id" % confdir).strip() + bDeviceID = b.succeed("syncthing 
-home=%s -device-id" % confdir).strip() + addPeer(a, "b", bDeviceID) + addPeer(b, "a", aDeviceID) - a.wait_for_file("/var/lib/syncthing/foo") - b.wait_for_file("/var/lib/syncthing/foo") - a.succeed("echo a2b > /var/lib/syncthing/foo/a2b") - b.succeed("echo b2a > /var/lib/syncthing/foo/b2a") - a.wait_for_file("/var/lib/syncthing/foo/b2a") - b.wait_for_file("/var/lib/syncthing/foo/a2b") - ''; - } -) + a.wait_for_file("/var/lib/syncthing/foo") + b.wait_for_file("/var/lib/syncthing/foo") + a.succeed("echo a2b > /var/lib/syncthing/foo/a2b") + b.succeed("echo b2a > /var/lib/syncthing/foo/b2a") + a.wait_for_file("/var/lib/syncthing/foo/b2a") + b.wait_for_file("/var/lib/syncthing/foo/a2b") + ''; +} diff --git a/nixos/tests/systemd-analyze.nix b/nixos/tests/systemd-analyze.nix index 3b1365c0b4cf..6ec5697f1cb7 100644 --- a/nixos/tests/systemd-analyze.nix +++ b/nixos/tests/systemd-analyze.nix @@ -1,52 +1,50 @@ -import ./make-test-python.nix ( - { - pkgs, - latestKernel ? false, - ... - }: +{ + pkgs, + latestKernel ? false, + ... +}: - { - name = "systemd-analyze"; - meta = with pkgs.lib.maintainers; { - maintainers = [ raskin ]; +{ + name = "systemd-analyze"; + meta = with pkgs.lib.maintainers; { + maintainers = [ raskin ]; + }; + + nodes.machine = + { pkgs, lib, ... }: + { + boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest; }; - nodes.machine = - { pkgs, lib, ... }: - { - boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest; - }; + testScript = '' + machine.wait_for_unit("multi-user.target") - testScript = '' - machine.wait_for_unit("multi-user.target") - - # We create a special output directory to copy it as a whole - with subtest("Prepare output dir"): - machine.succeed("mkdir systemd-analyze") + # We create a special output directory to copy it as a whole + with subtest("Prepare output dir"): + machine.succeed("mkdir systemd-analyze") - # Save the output into a file with given name inside the common - # output directory - def run_systemd_analyze(args, name): - tgt_dir = "systemd-analyze" - machine.succeed( - "systemd-analyze {} > {}/{} 2> {}/{}.err".format( - " ".join(args), tgt_dir, name, tgt_dir, name - ) - ) + # Save the output into a file with given name inside the common + # output directory + def run_systemd_analyze(args, name): + tgt_dir = "systemd-analyze" + machine.succeed( + "systemd-analyze {} > {}/{} 2> {}/{}.err".format( + " ".join(args), tgt_dir, name, tgt_dir, name + ) + ) - with subtest("Print statistics"): - run_systemd_analyze(["blame"], "blame.txt") - run_systemd_analyze(["critical-chain"], "critical-chain.txt") - run_systemd_analyze(["dot"], "dependencies.dot") - run_systemd_analyze(["plot"], "systemd-analyze.svg") + with subtest("Print statistics"): + run_systemd_analyze(["blame"], "blame.txt") + run_systemd_analyze(["critical-chain"], "critical-chain.txt") + run_systemd_analyze(["dot"], "dependencies.dot") + run_systemd_analyze(["plot"], "systemd-analyze.svg") - # We copy the main graph into the $out (toplevel), and we also copy - # the entire output directory with additional data - with subtest("Copying the resulting data into $out"): - machine.copy_from_vm("systemd-analyze/", "") - machine.copy_from_vm("systemd-analyze/systemd-analyze.svg", "") - ''; - } -) + # We copy the main graph into the $out (toplevel), and we also copy + # the entire output directory with additional data + with subtest("Copying the resulting data into $out"): + machine.copy_from_vm("systemd-analyze/", "") + 
machine.copy_from_vm("systemd-analyze/systemd-analyze.svg", "") + ''; +} diff --git a/nixos/tests/systemd-bpf.nix b/nixos/tests/systemd-bpf.nix index bc51d08497de..6af553130385 100644 --- a/nixos/tests/systemd-bpf.nix +++ b/nixos/tests/systemd-bpf.nix @@ -1,53 +1,51 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "systemd-bpf"; - meta = with lib.maintainers; { - maintainers = [ veehaitch ]; - }; - nodes = { - node1 = { - virtualisation.vlans = [ 1 ]; - networking = { - useNetworkd = true; - useDHCP = false; - firewall.enable = false; - interfaces.eth1.ipv4.addresses = [ - { - address = "192.168.1.1"; - prefixLength = 24; - } - ]; - }; - }; - - node2 = { - virtualisation.vlans = [ 1 ]; - networking = { - useNetworkd = true; - useDHCP = false; - firewall.enable = false; - interfaces.eth1.ipv4.addresses = [ - { - address = "192.168.1.2"; - prefixLength = 24; - } - ]; - }; +{ lib, ... }: +{ + name = "systemd-bpf"; + meta = with lib.maintainers; { + maintainers = [ veehaitch ]; + }; + nodes = { + node1 = { + virtualisation.vlans = [ 1 ]; + networking = { + useNetworkd = true; + useDHCP = false; + firewall.enable = false; + interfaces.eth1.ipv4.addresses = [ + { + address = "192.168.1.1"; + prefixLength = 24; + } + ]; }; }; - testScript = '' - start_all() - node1.systemctl("start systemd-networkd-wait-online.service") - node1.wait_for_unit("systemd-networkd-wait-online.service") - node2.systemctl("start systemd-networkd-wait-online.service") - node2.wait_for_unit("systemd-networkd-wait-online.service") + node2 = { + virtualisation.vlans = [ 1 ]; + networking = { + useNetworkd = true; + useDHCP = false; + firewall.enable = false; + interfaces.eth1.ipv4.addresses = [ + { + address = "192.168.1.2"; + prefixLength = 24; + } + ]; + }; + }; + }; - with subtest("test RestrictNetworkInterfaces= works"): - node1.succeed("ping -c 5 192.168.1.2") - node1.succeed("systemd-run -t -p RestrictNetworkInterfaces='eth1' ping -c 5 192.168.1.2") - node1.fail("systemd-run -t -p RestrictNetworkInterfaces='lo' ping -c 5 192.168.1.2") - ''; - } -) + testScript = '' + start_all() + node1.systemctl("start systemd-networkd-wait-online.service") + node1.wait_for_unit("systemd-networkd-wait-online.service") + node2.systemctl("start systemd-networkd-wait-online.service") + node2.wait_for_unit("systemd-networkd-wait-online.service") + + with subtest("test RestrictNetworkInterfaces= works"): + node1.succeed("ping -c 5 192.168.1.2") + node1.succeed("systemd-run -t -p RestrictNetworkInterfaces='eth1' ping -c 5 192.168.1.2") + node1.fail("systemd-run -t -p RestrictNetworkInterfaces='lo' ping -c 5 192.168.1.2") + ''; +} diff --git a/nixos/tests/systemd-coredump.nix b/nixos/tests/systemd-coredump.nix index 06888682e138..54f00dbbbc02 100644 --- a/nixos/tests/systemd-coredump.nix +++ b/nixos/tests/systemd-coredump.nix @@ -1,48 +1,46 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... 
}: - let +let - crasher = pkgs.writeCBin "crasher" "int main;"; + crasher = pkgs.writeCBin "crasher" "int main;"; - commonConfig = { - systemd.services.crasher.serviceConfig = { - ExecStart = "${crasher}/bin/crasher"; - StateDirectory = "crasher"; - WorkingDirectory = "%S/crasher"; - Restart = "no"; + commonConfig = { + systemd.services.crasher.serviceConfig = { + ExecStart = "${crasher}/bin/crasher"; + StateDirectory = "crasher"; + WorkingDirectory = "%S/crasher"; + Restart = "no"; + }; + }; + +in + +{ + name = "systemd-coredump"; + meta = with pkgs.lib.maintainers; { + maintainers = [ squalus ]; + }; + + nodes.machine1 = { pkgs, lib, ... }: commonConfig; + nodes.machine2 = + { pkgs, lib, ... }: + lib.recursiveUpdate commonConfig { + systemd.coredump.enable = false; + systemd.package = pkgs.systemd.override { + withCoredump = false; }; }; - in + testScript = '' + with subtest("systemd-coredump enabled"): + machine1.wait_for_unit("multi-user.target") + machine1.wait_for_unit("systemd-coredump.socket") + machine1.systemctl("start crasher"); + machine1.wait_until_succeeds("coredumpctl list | grep crasher", timeout=10) + machine1.fail("stat /var/lib/crasher/core") - { - name = "systemd-coredump"; - meta = with pkgs.lib.maintainers; { - maintainers = [ squalus ]; - }; - - nodes.machine1 = { pkgs, lib, ... }: commonConfig; - nodes.machine2 = - { pkgs, lib, ... }: - lib.recursiveUpdate commonConfig { - systemd.coredump.enable = false; - systemd.package = pkgs.systemd.override { - withCoredump = false; - }; - }; - - testScript = '' - with subtest("systemd-coredump enabled"): - machine1.wait_for_unit("multi-user.target") - machine1.wait_for_unit("systemd-coredump.socket") - machine1.systemctl("start crasher"); - machine1.wait_until_succeeds("coredumpctl list | grep crasher", timeout=10) - machine1.fail("stat /var/lib/crasher/core") - - with subtest("systemd-coredump disabled"): - machine2.systemctl("start crasher"); - machine2.wait_until_succeeds("stat /var/lib/crasher/core", timeout=10) - ''; - } -) + with subtest("systemd-coredump disabled"): + machine2.systemctl("start crasher"); + machine2.wait_until_succeeds("stat /var/lib/crasher/core", timeout=10) + ''; +} diff --git a/nixos/tests/systemd-credentials-tpm2.nix b/nixos/tests/systemd-credentials-tpm2.nix index 085d05d4b926..b01e3a0850d9 100644 --- a/nixos/tests/systemd-credentials-tpm2.nix +++ b/nixos/tests/systemd-credentials-tpm2.nix @@ -1,73 +1,71 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "systemd-credentials-tpm2"; +{ lib, pkgs, ... }: +{ + name = "systemd-credentials-tpm2"; - meta = { - maintainers = with pkgs.lib.maintainers; [ tmarkus ]; + meta = { + maintainers = with pkgs.lib.maintainers; [ tmarkus ]; + }; + + nodes.machine = + { pkgs, ... }: + { + virtualisation.tpm.enable = true; + environment.systemPackages = with pkgs; [ diffutils ]; }; - nodes.machine = - { pkgs, ... 
}: - { - virtualisation.tpm.enable = true; - environment.systemPackages = with pkgs; [ diffutils ]; - }; + testScript = '' + CRED_NAME = "testkey" + CRED_RAW_FILE = f"/root/{CRED_NAME}" + CRED_FILE = f"/root/{CRED_NAME}.cred" - testScript = '' - CRED_NAME = "testkey" - CRED_RAW_FILE = f"/root/{CRED_NAME}" - CRED_FILE = f"/root/{CRED_NAME}.cred" + def systemd_run(machine, cmd): + machine.log(f"Executing command (via systemd-run): \"{cmd}\"") - def systemd_run(machine, cmd): - machine.log(f"Executing command (via systemd-run): \"{cmd}\"") + (status, out) = machine.execute( " ".join([ + "systemd-run", + "--service-type=exec", + "--quiet", + "--wait", + "-E PATH=\"$PATH\"", + "-p StandardOutput=journal", + "-p StandardError=journal", + f"-p LoadCredentialEncrypted={CRED_NAME}:{CRED_FILE}", + f"$SHELL -c '{cmd}'" + ]) ) - (status, out) = machine.execute( " ".join([ - "systemd-run", - "--service-type=exec", - "--quiet", - "--wait", - "-E PATH=\"$PATH\"", - "-p StandardOutput=journal", - "-p StandardError=journal", - f"-p LoadCredentialEncrypted={CRED_NAME}:{CRED_FILE}", - f"$SHELL -c '{cmd}'" - ]) ) + if status != 0: + raise Exception(f"systemd_run failed (status {status})") - if status != 0: - raise Exception(f"systemd_run failed (status {status})") + machine.log("systemd-run finished successfully") - machine.log("systemd-run finished successfully") + machine.wait_for_unit("multi-user.target") - machine.wait_for_unit("multi-user.target") + with subtest("Check whether TPM device exists"): + machine.succeed("test -e /dev/tpm0") + machine.succeed("test -e /dev/tpmrm0") - with subtest("Check whether TPM device exists"): - machine.succeed("test -e /dev/tpm0") - machine.succeed("test -e /dev/tpmrm0") + with subtest("Check whether systemd-creds detects TPM2 correctly"): + cmd = "systemd-creds has-tpm2" + machine.log(f"Running \"{cmd}\"") + (status, _) = machine.execute(cmd) - with subtest("Check whether systemd-creds detects TPM2 correctly"): - cmd = "systemd-creds has-tpm2" - machine.log(f"Running \"{cmd}\"") - (status, _) = machine.execute(cmd) + # Check exit code equals 0 or 1 (1 means firmware support is missing, which is OK here) + if status != 0 and status != 1: + raise Exception("systemd-creds failed to detect TPM2") - # Check exit code equals 0 or 1 (1 means firmware support is missing, which is OK here) - if status != 0 and status != 1: - raise Exception("systemd-creds failed to detect TPM2") + with subtest("Encrypt credential using systemd-creds"): + machine.succeed(f"dd if=/dev/urandom of={CRED_RAW_FILE} bs=1k count=16") + machine.succeed(f"systemd-creds --with-key=host+tpm2 encrypt --name=testkey {CRED_RAW_FILE} {CRED_FILE}") - with subtest("Encrypt credential using systemd-creds"): - machine.succeed(f"dd if=/dev/urandom of={CRED_RAW_FILE} bs=1k count=16") - machine.succeed(f"systemd-creds --with-key=host+tpm2 encrypt --name=testkey {CRED_RAW_FILE} {CRED_FILE}") + with subtest("Write provided credential and check for equality"): + CRED_OUT_FILE = f"/root/{CRED_NAME}.out" + systemd_run(machine, f"systemd-creds cat testkey > {CRED_OUT_FILE}") + machine.succeed(f"cmp --silent -- {CRED_RAW_FILE} {CRED_OUT_FILE}") - with subtest("Write provided credential and check for equality"): - CRED_OUT_FILE = f"/root/{CRED_NAME}.out" - systemd_run(machine, f"systemd-creds cat testkey > {CRED_OUT_FILE}") - machine.succeed(f"cmp --silent -- {CRED_RAW_FILE} {CRED_OUT_FILE}") + with subtest("Check whether systemd service can see credential in systemd-creds list"): + systemd_run(machine, f"systemd-creds list | 
grep {CRED_NAME}") - with subtest("Check whether systemd service can see credential in systemd-creds list"): - systemd_run(machine, f"systemd-creds list | grep {CRED_NAME}") - - with subtest("Check whether systemd service can access credential in $CREDENTIALS_DIRECTORY"): - systemd_run(machine, f"cmp --silent -- $CREDENTIALS_DIRECTORY/{CRED_NAME} {CRED_RAW_FILE}") - ''; - } -) + with subtest("Check whether systemd service can access credential in $CREDENTIALS_DIRECTORY"): + systemd_run(machine, f"cmp --silent -- $CREDENTIALS_DIRECTORY/{CRED_NAME} {CRED_RAW_FILE}") + ''; +} diff --git a/nixos/tests/systemd-cryptenroll.nix b/nixos/tests/systemd-cryptenroll.nix index c8773888c086..d42a7db68432 100644 --- a/nixos/tests/systemd-cryptenroll.nix +++ b/nixos/tests/systemd-cryptenroll.nix @@ -1,45 +1,43 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "systemd-cryptenroll"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ymatsiuk ]; +{ pkgs, ... }: +{ + name = "systemd-cryptenroll"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ymatsiuk ]; + }; + + nodes.machine = + { pkgs, lib, ... }: + { + environment.systemPackages = [ pkgs.cryptsetup ]; + virtualisation = { + emptyDiskImages = [ 512 ]; + tpm.enable = true; + }; }; - nodes.machine = - { pkgs, lib, ... }: - { - environment.systemPackages = [ pkgs.cryptsetup ]; - virtualisation = { - emptyDiskImages = [ 512 ]; - tpm.enable = true; - }; - }; + testScript = '' + machine.start() - testScript = '' - machine.start() + # Verify the TPM device is available and accessible by systemd-cryptenroll + machine.succeed("test -e /dev/tpm0") + machine.succeed("test -e /dev/tpmrm0") + machine.succeed("systemd-cryptenroll --tpm2-device=list") - # Verify the TPM device is available and accessible by systemd-cryptenroll - machine.succeed("test -e /dev/tpm0") - machine.succeed("test -e /dev/tpmrm0") - machine.succeed("systemd-cryptenroll --tpm2-device=list") + # Create LUKS partition + machine.succeed("echo -n lukspass | cryptsetup luksFormat -q /dev/vdb -") + # Enroll new LUKS key and bind it to Secure Boot state + # For more details on PASSWORD variable, check the following issue: + # https://github.com/systemd/systemd/issues/20955 + machine.succeed("PASSWORD=lukspass systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs=7 /dev/vdb") + # Add LUKS partition to /etc/crypttab to test auto unlock + machine.succeed("echo 'luks /dev/vdb - tpm2-device=auto' >> /etc/crypttab") - # Create LUKS partition - machine.succeed("echo -n lukspass | cryptsetup luksFormat -q /dev/vdb -") - # Enroll new LUKS key and bind it to Secure Boot state - # For more details on PASSWORD variable, check the following issue: - # https://github.com/systemd/systemd/issues/20955 - machine.succeed("PASSWORD=lukspass systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs=7 /dev/vdb") - # Add LUKS partition to /etc/crypttab to test auto unlock - machine.succeed("echo 'luks /dev/vdb - tpm2-device=auto' >> /etc/crypttab") + machine.shutdown() + machine.start() - machine.shutdown() - machine.start() - - # Test LUKS partition automatic unlock on boot - machine.wait_for_unit("systemd-cryptsetup@luks.service") - # Wipe TPM2 slot - machine.succeed("systemd-cryptenroll --wipe-slot=tpm2 /dev/vdb") - ''; - } -) + # Test LUKS partition automatic unlock on boot + machine.wait_for_unit("systemd-cryptsetup@luks.service") + # Wipe TPM2 slot + machine.succeed("systemd-cryptenroll --wipe-slot=tpm2 /dev/vdb") + ''; +} diff --git a/nixos/tests/systemd-escaping.nix 
b/nixos/tests/systemd-escaping.nix index 1eeb7dbe6090..7f80e2988bd1 100644 --- a/nixos/tests/systemd-escaping.nix +++ b/nixos/tests/systemd-escaping.nix @@ -1,63 +1,61 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - echoAll = pkgs.writeScript "echo-all" '' - #! ${pkgs.runtimeShell} - for s in "$@"; do - printf '%s\n' "$s" - done - ''; - # deliberately using a local empty file instead of pkgs.emptyFile to have - # a non-store path in the test - args = [ - "a%Nything" - "lang=\${LANG}" - ";" - "/bin/sh -c date" - ./empty-file - 4.2 - 23 - ]; - in - { - name = "systemd-escaping"; +let + echoAll = pkgs.writeScript "echo-all" '' + #! ${pkgs.runtimeShell} + for s in "$@"; do + printf '%s\n' "$s" + done + ''; + # deliberately using a local empty file instead of pkgs.emptyFile to have + # a non-store path in the test + args = [ + "a%Nything" + "lang=\${LANG}" + ";" + "/bin/sh -c date" + ./empty-file + 4.2 + 23 + ]; +in +{ + name = "systemd-escaping"; - nodes.machine = - { - pkgs, - lib, - utils, - ... - }: - { - systemd.services.echo = - assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ [ ] ])).success; - assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ { } ])).success; - assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ null ])).success; - assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ false ])).success; - assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ (_: _) ])).success; - { - description = "Echo to the journal"; - serviceConfig.Type = "oneshot"; - serviceConfig.ExecStart = '' - ${echoAll} ${utils.escapeSystemdExecArgs args} - ''; - }; - }; + nodes.machine = + { + pkgs, + lib, + utils, + ... + }: + { + systemd.services.echo = + assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ [ ] ])).success; + assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ { } ])).success; + assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ null ])).success; + assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ false ])).success; + assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ (_: _) ])).success; + { + description = "Echo to the journal"; + serviceConfig.Type = "oneshot"; + serviceConfig.ExecStart = '' + ${echoAll} ${utils.escapeSystemdExecArgs args} + ''; + }; + }; - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.succeed("systemctl start echo.service") - # skip the first 'Starting ...' line - logs = machine.succeed("journalctl -u echo.service -o cat").splitlines()[1:] - assert "a%Nything" == logs[0] - assert "lang=''${LANG}" == logs[1] - assert ";" == logs[2] - assert "/bin/sh -c date" == logs[3] - assert "/nix/store/ij3gw72f4n5z4dz6nnzl1731p9kmjbwr-empty-file" == logs[4] - assert "4.2" in logs[5] # toString produces extra fractional digits! - assert "23" == logs[6] - ''; - } -) + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.succeed("systemctl start echo.service") + # skip the first 'Starting ...' line + logs = machine.succeed("journalctl -u echo.service -o cat").splitlines()[1:] + assert "a%Nything" == logs[0] + assert "lang=''${LANG}" == logs[1] + assert ";" == logs[2] + assert "/bin/sh -c date" == logs[3] + assert "/nix/store/ij3gw72f4n5z4dz6nnzl1731p9kmjbwr-empty-file" == logs[4] + assert "4.2" in logs[5] # toString produces extra fractional digits! 
+ assert "23" == logs[6] + ''; +} diff --git a/nixos/tests/systemd-homed.nix b/nixos/tests/systemd-homed.nix index b31f66cdd1c8..766fd4f2cc19 100644 --- a/nixos/tests/systemd-homed.nix +++ b/nixos/tests/systemd-homed.nix @@ -1,103 +1,101 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - password = "foobarfoo"; - newPass = "barfoobar"; - in - { - name = "systemd-homed"; - nodes.machine = - { config, pkgs, ... }: - { - services.homed.enable = true; +{ pkgs, lib, ... }: +let + password = "foobarfoo"; + newPass = "barfoobar"; +in +{ + name = "systemd-homed"; + nodes.machine = + { config, pkgs, ... }: + { + services.homed.enable = true; - users.users.test-normal-user = { - extraGroups = [ "wheel" ]; - isNormalUser = true; - initialPassword = password; - }; + users.users.test-normal-user = { + extraGroups = [ "wheel" ]; + isNormalUser = true; + initialPassword = password; }; - testScript = '' - def switchTTY(number): - machine.send_key(f"alt-f{number}") - machine.wait_until_succeeds(f"[ $(fgconsole) = {number} ]") - machine.wait_for_unit(f"getty@tty{number}.service") - machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{number}'") + }; + testScript = '' + def switchTTY(number): + machine.send_key(f"alt-f{number}") + machine.wait_until_succeeds(f"[ $(fgconsole) = {number} ]") + machine.wait_for_unit(f"getty@tty{number}.service") + machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{number}'") - machine.wait_for_unit("multi-user.target") + machine.wait_for_unit("multi-user.target") - # Smoke test to make sure the pam changes didn't break regular users. - machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") - with subtest("login as regular user"): - switchTTY(2) - machine.wait_until_tty_matches("2", "login: ") - machine.send_chars("test-normal-user\n") - machine.wait_until_tty_matches("2", "login: test-normal-user") - machine.wait_until_tty_matches("2", "Password: ") - machine.send_chars("${password}\n") - machine.wait_until_succeeds("pgrep -u test-normal-user bash") - machine.send_chars("whoami > /tmp/1\n") - machine.wait_for_file("/tmp/1") - assert "test-normal-user" in machine.succeed("cat /tmp/1") + # Smoke test to make sure the pam changes didn't break regular users. + machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + with subtest("login as regular user"): + switchTTY(2) + machine.wait_until_tty_matches("2", "login: ") + machine.send_chars("test-normal-user\n") + machine.wait_until_tty_matches("2", "login: test-normal-user") + machine.wait_until_tty_matches("2", "Password: ") + machine.send_chars("${password}\n") + machine.wait_until_succeeds("pgrep -u test-normal-user bash") + machine.send_chars("whoami > /tmp/1\n") + machine.wait_for_file("/tmp/1") + assert "test-normal-user" in machine.succeed("cat /tmp/1") - with subtest("create homed encrypted user"): - # TODO: Figure out how to pass password manually. - # - # This environment variable is used for homed internal testing - # and is not documented. - machine.succeed("NEWPASSWORD=${password} homectl create --shell=/run/current-system/sw/bin/bash --storage=luks -G wheel test-homed-user") + with subtest("create homed encrypted user"): + # TODO: Figure out how to pass password manually. + # + # This environment variable is used for homed internal testing + # and is not documented. 
+ machine.succeed("NEWPASSWORD=${password} homectl create --shell=/run/current-system/sw/bin/bash --storage=luks -G wheel test-homed-user") - with subtest("login as homed user"): - switchTTY(3) - machine.wait_until_tty_matches("3", "login: ") + with subtest("login as homed user"): + switchTTY(3) + machine.wait_until_tty_matches("3", "login: ") + machine.send_chars("test-homed-user\n") + machine.wait_until_tty_matches("3", "login: test-homed-user") + machine.wait_until_tty_matches("3", "Password: ") + machine.send_chars("${password}\n") + machine.wait_until_succeeds("pgrep -t tty3 -u test-homed-user bash") + machine.send_chars("whoami > /tmp/2\n") + machine.wait_for_file("/tmp/2") + assert "test-homed-user" in machine.succeed("cat /tmp/2") + + with subtest("change homed user password"): + switchTTY(4) + machine.wait_until_tty_matches("4", "login: ") + machine.send_chars("test-homed-user\n") + machine.wait_until_tty_matches("4", "login: test-homed-user") + machine.wait_until_tty_matches("4", "Password: ") + machine.send_chars("${password}\n") + machine.wait_until_succeeds("pgrep -t tty4 -u test-homed-user bash") + machine.send_chars("passwd\n") + # homed does it in a weird order, it asks for new passes, then it asks + # for the old one. + machine.sleep(2) + machine.send_chars("${newPass}\n") + machine.sleep(2) + machine.send_chars("${newPass}\n") + machine.sleep(4) + machine.send_chars("${password}\n") + machine.wait_until_fails("pgrep -t tty4 passwd") + + @polling_condition + def not_logged_in_tty5(): + machine.fail("pgrep -t tty5 bash") + + switchTTY(5) + with not_logged_in_tty5: # type: ignore[union-attr] + machine.wait_until_tty_matches("5", "login: ") machine.send_chars("test-homed-user\n") - machine.wait_until_tty_matches("3", "login: test-homed-user") - machine.wait_until_tty_matches("3", "Password: ") + machine.wait_until_tty_matches("5", "login: test-homed-user") + machine.wait_until_tty_matches("5", "Password: ") machine.send_chars("${password}\n") - machine.wait_until_succeeds("pgrep -t tty3 -u test-homed-user bash") - machine.send_chars("whoami > /tmp/2\n") - machine.wait_for_file("/tmp/2") - assert "test-homed-user" in machine.succeed("cat /tmp/2") + machine.wait_until_tty_matches("5", "Password incorrect or not sufficient for authentication of user test-homed-user.") + machine.wait_until_tty_matches("5", "Sorry, try again: ") + machine.send_chars("${newPass}\n") + machine.send_chars("whoami > /tmp/4\n") + machine.wait_for_file("/tmp/4") + assert "test-homed-user" in machine.succeed("cat /tmp/4") - with subtest("change homed user password"): - switchTTY(4) - machine.wait_until_tty_matches("4", "login: ") - machine.send_chars("test-homed-user\n") - machine.wait_until_tty_matches("4", "login: test-homed-user") - machine.wait_until_tty_matches("4", "Password: ") - machine.send_chars("${password}\n") - machine.wait_until_succeeds("pgrep -t tty4 -u test-homed-user bash") - machine.send_chars("passwd\n") - # homed does it in a weird order, it asks for new passes, then it asks - # for the old one. 
- machine.sleep(2) - machine.send_chars("${newPass}\n") - machine.sleep(2) - machine.send_chars("${newPass}\n") - machine.sleep(4) - machine.send_chars("${password}\n") - machine.wait_until_fails("pgrep -t tty4 passwd") - - @polling_condition - def not_logged_in_tty5(): - machine.fail("pgrep -t tty5 bash") - - switchTTY(5) - with not_logged_in_tty5: # type: ignore[union-attr] - machine.wait_until_tty_matches("5", "login: ") - machine.send_chars("test-homed-user\n") - machine.wait_until_tty_matches("5", "login: test-homed-user") - machine.wait_until_tty_matches("5", "Password: ") - machine.send_chars("${password}\n") - machine.wait_until_tty_matches("5", "Password incorrect or not sufficient for authentication of user test-homed-user.") - machine.wait_until_tty_matches("5", "Sorry, try again: ") - machine.send_chars("${newPass}\n") - machine.send_chars("whoami > /tmp/4\n") - machine.wait_for_file("/tmp/4") - assert "test-homed-user" in machine.succeed("cat /tmp/4") - - with subtest("homed user should be in wheel according to NSS"): - machine.succeed("userdbctl group wheel -s io.systemd.NameServiceSwitch | grep test-homed-user") - ''; - } -) + with subtest("homed user should be in wheel according to NSS"): + machine.succeed("userdbctl group wheel -s io.systemd.NameServiceSwitch | grep test-homed-user") + ''; +} diff --git a/nixos/tests/systemd-initrd-bridge.nix b/nixos/tests/systemd-initrd-bridge.nix index 7ca3a6a93a00..f0c7d5f98f7a 100644 --- a/nixos/tests/systemd-initrd-bridge.nix +++ b/nixos/tests/systemd-initrd-bridge.nix @@ -1,80 +1,78 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "systemd-initrd-bridge"; - meta.maintainers = [ lib.maintainers.majiir ]; +{ lib, ... }: +{ + name = "systemd-initrd-bridge"; + meta.maintainers = [ lib.maintainers.majiir ]; - # Tests bridge interface configuration in systemd-initrd. - # - # The 'a' and 'b' nodes are connected to a 'bridge' node through different - # links. The 'bridge' node configures a bridge across them. It waits forever - # in initrd (stage 1) with networking enabled. 'a' and 'b' ping 'bridge' to - # test connectivity with the bridge interface. Then, 'a' pings 'b' to test - # the bridge itself. + # Tests bridge interface configuration in systemd-initrd. + # + # The 'a' and 'b' nodes are connected to a 'bridge' node through different + # links. The 'bridge' node configures a bridge across them. It waits forever + # in initrd (stage 1) with networking enabled. 'a' and 'b' ping 'bridge' to + # test connectivity with the bridge interface. Then, 'a' pings 'b' to test + # the bridge itself. - nodes = { - bridge = - { config, lib, ... }: - { - boot.initrd.systemd.enable = true; - boot.initrd.network.enable = true; - boot.initrd.systemd.services.boot-blocker = { - before = [ "initrd.target" ]; - wantedBy = [ "initrd.target" ]; - script = "sleep infinity"; - serviceConfig.Type = "oneshot"; - }; - - networking.primaryIPAddress = "192.168.1.${toString config.virtualisation.test.nodeNumber}"; - - virtualisation.vlans = [ - 1 - 2 - ]; - networking.bridges.br0.interfaces = [ - "eth1" - "eth2" - ]; - - networking.interfaces = { - eth1.ipv4.addresses = lib.mkForce [ ]; - eth2.ipv4.addresses = lib.mkForce [ ]; - br0.ipv4.addresses = [ - { - address = config.networking.primaryIPAddress; - prefixLength = 24; - } - ]; - }; + nodes = { + bridge = + { config, lib, ... 
}: + { + boot.initrd.systemd.enable = true; + boot.initrd.network.enable = true; + boot.initrd.systemd.services.boot-blocker = { + before = [ "initrd.target" ]; + wantedBy = [ "initrd.target" ]; + script = "sleep infinity"; + serviceConfig.Type = "oneshot"; }; - a = { - virtualisation.vlans = [ 1 ]; - }; + networking.primaryIPAddress = "192.168.1.${toString config.virtualisation.test.nodeNumber}"; - b = - { config, ... }: - { - virtualisation.vlans = [ 2 ]; - networking.primaryIPAddress = lib.mkForce "192.168.1.${toString config.virtualisation.test.nodeNumber}"; - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + virtualisation.vlans = [ + 1 + 2 + ]; + networking.bridges.br0.interfaces = [ + "eth1" + "eth2" + ]; + + networking.interfaces = { + eth1.ipv4.addresses = lib.mkForce [ ]; + eth2.ipv4.addresses = lib.mkForce [ ]; + br0.ipv4.addresses = [ { address = config.networking.primaryIPAddress; prefixLength = 24; } ]; }; + }; + + a = { + virtualisation.vlans = [ 1 ]; }; - testScript = '' - start_all() - a.wait_for_unit("network.target") - b.wait_for_unit("network.target") + b = + { config, ... }: + { + virtualisation.vlans = [ 2 ]; + networking.primaryIPAddress = lib.mkForce "192.168.1.${toString config.virtualisation.test.nodeNumber}"; + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = config.networking.primaryIPAddress; + prefixLength = 24; + } + ]; + }; + }; - a.succeed("ping -n -w 10 -c 1 bridge >&2") - b.succeed("ping -n -w 10 -c 1 bridge >&2") + testScript = '' + start_all() + a.wait_for_unit("network.target") + b.wait_for_unit("network.target") - a.succeed("ping -n -w 10 -c 1 b >&2") - ''; - } -) + a.succeed("ping -n -w 10 -c 1 bridge >&2") + b.succeed("ping -n -w 10 -c 1 bridge >&2") + + a.succeed("ping -n -w 10 -c 1 b >&2") + ''; +} diff --git a/nixos/tests/systemd-initrd-btrfs-raid.nix b/nixos/tests/systemd-initrd-btrfs-raid.nix index 75f9879628fa..1aa21fc326cd 100644 --- a/nixos/tests/systemd-initrd-btrfs-raid.nix +++ b/nixos/tests/systemd-initrd-btrfs-raid.nix @@ -1,55 +1,53 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "systemd-initrd-btrfs-raid"; +{ lib, pkgs, ... }: +{ + name = "systemd-initrd-btrfs-raid"; - nodes.machine = - { pkgs, ... }: - { - # Use systemd-boot - virtualisation = { - emptyDiskImages = [ - 512 - 512 - ]; - useBootLoader = true; - # Booting off the BTRFS RAID requires an available init script from the Nix store - mountHostNixStore = true; - useEFIBoot = true; - }; - boot.loader.systemd-boot.enable = true; - boot.loader.efi.canTouchEfiVariables = true; + nodes.machine = + { pkgs, ... 
}: + { + # Use systemd-boot + virtualisation = { + emptyDiskImages = [ + 512 + 512 + ]; + useBootLoader = true; + # Booting off the BTRFS RAID requires an available init script from the Nix store + mountHostNixStore = true; + useEFIBoot = true; + }; + boot.loader.systemd-boot.enable = true; + boot.loader.efi.canTouchEfiVariables = true; - environment.systemPackages = with pkgs; [ btrfs-progs ]; - boot.initrd.systemd = { - enable = true; - emergencyAccess = true; - }; - - specialisation.boot-btrfs-raid.configuration = { - fileSystems = lib.mkVMOverride { - "/".fsType = lib.mkForce "btrfs"; - }; - virtualisation.rootDevice = "/dev/vdb"; - }; + environment.systemPackages = with pkgs; [ btrfs-progs ]; + boot.initrd.systemd = { + enable = true; + emergencyAccess = true; }; - testScript = '' - # Create RAID - machine.succeed("mkfs.btrfs -d raid0 /dev/vdb /dev/vdc") - machine.succeed("mkdir -p /mnt && mount /dev/vdb /mnt && echo hello > /mnt/test && umount /mnt") + specialisation.boot-btrfs-raid.configuration = { + fileSystems = lib.mkVMOverride { + "/".fsType = lib.mkForce "btrfs"; + }; + virtualisation.rootDevice = "/dev/vdb"; + }; + }; - # Boot from the RAID - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-btrfs-raid.conf") - machine.succeed("sync") - machine.crash() - machine.wait_for_unit("multi-user.target") + testScript = '' + # Create RAID + machine.succeed("mkfs.btrfs -d raid0 /dev/vdb /dev/vdc") + machine.succeed("mkdir -p /mnt && mount /dev/vdb /mnt && echo hello > /mnt/test && umount /mnt") - # Ensure we have successfully booted from the RAID - assert "(initrd)" in machine.succeed("systemd-analyze") # booted with systemd in stage 1 - assert "/dev/vdb on / type btrfs" in machine.succeed("mount") - assert "hello" in machine.succeed("cat /test") - assert "Total devices 2" in machine.succeed("btrfs filesystem show") - ''; - } -) + # Boot from the RAID + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-btrfs-raid.conf") + machine.succeed("sync") + machine.crash() + machine.wait_for_unit("multi-user.target") + + # Ensure we have successfully booted from the RAID + assert "(initrd)" in machine.succeed("systemd-analyze") # booted with systemd in stage 1 + assert "/dev/vdb on / type btrfs" in machine.succeed("mount") + assert "hello" in machine.succeed("cat /test") + assert "Total devices 2" in machine.succeed("btrfs filesystem show") + ''; +} diff --git a/nixos/tests/systemd-initrd-luks-fido2.nix b/nixos/tests/systemd-initrd-luks-fido2.nix index e681525c99f8..e822c811cec5 100644 --- a/nixos/tests/systemd-initrd-luks-fido2.nix +++ b/nixos/tests/systemd-initrd-luks-fido2.nix @@ -1,55 +1,53 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "systemd-initrd-luks-fido2"; +{ lib, pkgs, ... }: +{ + name = "systemd-initrd-luks-fido2"; - nodes.machine = - { pkgs, config, ... 
}: - { - # Use systemd-boot - virtualisation = { - emptyDiskImages = [ 512 ]; - useBootLoader = true; - # Booting off the encrypted disk requires having a Nix store available for the init script - mountHostNixStore = true; - useEFIBoot = true; - qemu.options = [ - "-device pci-ohci,id=usb-bus" - "-device canokey,bus=usb-bus.0,file=/tmp/canokey-file" - ]; - }; - boot.loader.systemd-boot.enable = true; - - boot.initrd.systemd.enable = true; - - environment.systemPackages = with pkgs; [ cryptsetup ]; - - specialisation.boot-luks.configuration = { - boot.initrd.luks.devices = lib.mkVMOverride { - cryptroot = { - device = "/dev/vdb"; - crypttabExtraOpts = [ "fido2-device=auto" ]; - }; - }; - virtualisation.rootDevice = "/dev/mapper/cryptroot"; - virtualisation.fileSystems."/".autoFormat = true; - }; + nodes.machine = + { pkgs, config, ... }: + { + # Use systemd-boot + virtualisation = { + emptyDiskImages = [ 512 ]; + useBootLoader = true; + # Booting off the encrypted disk requires having a Nix store available for the init script + mountHostNixStore = true; + useEFIBoot = true; + qemu.options = [ + "-device pci-ohci,id=usb-bus" + "-device canokey,bus=usb-bus.0,file=/tmp/canokey-file" + ]; }; + boot.loader.systemd-boot.enable = true; - testScript = '' - # Create encrypted volume - machine.wait_for_unit("multi-user.target") - machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") - machine.succeed("PASSWORD=supersecret SYSTEMD_LOG_LEVEL=debug systemd-cryptenroll --fido2-device=auto /dev/vdb |& systemd-cat") + boot.initrd.systemd.enable = true; - # Boot from the encrypted disk - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") - machine.succeed("sync") - machine.crash() + environment.systemPackages = with pkgs; [ cryptsetup ]; - # Boot and decrypt the disk - machine.wait_for_unit("multi-user.target") - assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") - ''; - } -) + specialisation.boot-luks.configuration = { + boot.initrd.luks.devices = lib.mkVMOverride { + cryptroot = { + device = "/dev/vdb"; + crypttabExtraOpts = [ "fido2-device=auto" ]; + }; + }; + virtualisation.rootDevice = "/dev/mapper/cryptroot"; + virtualisation.fileSystems."/".autoFormat = true; + }; + }; + + testScript = '' + # Create encrypted volume + machine.wait_for_unit("multi-user.target") + machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") + machine.succeed("PASSWORD=supersecret SYSTEMD_LOG_LEVEL=debug systemd-cryptenroll --fido2-device=auto /dev/vdb |& systemd-cat") + + # Boot from the encrypted disk + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") + machine.succeed("sync") + machine.crash() + + # Boot and decrypt the disk + machine.wait_for_unit("multi-user.target") + assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") + ''; +} diff --git a/nixos/tests/systemd-initrd-luks-keyfile.nix b/nixos/tests/systemd-initrd-luks-keyfile.nix index 22794c0dcd85..3723307946dd 100644 --- a/nixos/tests/systemd-initrd-luks-keyfile.nix +++ b/nixos/tests/systemd-initrd-luks-keyfile.nix @@ -1,62 +1,60 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let +{ lib, pkgs, ... 
}: +let - keyfile = pkgs.writeText "luks-keyfile" '' - MIGHAoGBAJ4rGTSo/ldyjQypd0kuS7k2OSsmQYzMH6TNj3nQ/vIUjDn7fqa3slt2 - gV6EK3TmTbGc4tzC1v4SWx2m+2Bjdtn4Fs4wiBwn1lbRdC6i5ZYCqasTWIntWn+6 - FllUkMD5oqjOR/YcboxG8Z3B5sJuvTP9llsF+gnuveWih9dpbBr7AgEC - ''; + keyfile = pkgs.writeText "luks-keyfile" '' + MIGHAoGBAJ4rGTSo/ldyjQypd0kuS7k2OSsmQYzMH6TNj3nQ/vIUjDn7fqa3slt2 + gV6EK3TmTbGc4tzC1v4SWx2m+2Bjdtn4Fs4wiBwn1lbRdC6i5ZYCqasTWIntWn+6 + FllUkMD5oqjOR/YcboxG8Z3B5sJuvTP9llsF+gnuveWih9dpbBr7AgEC + ''; - in - { - name = "systemd-initrd-luks-keyfile"; +in +{ + name = "systemd-initrd-luks-keyfile"; - nodes.machine = - { pkgs, ... }: - { - # Use systemd-boot - virtualisation = { - emptyDiskImages = [ 512 ]; - useBootLoader = true; - # Necessary to boot off the encrypted disk because it requires a init script coming from the Nix store - mountHostNixStore = true; - useEFIBoot = true; - }; - boot.loader.systemd-boot.enable = true; + nodes.machine = + { pkgs, ... }: + { + # Use systemd-boot + virtualisation = { + emptyDiskImages = [ 512 ]; + useBootLoader = true; + # Necessary to boot off the encrypted disk because it requires a init script coming from the Nix store + mountHostNixStore = true; + useEFIBoot = true; + }; + boot.loader.systemd-boot.enable = true; - environment.systemPackages = with pkgs; [ cryptsetup ]; - boot.initrd.systemd = { - enable = true; - emergencyAccess = true; - }; - - specialisation.boot-luks.configuration = { - boot.initrd.luks.devices = lib.mkVMOverride { - cryptroot = { - device = "/dev/vdb"; - keyFile = "/etc/cryptroot.key"; - }; - }; - virtualisation.rootDevice = "/dev/mapper/cryptroot"; - virtualisation.fileSystems."/".autoFormat = true; - boot.initrd.secrets."/etc/cryptroot.key" = keyfile; - }; + environment.systemPackages = with pkgs; [ cryptsetup ]; + boot.initrd.systemd = { + enable = true; + emergencyAccess = true; }; - testScript = '' - # Create encrypted volume - machine.wait_for_unit("multi-user.target") - machine.succeed("cryptsetup luksFormat -q --iter-time=1 -d ${keyfile} /dev/vdb") + specialisation.boot-luks.configuration = { + boot.initrd.luks.devices = lib.mkVMOverride { + cryptroot = { + device = "/dev/vdb"; + keyFile = "/etc/cryptroot.key"; + }; + }; + virtualisation.rootDevice = "/dev/mapper/cryptroot"; + virtualisation.fileSystems."/".autoFormat = true; + boot.initrd.secrets."/etc/cryptroot.key" = keyfile; + }; + }; - # Boot from the encrypted disk - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") - machine.succeed("sync") - machine.crash() + testScript = '' + # Create encrypted volume + machine.wait_for_unit("multi-user.target") + machine.succeed("cryptsetup luksFormat -q --iter-time=1 -d ${keyfile} /dev/vdb") - # Boot and decrypt the disk - machine.wait_for_unit("multi-user.target") - assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") - ''; - } -) + # Boot from the encrypted disk + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") + machine.succeed("sync") + machine.crash() + + # Boot and decrypt the disk + machine.wait_for_unit("multi-user.target") + assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") + ''; +} diff --git a/nixos/tests/systemd-initrd-luks-password.nix b/nixos/tests/systemd-initrd-luks-password.nix index 941926f98192..0f7c2f51a034 100644 --- a/nixos/tests/systemd-initrd-luks-password.nix +++ b/nixos/tests/systemd-initrd-luks-password.nix @@ -1,64 +1,62 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... 
}: - { - name = "systemd-initrd-luks-password"; +{ lib, pkgs, ... }: +{ + name = "systemd-initrd-luks-password"; - nodes.machine = - { pkgs, ... }: - { - # Use systemd-boot - virtualisation = { - emptyDiskImages = [ - 512 - 512 - ]; - useBootLoader = true; - # Booting off the encrypted disk requires an available init script - mountHostNixStore = true; - useEFIBoot = true; - }; - boot.loader.systemd-boot.enable = true; + nodes.machine = + { pkgs, ... }: + { + # Use systemd-boot + virtualisation = { + emptyDiskImages = [ + 512 + 512 + ]; + useBootLoader = true; + # Booting off the encrypted disk requires an available init script + mountHostNixStore = true; + useEFIBoot = true; + }; + boot.loader.systemd-boot.enable = true; - environment.systemPackages = with pkgs; [ cryptsetup ]; - boot.initrd.systemd = { - enable = true; - emergencyAccess = true; - }; - - specialisation.boot-luks.configuration = { - boot.initrd.luks.devices = lib.mkVMOverride { - # We have two disks and only type one password - key reuse is in place - cryptroot.device = "/dev/vdb"; - cryptroot2.device = "/dev/vdc"; - }; - virtualisation.rootDevice = "/dev/mapper/cryptroot"; - virtualisation.fileSystems."/".autoFormat = true; - # test mounting device unlocked in initrd after switching root - virtualisation.fileSystems."/cryptroot2".device = "/dev/mapper/cryptroot2"; - }; + environment.systemPackages = with pkgs; [ cryptsetup ]; + boot.initrd.systemd = { + enable = true; + emergencyAccess = true; }; - testScript = '' - # Create encrypted volume - machine.wait_for_unit("multi-user.target") - machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") - machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -") - machine.succeed("echo -n supersecret | cryptsetup luksOpen -q /dev/vdc cryptroot2") - machine.succeed("mkfs.ext4 /dev/mapper/cryptroot2") + specialisation.boot-luks.configuration = { + boot.initrd.luks.devices = lib.mkVMOverride { + # We have two disks and only type one password - key reuse is in place + cryptroot.device = "/dev/vdb"; + cryptroot2.device = "/dev/vdc"; + }; + virtualisation.rootDevice = "/dev/mapper/cryptroot"; + virtualisation.fileSystems."/".autoFormat = true; + # test mounting device unlocked in initrd after switching root + virtualisation.fileSystems."/cryptroot2".device = "/dev/mapper/cryptroot2"; + }; + }; - # Boot from the encrypted disk - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") - machine.succeed("sync") - machine.crash() + testScript = '' + # Create encrypted volume + machine.wait_for_unit("multi-user.target") + machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") + machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -") + machine.succeed("echo -n supersecret | cryptsetup luksOpen -q /dev/vdc cryptroot2") + machine.succeed("mkfs.ext4 /dev/mapper/cryptroot2") - # Boot and decrypt the disk - machine.start() - machine.wait_for_console_text("Please enter passphrase for disk cryptroot") - machine.send_console("supersecret\n") - machine.wait_for_unit("multi-user.target") + # Boot from the encrypted disk + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") + machine.succeed("sync") + machine.crash() - assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount"), "/dev/mapper/cryptroot do not appear in mountpoints list" - assert "/dev/mapper/cryptroot2 on /cryptroot2 type 
ext4" in machine.succeed("mount") - ''; - } -) + # Boot and decrypt the disk + machine.start() + machine.wait_for_console_text("Please enter passphrase for disk cryptroot") + machine.send_console("supersecret\n") + machine.wait_for_unit("multi-user.target") + + assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount"), "/dev/mapper/cryptroot do not appear in mountpoints list" + assert "/dev/mapper/cryptroot2 on /cryptroot2 type ext4" in machine.succeed("mount") + ''; +} diff --git a/nixos/tests/systemd-initrd-luks-tpm2.nix b/nixos/tests/systemd-initrd-luks-tpm2.nix index a6c52435ee03..20e203b0e86b 100644 --- a/nixos/tests/systemd-initrd-luks-tpm2.nix +++ b/nixos/tests/systemd-initrd-luks-tpm2.nix @@ -1,55 +1,53 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "systemd-initrd-luks-tpm2"; +{ lib, pkgs, ... }: +{ + name = "systemd-initrd-luks-tpm2"; - nodes.machine = - { pkgs, ... }: - { - # Use systemd-boot - virtualisation = { - emptyDiskImages = [ 512 ]; - useBootLoader = true; - # Booting off the TPM2-encrypted device requires an available init script - mountHostNixStore = true; - useEFIBoot = true; - tpm.enable = true; - }; - boot.loader.systemd-boot.enable = true; + nodes.machine = + { pkgs, ... }: + { + # Use systemd-boot + virtualisation = { + emptyDiskImages = [ 512 ]; + useBootLoader = true; + # Booting off the TPM2-encrypted device requires an available init script + mountHostNixStore = true; + useEFIBoot = true; + tpm.enable = true; + }; + boot.loader.systemd-boot.enable = true; - boot.initrd.availableKernelModules = [ "tpm_tis" ]; + boot.initrd.availableKernelModules = [ "tpm_tis" ]; - environment.systemPackages = with pkgs; [ cryptsetup ]; - boot.initrd.systemd = { - enable = true; - }; - - specialisation.boot-luks.configuration = { - boot.initrd.luks.devices = lib.mkVMOverride { - cryptroot = { - device = "/dev/vdb"; - crypttabExtraOpts = [ "tpm2-device=auto" ]; - }; - }; - virtualisation.rootDevice = "/dev/mapper/cryptroot"; - virtualisation.fileSystems."/".autoFormat = true; - }; + environment.systemPackages = with pkgs; [ cryptsetup ]; + boot.initrd.systemd = { + enable = true; }; - testScript = '' - # Create encrypted volume - machine.wait_for_unit("multi-user.target") - machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") - machine.succeed("PASSWORD=supersecret SYSTEMD_LOG_LEVEL=debug systemd-cryptenroll --tpm2-pcrs= --tpm2-device=auto /dev/vdb |& systemd-cat") + specialisation.boot-luks.configuration = { + boot.initrd.luks.devices = lib.mkVMOverride { + cryptroot = { + device = "/dev/vdb"; + crypttabExtraOpts = [ "tpm2-device=auto" ]; + }; + }; + virtualisation.rootDevice = "/dev/mapper/cryptroot"; + virtualisation.fileSystems."/".autoFormat = true; + }; + }; - # Boot from the encrypted disk - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") - machine.succeed("sync") - machine.crash() + testScript = '' + # Create encrypted volume + machine.wait_for_unit("multi-user.target") + machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") + machine.succeed("PASSWORD=supersecret SYSTEMD_LOG_LEVEL=debug systemd-cryptenroll --tpm2-pcrs= --tpm2-device=auto /dev/vdb |& systemd-cat") - # Boot and decrypt the disk - machine.wait_for_unit("multi-user.target") - assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") - ''; - } -) + # Boot from the encrypted disk + machine.succeed("bootctl set-default 
nixos-generation-1-specialisation-boot-luks.conf") + machine.succeed("sync") + machine.crash() + + # Boot and decrypt the disk + machine.wait_for_unit("multi-user.target") + assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount") + ''; +} diff --git a/nixos/tests/systemd-initrd-luks-unl0kr.nix b/nixos/tests/systemd-initrd-luks-unl0kr.nix index 5a9af4949cc9..875e1beb7187 100644 --- a/nixos/tests/systemd-initrd-luks-unl0kr.nix +++ b/nixos/tests/systemd-initrd-luks-unl0kr.nix @@ -1,109 +1,107 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let - passphrase = "secret"; +{ lib, pkgs, ... }: +let + passphrase = "secret"; - debugPackages = with pkgs; [ - coreutils-prefixed - toybox + debugPackages = with pkgs; [ + coreutils-prefixed + toybox - micro - nano - ]; - in - { - name = "systemd-initrd-luks-unl0kr"; - meta = { - maintainers = [ ]; - }; + micro + nano + ]; +in +{ + name = "systemd-initrd-luks-unl0kr"; + meta = { + maintainers = [ ]; + }; - # TODO: Fix OCR: #302965 - # enableOCR = true; + # TODO: Fix OCR: #302965 + # enableOCR = true; - nodes.machine = - { pkgs, ... }: - { - virtualisation = { - emptyDiskImages = [ - 512 - 512 - ]; - useBootLoader = true; - mountHostNixStore = true; - useEFIBoot = true; - qemu.options = [ - "-vga virtio" - ]; - }; - boot.loader.systemd-boot.enable = true; - - boot.kernelParams = [ - "rd.systemd.debug_shell" + nodes.machine = + { pkgs, ... }: + { + virtualisation = { + emptyDiskImages = [ + 512 + 512 ]; + useBootLoader = true; + mountHostNixStore = true; + useEFIBoot = true; + qemu.options = [ + "-vga virtio" + ]; + }; + boot.loader.systemd-boot.enable = true; - environment.systemPackages = - with pkgs; - [ - cryptsetup - ] - ++ debugPackages; - boot.initrd = { - systemd = { - enable = true; - emergencyAccess = true; + boot.kernelParams = [ + "rd.systemd.debug_shell" + ]; - storePaths = debugPackages; - }; - unl0kr = { - enable = true; + environment.systemPackages = + with pkgs; + [ + cryptsetup + ] + ++ debugPackages; + boot.initrd = { + systemd = { + enable = true; + emergencyAccess = true; - settings = { - general.backend = "drm"; - # TODO: Fix OCR. See above. - # theme.default = "adwaita-dark"; # Improves contrast quite a bit, helpful for OCR. - }; - }; + storePaths = debugPackages; }; + unl0kr = { + enable = true; - specialisation.boot-luks.configuration = { - testing.initrdBackdoor = true; - boot.initrd.luks.devices = lib.mkVMOverride { - # We have two disks and only type one password - key reuse is in place - cryptroot.device = "/dev/vdb"; - cryptroot2.device = "/dev/vdc"; + settings = { + general.backend = "drm"; + # TODO: Fix OCR. See above. + # theme.default = "adwaita-dark"; # Improves contrast quite a bit, helpful for OCR. 
}; - virtualisation.rootDevice = "/dev/mapper/cryptroot"; - virtualisation.fileSystems."/".autoFormat = true; - # test mounting device unlocked in initrd after switching root - virtualisation.fileSystems."/cryptroot2".device = "/dev/mapper/cryptroot2"; }; }; - testScript = '' - # Create encrypted volume - machine.wait_for_unit("multi-user.target") - machine.succeed("echo -n ${passphrase} | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") - machine.succeed("echo -n ${passphrase} | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -") - machine.succeed("echo -n ${passphrase} | cryptsetup luksOpen -q /dev/vdc cryptroot2") - machine.succeed("mkfs.ext4 /dev/mapper/cryptroot2") + specialisation.boot-luks.configuration = { + testing.initrdBackdoor = true; + boot.initrd.luks.devices = lib.mkVMOverride { + # We have two disks and only type one password - key reuse is in place + cryptroot.device = "/dev/vdb"; + cryptroot2.device = "/dev/vdc"; + }; + virtualisation.rootDevice = "/dev/mapper/cryptroot"; + virtualisation.fileSystems."/".autoFormat = true; + # test mounting device unlocked in initrd after switching root + virtualisation.fileSystems."/cryptroot2".device = "/dev/mapper/cryptroot2"; + }; + }; - # Boot from the encrypted disk - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") - machine.succeed("sync") - machine.crash() + testScript = '' + # Create encrypted volume + machine.wait_for_unit("multi-user.target") + machine.succeed("echo -n ${passphrase} | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -") + machine.succeed("echo -n ${passphrase} | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -") + machine.succeed("echo -n ${passphrase} | cryptsetup luksOpen -q /dev/vdc cryptroot2") + machine.succeed("mkfs.ext4 /dev/mapper/cryptroot2") - # Boot and decrypt the disk. This part of the test is SLOW. - machine.start() - machine.wait_for_unit("unl0kr-agent.service") - machine.screenshot("prompt") - machine.send_chars("${passphrase}") - machine.screenshot("pw") - machine.send_chars("\n") - machine.switch_root() - machine.wait_for_unit("multi-user.target") + # Boot from the encrypted disk + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf") + machine.succeed("sync") + machine.crash() - assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount"), "/dev/mapper/cryptroot do not appear in mountpoints list" - assert "/dev/mapper/cryptroot2 on /cryptroot2 type ext4" in machine.succeed("mount") - ''; - } -) + # Boot and decrypt the disk. This part of the test is SLOW. + machine.start() + machine.wait_for_unit("unl0kr-agent.service") + machine.screenshot("prompt") + machine.send_chars("${passphrase}") + machine.screenshot("pw") + machine.send_chars("\n") + machine.switch_root() + machine.wait_for_unit("multi-user.target") + + assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount"), "/dev/mapper/cryptroot do not appear in mountpoints list" + assert "/dev/mapper/cryptroot2 on /cryptroot2 type ext4" in machine.succeed("mount") + ''; +} diff --git a/nixos/tests/systemd-initrd-modprobe.nix b/nixos/tests/systemd-initrd-modprobe.nix index e5aee51ade7f..88237f5ab801 100644 --- a/nixos/tests/systemd-initrd-modprobe.nix +++ b/nixos/tests/systemd-initrd-modprobe.nix @@ -1,29 +1,27 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "systemd-initrd-modprobe"; +{ lib, pkgs, ... }: +{ + name = "systemd-initrd-modprobe"; - nodes.machine = - { pkgs, ... 
}: - { - testing.initrdBackdoor = true; - boot.initrd.systemd.enable = true; - boot.initrd.kernelModules = [ "tcp_hybla" ]; # Load module in initrd. - boot.extraModprobeConfig = '' - options tcp_hybla rtt0=42 - ''; - }; + nodes.machine = + { pkgs, ... }: + { + testing.initrdBackdoor = true; + boot.initrd.systemd.enable = true; + boot.initrd.kernelModules = [ "tcp_hybla" ]; # Load module in initrd. + boot.extraModprobeConfig = '' + options tcp_hybla rtt0=42 + ''; + }; - testScript = '' - machine.wait_for_unit("initrd.target") - rtt = machine.succeed("cat /sys/module/tcp_hybla/parameters/rtt0") - assert int(rtt) == 42, "Parameter should be respected for initrd kernel modules" + testScript = '' + machine.wait_for_unit("initrd.target") + rtt = machine.succeed("cat /sys/module/tcp_hybla/parameters/rtt0") + assert int(rtt) == 42, "Parameter should be respected for initrd kernel modules" - # Make sure it sticks in stage 2 - machine.switch_root() - machine.wait_for_unit("multi-user.target") - rtt = machine.succeed("cat /sys/module/tcp_hybla/parameters/rtt0") - assert int(rtt) == 42, "Parameter should be respected for initrd kernel modules" - ''; - } -) + # Make sure it sticks in stage 2 + machine.switch_root() + machine.wait_for_unit("multi-user.target") + rtt = machine.succeed("cat /sys/module/tcp_hybla/parameters/rtt0") + assert int(rtt) == 42, "Parameter should be respected for initrd kernel modules" + ''; +} diff --git a/nixos/tests/systemd-initrd-networkd-ssh.nix b/nixos/tests/systemd-initrd-networkd-ssh.nix index 9805689be4aa..bf59c925875c 100644 --- a/nixos/tests/systemd-initrd-networkd-ssh.nix +++ b/nixos/tests/systemd-initrd-networkd-ssh.nix @@ -1,64 +1,62 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "systemd-initrd-network-ssh"; - meta.maintainers = [ lib.maintainers.elvishjerricco ]; +{ lib, ... }: +{ + name = "systemd-initrd-network-ssh"; + meta.maintainers = [ lib.maintainers.elvishjerricco ]; - nodes = { - server = - { config, pkgs, ... }: - { - testing.initrdBackdoor = true; - boot.initrd.systemd.enable = true; - boot.initrd.systemd.contents."/etc/msg".text = "foo"; - boot.initrd.network = { + nodes = { + server = + { config, pkgs, ... }: + { + testing.initrdBackdoor = true; + boot.initrd.systemd.enable = true; + boot.initrd.systemd.contents."/etc/msg".text = "foo"; + boot.initrd.network = { + enable = true; + ssh = { enable = true; - ssh = { - enable = true; - authorizedKeys = [ (lib.readFile ./initrd-network-ssh/id_ed25519.pub) ]; - port = 22; - hostKeys = [ ./initrd-network-ssh/ssh_host_ed25519_key ]; - }; + authorizedKeys = [ (lib.readFile ./initrd-network-ssh/id_ed25519.pub) ]; + port = 22; + hostKeys = [ ./initrd-network-ssh/ssh_host_ed25519_key ]; }; }; + }; - client = - { config, ... }: - { - environment.etc = { - knownHosts = { - text = lib.concatStrings [ - "server," - "${toString (lib.head (lib.splitString " " (toString (lib.elemAt (lib.splitString "\n" config.networking.extraHosts) 2))))} " - "${lib.readFile ./initrd-network-ssh/ssh_host_ed25519_key.pub}" - ]; - }; - sshKey = { - source = ./initrd-network-ssh/id_ed25519; - mode = "0600"; - }; + client = + { config, ... 
}: + { + environment.etc = { + knownHosts = { + text = lib.concatStrings [ + "server," + "${toString (lib.head (lib.splitString " " (toString (lib.elemAt (lib.splitString "\n" config.networking.extraHosts) 2))))} " + "${lib.readFile ./initrd-network-ssh/ssh_host_ed25519_key.pub}" + ]; + }; + sshKey = { + source = ./initrd-network-ssh/id_ed25519; + mode = "0600"; }; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - def ssh_is_up(_) -> bool: - status, _ = client.execute("nc -z server 22") - return status == 0 + def ssh_is_up(_) -> bool: + status, _ = client.execute("nc -z server 22") + return status == 0 - client.wait_for_unit("network.target") - with client.nested("waiting for SSH server to come up"): - retry(ssh_is_up) + client.wait_for_unit("network.target") + with client.nested("waiting for SSH server to come up"): + retry(ssh_is_up) - msg = client.succeed( - "ssh -i /etc/sshKey -o UserKnownHostsFile=/etc/knownHosts server 'cat /etc/msg'" - ) - assert "foo" in msg + msg = client.succeed( + "ssh -i /etc/sshKey -o UserKnownHostsFile=/etc/knownHosts server 'cat /etc/msg'" + ) + assert "foo" in msg - server.switch_root() - server.wait_for_unit("multi-user.target") - ''; - } -) + server.switch_root() + server.wait_for_unit("multi-user.target") + ''; +} diff --git a/nixos/tests/systemd-initrd-swraid.nix b/nixos/tests/systemd-initrd-swraid.nix index 106839ef4b95..933b2ad41056 100644 --- a/nixos/tests/systemd-initrd-swraid.nix +++ b/nixos/tests/systemd-initrd-swraid.nix @@ -1,77 +1,75 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "systemd-initrd-swraid"; +{ lib, pkgs, ... }: +{ + name = "systemd-initrd-swraid"; - nodes.machine = - { pkgs, ... }: - { - # Use systemd-boot - virtualisation = { - emptyDiskImages = [ - 512 - 512 - ]; - useBootLoader = true; - # Booting off the RAID requires an available init script - mountHostNixStore = true; - useEFIBoot = true; - }; - boot.loader.systemd-boot.enable = true; - boot.loader.efi.canTouchEfiVariables = true; + nodes.machine = + { pkgs, ... }: + { + # Use systemd-boot + virtualisation = { + emptyDiskImages = [ + 512 + 512 + ]; + useBootLoader = true; + # Booting off the RAID requires an available init script + mountHostNixStore = true; + useEFIBoot = true; + }; + boot.loader.systemd-boot.enable = true; + boot.loader.efi.canTouchEfiVariables = true; - environment.systemPackages = with pkgs; [ - mdadm - e2fsprogs - ]; # for mdadm and mkfs.ext4 - boot.swraid = { - enable = true; - mdadmConf = '' - ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc - ''; - }; - environment.etc."mdadm.conf".text = '' - MAILADDR test@example.com + environment.systemPackages = with pkgs; [ + mdadm + e2fsprogs + ]; # for mdadm and mkfs.ext4 + boot.swraid = { + enable = true; + mdadmConf = '' + ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc ''; - boot.initrd = { - systemd = { - enable = true; - emergencyAccess = true; - }; - kernelModules = [ "raid0" ]; + }; + environment.etc."mdadm.conf".text = '' + MAILADDR test@example.com + ''; + boot.initrd = { + systemd = { + enable = true; + emergencyAccess = true; }; - - specialisation.boot-swraid.configuration.virtualisation.rootDevice = "/dev/disk/by-label/testraid"; - # This protects against a regression. We do not have to switch to it. - # It's sufficient to trigger its evaluation. 
- specialisation.build-old-initrd.configuration.boot.initrd.systemd.enable = lib.mkForce false; + kernelModules = [ "raid0" ]; }; - testScript = '' - # Create RAID - machine.succeed("mdadm --create --force /dev/md0 -n 2 --level=raid1 /dev/vdb /dev/vdc --metadata=0.90") - machine.succeed("mkfs.ext4 -L testraid /dev/md0") - machine.succeed("mkdir -p /mnt && mount /dev/md0 /mnt && echo hello > /mnt/test && umount /mnt") + specialisation.boot-swraid.configuration.virtualisation.rootDevice = "/dev/disk/by-label/testraid"; + # This protects against a regression. We do not have to switch to it. + # It's sufficient to trigger its evaluation. + specialisation.build-old-initrd.configuration.boot.initrd.systemd.enable = lib.mkForce false; + }; - # Boot from the RAID - machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-swraid.conf") - machine.succeed("sync") - machine.crash() - machine.wait_for_unit("multi-user.target") + testScript = '' + # Create RAID + machine.succeed("mdadm --create --force /dev/md0 -n 2 --level=raid1 /dev/vdb /dev/vdc --metadata=0.90") + machine.succeed("mkfs.ext4 -L testraid /dev/md0") + machine.succeed("mkdir -p /mnt && mount /dev/md0 /mnt && echo hello > /mnt/test && umount /mnt") - # Ensure we have successfully booted from the RAID - assert "(initrd)" in machine.succeed("systemd-analyze") # booted with systemd in stage 1 - assert "/dev/md0 on / type ext4" in machine.succeed("mount") - assert "hello" in machine.succeed("cat /test") - assert "md0" in machine.succeed("cat /proc/mdstat") + # Boot from the RAID + machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-swraid.conf") + machine.succeed("sync") + machine.crash() + machine.wait_for_unit("multi-user.target") - expected_config = """MAILADDR test@example.com + # Ensure we have successfully booted from the RAID + assert "(initrd)" in machine.succeed("systemd-analyze") # booted with systemd in stage 1 + assert "/dev/md0 on / type ext4" in machine.succeed("mount") + assert "hello" in machine.succeed("cat /test") + assert "md0" in machine.succeed("cat /proc/mdstat") - ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc - """ - got_config = machine.execute("cat /etc/mdadm.conf")[1] - assert expected_config == got_config, repr((expected_config, got_config)) - machine.wait_for_unit("mdmonitor.service") - ''; - } -) + expected_config = """MAILADDR test@example.com + + ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc + """ + got_config = machine.execute("cat /etc/mdadm.conf")[1] + assert expected_config == got_config, repr((expected_config, got_config)) + machine.wait_for_unit("mdmonitor.service") + ''; +} diff --git a/nixos/tests/systemd-initrd-vconsole.nix b/nixos/tests/systemd-initrd-vconsole.nix index 4fc9b5f3738c..556679cdbb15 100644 --- a/nixos/tests/systemd-initrd-vconsole.nix +++ b/nixos/tests/systemd-initrd-vconsole.nix @@ -1,52 +1,50 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "systemd-initrd-vconsole"; +{ lib, pkgs, ... }: +{ + name = "systemd-initrd-vconsole"; - nodes.machine = - { pkgs, ... }: - { - boot.kernelParams = lib.mkAfter [ - "rd.systemd.unit=rescue.target" - "loglevel=3" - "udev.log_level=3" - "systemd.log_level=warning" - ]; + nodes.machine = + { pkgs, ... 
}: + { + boot.kernelParams = lib.mkAfter [ + "rd.systemd.unit=rescue.target" + "loglevel=3" + "udev.log_level=3" + "systemd.log_level=warning" + ]; - boot.initrd.systemd = { - enable = true; - emergencyAccess = true; - }; - - console = { - earlySetup = true; - keyMap = "colemak"; - }; + boot.initrd.systemd = { + enable = true; + emergencyAccess = true; }; - testScript = '' - # Boot into rescue shell in initrd - machine.start() - machine.wait_for_console_text("Press Enter for maintenance") - machine.send_console("\n") + console = { + earlySetup = true; + keyMap = "colemak"; + }; + }; - # Wait for shell to become ready - for _ in range(300): - machine.send_console("printf '%s to receive commands:\\n' Ready\n") - try: - machine.wait_for_console_text("Ready to receive commands:", timeout=1) - break - except Exception: - continue - else: - raise RuntimeError("Rescue shell never became ready") + testScript = '' + # Boot into rescue shell in initrd + machine.start() + machine.wait_for_console_text("Press Enter for maintenance") + machine.send_console("\n") - # Check keymap - machine.send_console("(printf '%s to receive text:\\n' Ready && read text && echo \"$text\") bool: - status, _ = client.execute("ping -n -c 1 server >&2") - return status == 0 - with client.nested("waiting for server to come up"): - retry(server_is_up) + testScript = '' + start_all() + client.wait_for_unit("network.target") - # Try to ping the (tagged) VLAN interface. - client.succeed("ping -n -w 10 -c 1 192.168.10.1 >&2") - ''; - } -) + # Wait for the regular (untagged) interface to be up. + def server_is_up(_) -> bool: + status, _ = client.execute("ping -n -c 1 server >&2") + return status == 0 + with client.nested("waiting for server to come up"): + retry(server_is_up) + + # Try to ping the (tagged) VLAN interface. + client.succeed("ping -n -w 10 -c 1 192.168.10.1 >&2") + ''; +} diff --git a/nixos/tests/systemd-journal-gateway.nix b/nixos/tests/systemd-journal-gateway.nix index 6ae009f5362c..e77501127ca2 100644 --- a/nixos/tests/systemd-journal-gateway.nix +++ b/nixos/tests/systemd-journal-gateway.nix @@ -1,95 +1,93 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "systemd-journal-gateway"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - minijackson - raitobezarius - ]; +{ lib, pkgs, ... 
}: +{ + name = "systemd-journal-gateway"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + minijackson + raitobezarius + ]; + }; + + # Named client for coherence with the systemd-journal-upload test, and for + # certificate validation + nodes.client = { + services.journald.gateway = { + enable = true; + cert = "/run/secrets/client/cert.pem"; + key = "/run/secrets/client/key.pem"; + trust = "/run/secrets/ca.cert.pem"; }; + }; - # Named client for coherence with the systemd-journal-upload test, and for - # certificate validation - nodes.client = { - services.journald.gateway = { - enable = true; - cert = "/run/secrets/client/cert.pem"; - key = "/run/secrets/client/key.pem"; - trust = "/run/secrets/ca.cert.pem"; - }; - }; + testScript = '' + import json + import subprocess + import tempfile - testScript = '' - import json - import subprocess - import tempfile + tmpdir_o = tempfile.TemporaryDirectory() + tmpdir = tmpdir_o.name - tmpdir_o = tempfile.TemporaryDirectory() - tmpdir = tmpdir_o.name - - def generate_pems(domain: str): - subprocess.run( - [ - "${pkgs.minica}/bin/minica", - "--ca-key=ca.key.pem", - "--ca-cert=ca.cert.pem", - f"--domains={domain}", - ], - cwd=str(tmpdir), - ) - - with subtest("Creating keys and certificates"): - generate_pems("server") - generate_pems("client") - - client.wait_for_unit("multi-user.target") - - def copy_pem(file: str): - machine.copy_from_host(source=f"{tmpdir}/{file}", target=f"/run/secrets/{file}") - machine.succeed(f"chmod 600 /run/secrets/{file} && chown systemd-journal-gateway /run/secrets/{file}") - - with subtest("Copying keys and certificates"): - machine.succeed("mkdir -p /run/secrets/{client,server}") - copy_pem("server/cert.pem") - copy_pem("server/key.pem") - copy_pem("client/cert.pem") - copy_pem("client/key.pem") - copy_pem("ca.cert.pem") - - client.wait_for_unit("multi-user.target") - - curl = '${pkgs.curl}/bin/curl' - accept_json = '--header "Accept: application/json"' - cacert = '--cacert /run/secrets/ca.cert.pem' - cert = '--cert /run/secrets/server/cert.pem' - key = '--key /run/secrets/server/key.pem' - base_url = 'https://client:19531' - - curl_cli = f"{curl} {accept_json} {cacert} {cert} {key} --fail" - - machine_info = client.succeed(f"{curl_cli} {base_url}/machine") - assert json.loads(machine_info)["hostname"] == "client", "wrong machine name" - - # The HTTP request should have started the gateway service, triggered by - # the .socket unit - client.wait_for_unit("systemd-journal-gatewayd.service") - - identifier = "nixos-test" - message = "Hello from NixOS test infrastructure" - - client.succeed(f"systemd-cat --identifier={identifier} <<< '{message}'") - - # max-time is a workaround against a bug in systemd-journal-gatewayd where - # if TLS is enabled, the connection is never closed. Since it will timeout, - # we ignore the return code. 
- entries = client.succeed( - f"{curl_cli} --max-time 5 {base_url}/entries?SYSLOG_IDENTIFIER={identifier} || true" + def generate_pems(domain: str): + subprocess.run( + [ + "${pkgs.minica}/bin/minica", + "--ca-key=ca.key.pem", + "--ca-cert=ca.cert.pem", + f"--domains={domain}", + ], + cwd=str(tmpdir), ) - # Number of entries should be only 1 - added_entry = json.loads(entries) - assert added_entry["SYSLOG_IDENTIFIER"] == identifier and added_entry["MESSAGE"] == message, "journal entry does not correspond" - ''; - } -) + with subtest("Creating keys and certificates"): + generate_pems("server") + generate_pems("client") + + client.wait_for_unit("multi-user.target") + + def copy_pem(file: str): + machine.copy_from_host(source=f"{tmpdir}/{file}", target=f"/run/secrets/{file}") + machine.succeed(f"chmod 600 /run/secrets/{file} && chown systemd-journal-gateway /run/secrets/{file}") + + with subtest("Copying keys and certificates"): + machine.succeed("mkdir -p /run/secrets/{client,server}") + copy_pem("server/cert.pem") + copy_pem("server/key.pem") + copy_pem("client/cert.pem") + copy_pem("client/key.pem") + copy_pem("ca.cert.pem") + + client.wait_for_unit("multi-user.target") + + curl = '${pkgs.curl}/bin/curl' + accept_json = '--header "Accept: application/json"' + cacert = '--cacert /run/secrets/ca.cert.pem' + cert = '--cert /run/secrets/server/cert.pem' + key = '--key /run/secrets/server/key.pem' + base_url = 'https://client:19531' + + curl_cli = f"{curl} {accept_json} {cacert} {cert} {key} --fail" + + machine_info = client.succeed(f"{curl_cli} {base_url}/machine") + assert json.loads(machine_info)["hostname"] == "client", "wrong machine name" + + # The HTTP request should have started the gateway service, triggered by + # the .socket unit + client.wait_for_unit("systemd-journal-gatewayd.service") + + identifier = "nixos-test" + message = "Hello from NixOS test infrastructure" + + client.succeed(f"systemd-cat --identifier={identifier} <<< '{message}'") + + # max-time is a workaround against a bug in systemd-journal-gatewayd where + # if TLS is enabled, the connection is never closed. Since it will timeout, + # we ignore the return code. + entries = client.succeed( + f"{curl_cli} --max-time 5 {base_url}/entries?SYSLOG_IDENTIFIER={identifier} || true" + ) + + # Number of entries should be only 1 + added_entry = json.loads(entries) + assert added_entry["SYSLOG_IDENTIFIER"] == identifier and added_entry["MESSAGE"] == message, "journal entry does not correspond" + ''; +} diff --git a/nixos/tests/systemd-journal-upload.nix b/nixos/tests/systemd-journal-upload.nix index 923dd48b32ac..00dbba82b88b 100644 --- a/nixos/tests/systemd-journal-upload.nix +++ b/nixos/tests/systemd-journal-upload.nix @@ -1,110 +1,108 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "systemd-journal-upload"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - minijackson - raitobezarius - ]; +{ pkgs, ... }: +{ + name = "systemd-journal-upload"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + minijackson + raitobezarius + ]; + }; + + nodes.server = + { nodes, ... }: + { + services.journald.remote = { + enable = true; + listen = "http"; + settings.Remote = { + ServerCertificateFile = "/run/secrets/sever.cert.pem"; + ServerKeyFile = "/run/secrets/sever.key.pem"; + TrustedCertificateFile = "/run/secrets/ca.cert.pem"; + Seal = true; + }; + }; + + networking.firewall.allowedTCPPorts = [ nodes.server.services.journald.remote.port ]; }; - nodes.server = - { nodes, ... 
}: - { - services.journald.remote = { - enable = true; - listen = "http"; - settings.Remote = { - ServerCertificateFile = "/run/secrets/sever.cert.pem"; - ServerKeyFile = "/run/secrets/sever.key.pem"; - TrustedCertificateFile = "/run/secrets/ca.cert.pem"; - Seal = true; - }; - }; - - networking.firewall.allowedTCPPorts = [ nodes.server.services.journald.remote.port ]; - }; - - nodes.client = - { lib, nodes, ... }: - { - services.journald.upload = { - enable = true; - settings.Upload = { - URL = "http://server:${toString nodes.server.services.journald.remote.port}"; - ServerCertificateFile = "/run/secrets/client.cert.pem"; - ServerKeyFile = "/run/secrets/client.key.pem"; - TrustedCertificateFile = "/run/secrets/ca.cert.pem"; - }; - }; - - # Wait for the PEMs to arrive - systemd.services.systemd-journal-upload.wantedBy = lib.mkForce [ ]; - systemd.paths.systemd-journal-upload = { - wantedBy = [ "default.target" ]; - # This file must be copied last - pathConfig.PathExists = [ "/run/secrets/ca.cert.pem" ]; + nodes.client = + { lib, nodes, ... }: + { + services.journald.upload = { + enable = true; + settings.Upload = { + URL = "http://server:${toString nodes.server.services.journald.remote.port}"; + ServerCertificateFile = "/run/secrets/client.cert.pem"; + ServerKeyFile = "/run/secrets/client.key.pem"; + TrustedCertificateFile = "/run/secrets/ca.cert.pem"; }; }; - testScript = '' - import subprocess - import tempfile + # Wait for the PEMs to arrive + systemd.services.systemd-journal-upload.wantedBy = lib.mkForce [ ]; + systemd.paths.systemd-journal-upload = { + wantedBy = [ "default.target" ]; + # This file must be copied last + pathConfig.PathExists = [ "/run/secrets/ca.cert.pem" ]; + }; + }; - tmpdir_o = tempfile.TemporaryDirectory() - tmpdir = tmpdir_o.name + testScript = '' + import subprocess + import tempfile - def generate_pems(domain: str): - subprocess.run( - [ - "${pkgs.minica}/bin/minica", - "--ca-key=ca.key.pem", - "--ca-cert=ca.cert.pem", - f"--domains={domain}", - ], - cwd=str(tmpdir), - ) + tmpdir_o = tempfile.TemporaryDirectory() + tmpdir = tmpdir_o.name - with subtest("Creating keys and certificates"): - generate_pems("server") - generate_pems("client") - - server.wait_for_unit("multi-user.target") - client.wait_for_unit("multi-user.target") - - def copy_pems(machine: Machine, domain: str): - machine.succeed("mkdir /run/secrets") - machine.copy_from_host( - source=f"{tmpdir}/{domain}/cert.pem", - target=f"/run/secrets/{domain}.cert.pem", - ) - machine.copy_from_host( - source=f"{tmpdir}/{domain}/key.pem", - target=f"/run/secrets/{domain}.key.pem", - ) - # Should be last - machine.copy_from_host( - source=f"{tmpdir}/ca.cert.pem", - target="/run/secrets/ca.cert.pem", - ) - - with subtest("Copying keys and certificates"): - copy_pems(server, "server") - copy_pems(client, "client") - - client.wait_for_unit("systemd-journal-upload.service") - # The journal upload should have started the remote service, triggered by - # the .socket unit - server.wait_for_unit("systemd-journal-remote.service") - - identifier = "nixos-test" - message = "Hello from NixOS test infrastructure" - - client.succeed(f"systemd-cat --identifier={identifier} <<< '{message}'") - server.wait_until_succeeds( - f"journalctl --file /var/log/journal/remote/remote-*.journal --identifier={identifier} | grep -F '{message}'" + def generate_pems(domain: str): + subprocess.run( + [ + "${pkgs.minica}/bin/minica", + "--ca-key=ca.key.pem", + "--ca-cert=ca.cert.pem", + f"--domains={domain}", + ], + cwd=str(tmpdir), ) - ''; - } 
-) + + with subtest("Creating keys and certificates"): + generate_pems("server") + generate_pems("client") + + server.wait_for_unit("multi-user.target") + client.wait_for_unit("multi-user.target") + + def copy_pems(machine: Machine, domain: str): + machine.succeed("mkdir /run/secrets") + machine.copy_from_host( + source=f"{tmpdir}/{domain}/cert.pem", + target=f"/run/secrets/{domain}.cert.pem", + ) + machine.copy_from_host( + source=f"{tmpdir}/{domain}/key.pem", + target=f"/run/secrets/{domain}.key.pem", + ) + # Should be last + machine.copy_from_host( + source=f"{tmpdir}/ca.cert.pem", + target="/run/secrets/ca.cert.pem", + ) + + with subtest("Copying keys and certificates"): + copy_pems(server, "server") + copy_pems(client, "client") + + client.wait_for_unit("systemd-journal-upload.service") + # The journal upload should have started the remote service, triggered by + # the .socket unit + server.wait_for_unit("systemd-journal-remote.service") + + identifier = "nixos-test" + message = "Hello from NixOS test infrastructure" + + client.succeed(f"systemd-cat --identifier={identifier} <<< '{message}'") + server.wait_until_succeeds( + f"journalctl --file /var/log/journal/remote/remote-*.journal --identifier={identifier} | grep -F '{message}'" + ) + ''; +} diff --git a/nixos/tests/systemd-journal.nix b/nixos/tests/systemd-journal.nix index c39fc50e5006..8589df339253 100644 --- a/nixos/tests/systemd-journal.nix +++ b/nixos/tests/systemd-journal.nix @@ -1,77 +1,75 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "systemd-journal"; - meta = with pkgs.lib.maintainers; { - maintainers = [ lewo ]; +{ + name = "systemd-journal"; + meta = with pkgs.lib.maintainers; { + maintainers = [ lewo ]; + }; + + nodes.machine = { + environment.systemPackages = [ pkgs.audit ]; + }; + nodes.auditd = { + security.auditd.enable = true; + security.audit.enable = true; + environment.systemPackages = [ pkgs.audit ]; + boot.kernel.sysctl."kernel.printk_ratelimit" = 0; + boot.kernelParams = [ "audit_backlog_limit=8192" ]; + }; + nodes.journaldAudit = { + services.journald.audit = true; + security.audit.enable = true; + environment.systemPackages = [ pkgs.audit ]; + boot.kernel.sysctl."kernel.printk_ratelimit" = 0; + boot.kernelParams = [ "audit_backlog_limit=8192" ]; + }; + nodes.containerCheck = { + containers.c1 = { + autoStart = true; + config = { }; }; + }; - nodes.machine = { - environment.systemPackages = [ pkgs.audit ]; - }; - nodes.auditd = { - security.auditd.enable = true; - security.audit.enable = true; - environment.systemPackages = [ pkgs.audit ]; - boot.kernel.sysctl."kernel.printk_ratelimit" = 0; - boot.kernelParams = [ "audit_backlog_limit=8192" ]; - }; - nodes.journaldAudit = { - services.journald.audit = true; - security.audit.enable = true; - environment.systemPackages = [ pkgs.audit ]; - boot.kernel.sysctl."kernel.printk_ratelimit" = 0; - boot.kernelParams = [ "audit_backlog_limit=8192" ]; - }; - nodes.containerCheck = { - containers.c1 = { - autoStart = true; - config = { }; - }; - }; + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.succeed("journalctl --grep=systemd") - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.succeed("journalctl --grep=systemd") + with subtest("no audit messages"): + machine.fail("journalctl _TRANSPORT=audit --grep 'unit=systemd-journald'") + machine.fail("journalctl _TRANSPORT=kernel --grep 'unit=systemd-journald'") - with subtest("no audit messages"): - machine.fail("journalctl _TRANSPORT=audit 
--grep 'unit=systemd-journald'") - machine.fail("journalctl _TRANSPORT=kernel --grep 'unit=systemd-journald'") + with subtest("auditd enabled"): + auditd.wait_for_unit("multi-user.target") - with subtest("auditd enabled"): - auditd.wait_for_unit("multi-user.target") - - # logs should end up in the journald - auditd.succeed("journalctl _TRANSPORT=audit --grep 'unit=systemd-journald'") - # logs should end up in the auditd audit log - auditd.succeed("grep 'unit=systemd-journald' /var/log/audit/audit.log") - # logs should not end up in kmesg - machine.fail("journalctl _TRANSPORT=kernel --grep 'unit=systemd-journald'") + # logs should end up in the journald + auditd.succeed("journalctl _TRANSPORT=audit --grep 'unit=systemd-journald'") + # logs should end up in the auditd audit log + auditd.succeed("grep 'unit=systemd-journald' /var/log/audit/audit.log") + # logs should not end up in kmesg + machine.fail("journalctl _TRANSPORT=kernel --grep 'unit=systemd-journald'") - with subtest("journald audit"): - journaldAudit.wait_for_unit("multi-user.target") + with subtest("journald audit"): + journaldAudit.wait_for_unit("multi-user.target") - # logs should end up in the journald - journaldAudit.succeed("journalctl _TRANSPORT=audit --grep 'unit=systemd-journald'") - # logs should NOT end up in audit log - journaldAudit.fail("grep 'unit=systemd-journald' /var/log/audit/audit.log") - # FIXME: If systemd fixes #15324 this test will start failing. - # You can fix this text by removing the below line. - # logs ideally should NOT end up in kmesg, but they do due to - # https://github.com/systemd/systemd/issues/15324 - journaldAudit.succeed("journalctl _TRANSPORT=kernel --grep 'unit=systemd-journald'") + # logs should end up in the journald + journaldAudit.succeed("journalctl _TRANSPORT=audit --grep 'unit=systemd-journald'") + # logs should NOT end up in audit log + journaldAudit.fail("grep 'unit=systemd-journald' /var/log/audit/audit.log") + # FIXME: If systemd fixes #15324 this test will start failing. + # You can fix this text by removing the below line. 
+ # logs ideally should NOT end up in kmesg, but they do due to + # https://github.com/systemd/systemd/issues/15324 + journaldAudit.succeed("journalctl _TRANSPORT=kernel --grep 'unit=systemd-journald'") - with subtest("container systemd-journald-audit not running"): - containerCheck.wait_for_unit("multi-user.target"); - containerCheck.wait_until_succeeds("systemctl -M c1 is-active default.target"); + with subtest("container systemd-journald-audit not running"): + containerCheck.wait_for_unit("multi-user.target"); + containerCheck.wait_until_succeeds("systemctl -M c1 is-active default.target"); - # systemd-journald-audit.socket should exist but not run due to the upstream unit's `Condition*` settings - (status, output) = containerCheck.execute("systemctl -M c1 is-active systemd-journald-audit.socket") - containerCheck.log(output) - assert status == 3 and output == "inactive\n", f"systemd-journald-audit.socket should exist in a container but remain inactive, was {output}" - ''; - } -) + # systemd-journald-audit.socket should exist but not run due to the upstream unit's `Condition*` settings + (status, output) = containerCheck.execute("systemctl -M c1 is-active systemd-journald-audit.socket") + containerCheck.log(output) + assert status == 3 and output == "inactive\n", f"systemd-journald-audit.socket should exist in a container but remain inactive, was {output}" + ''; +} diff --git a/nixos/tests/systemd-machinectl.nix b/nixos/tests/systemd-machinectl.nix index 2e35d160f533..03ffc25b9113 100644 --- a/nixos/tests/systemd-machinectl.nix +++ b/nixos/tests/systemd-machinectl.nix @@ -1,213 +1,211 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let +{ pkgs, ... }: +let - container = - { config, ... }: - { - # We re-use the NixOS container option ... - boot.isContainer = true; - # ... and revert unwanted defaults - networking.useHostResolvConf = false; + container = + { config, ... }: + { + # We re-use the NixOS container option ... + boot.isContainer = true; + # ... and revert unwanted defaults + networking.useHostResolvConf = false; - # use networkd to obtain systemd network setup - networking.useNetworkd = true; - networking.useDHCP = false; + # use networkd to obtain systemd network setup + networking.useNetworkd = true; + networking.useDHCP = false; - # systemd-nspawn expects /sbin/init - boot.loader.initScript.enable = true; + # systemd-nspawn expects /sbin/init + boot.loader.initScript.enable = true; - imports = [ ../modules/profiles/minimal.nix ]; + imports = [ ../modules/profiles/minimal.nix ]; - system.stateVersion = config.system.nixos.release; + system.stateVersion = config.system.nixos.release; - nixpkgs.pkgs = pkgs; - }; - - containerSystem = - (import ../lib/eval-config.nix { - system = null; - modules = [ container ]; - }).config.system.build.toplevel; - - containerName = "container"; - containerRoot = "/var/lib/machines/${containerName}"; - - containerTarball = pkgs.callPackage ../lib/make-system-tarball.nix { - storeContents = [ - { - object = containerSystem; - symlink = "/nix/var/nix/profiles/system"; - } - ]; - - contents = [ - { - source = containerSystem + "/etc/os-release"; - target = "/etc/os-release"; - } - { - source = containerSystem + "/init"; - target = "/sbin/init"; - } - ]; + nixpkgs.pkgs = pkgs; }; - in - { - name = "systemd-machinectl"; - nodes.machine = - { lib, ... 
}: + containerSystem = + (import ../lib/eval-config.nix { + system = null; + modules = [ container ]; + }).config.system.build.toplevel; + + containerName = "container"; + containerRoot = "/var/lib/machines/${containerName}"; + + containerTarball = pkgs.callPackage ../lib/make-system-tarball.nix { + storeContents = [ { - # use networkd to obtain systemd network setup - networking.useNetworkd = true; - networking.useDHCP = false; + object = containerSystem; + symlink = "/nix/var/nix/profiles/system"; + } + ]; - # do not try to access cache.nixos.org - nix.settings.substituters = lib.mkForce [ ]; + contents = [ + { + source = containerSystem + "/etc/os-release"; + target = "/etc/os-release"; + } + { + source = containerSystem + "/init"; + target = "/sbin/init"; + } + ]; + }; +in +{ + name = "systemd-machinectl"; - # auto-start container - systemd.targets.machines.wants = [ "systemd-nspawn@${containerName}.service" ]; + nodes.machine = + { lib, ... }: + { + # use networkd to obtain systemd network setup + networking.useNetworkd = true; + networking.useDHCP = false; - virtualisation.additionalPaths = [ - containerSystem - containerTarball - ]; + # do not try to access cache.nixos.org + nix.settings.substituters = lib.mkForce [ ]; - systemd.tmpfiles.rules = [ - "d /var/lib/machines/shared-decl 0755 root root - -" - ]; - systemd.nspawn.shared-decl = { - execConfig = { - Boot = false; - Parameters = "${containerSystem}/init"; - }; - filesConfig = { - BindReadOnly = "/nix/store"; - }; + # auto-start container + systemd.targets.machines.wants = [ "systemd-nspawn@${containerName}.service" ]; + + virtualisation.additionalPaths = [ + containerSystem + containerTarball + ]; + + systemd.tmpfiles.rules = [ + "d /var/lib/machines/shared-decl 0755 root root - -" + ]; + systemd.nspawn.shared-decl = { + execConfig = { + Boot = false; + Parameters = "${containerSystem}/init"; }; - - systemd.nspawn.${containerName} = { - filesConfig = { - # workaround to fix kernel namespaces; needed for Nix sandbox - # https://github.com/systemd/systemd/issues/27994#issuecomment-1704005670 - Bind = "/proc:/run/proc"; - }; + filesConfig = { + BindReadOnly = "/nix/store"; }; - - systemd.services."systemd-nspawn@${containerName}" = { - serviceConfig.Environment = [ - # Disable tmpfs for /tmp - "SYSTEMD_NSPAWN_TMPFS_TMP=0" - - # force unified cgroup delegation, which would be the default - # if systemd could check the capabilities of the installed systemd. 
- # see also: https://github.com/NixOS/nixpkgs/pull/198526 - "SYSTEMD_NSPAWN_UNIFIED_HIERARCHY=1" - ]; - overrideStrategy = "asDropin"; - }; - - # open DHCP for container - networking.firewall.extraCommands = '' - ${pkgs.iptables}/bin/iptables -A nixos-fw -i ve-+ -p udp -m udp --dport 67 -j nixos-fw-accept - ''; }; - testScript = '' - start_all() - machine.wait_for_unit("default.target"); + systemd.nspawn.${containerName} = { + filesConfig = { + # workaround to fix kernel namespaces; needed for Nix sandbox + # https://github.com/systemd/systemd/issues/27994#issuecomment-1704005670 + Bind = "/proc:/run/proc"; + }; + }; - # Test machinectl start stop of shared-decl - machine.succeed("machinectl start shared-decl"); - machine.wait_until_succeeds("systemctl -M shared-decl is-active default.target"); - machine.succeed("machinectl stop shared-decl"); + systemd.services."systemd-nspawn@${containerName}" = { + serviceConfig.Environment = [ + # Disable tmpfs for /tmp + "SYSTEMD_NSPAWN_TMPFS_TMP=0" - # create containers root - machine.succeed("mkdir -p ${containerRoot}"); + # force unified cgroup delegation, which would be the default + # if systemd could check the capabilities of the installed systemd. + # see also: https://github.com/NixOS/nixpkgs/pull/198526 + "SYSTEMD_NSPAWN_UNIFIED_HIERARCHY=1" + ]; + overrideStrategy = "asDropin"; + }; - # start container with shared nix store by using same arguments as for systemd-nspawn@.service - machine.succeed("systemd-run systemd-nspawn --machine=${containerName} --network-veth -U --bind-ro=/nix/store ${containerSystem}/init") - machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); + # open DHCP for container + networking.firewall.extraCommands = '' + ${pkgs.iptables}/bin/iptables -A nixos-fw -i ve-+ -p udp -m udp --dport 67 -j nixos-fw-accept + ''; + }; - # Test machinectl stop - machine.succeed("machinectl stop ${containerName}"); + testScript = '' + start_all() + machine.wait_for_unit("default.target"); - # Install container - # Workaround for nixos-install - machine.succeed("chmod o+rx /var/lib/machines"); - machine.succeed("nixos-install --root ${containerRoot} --system ${containerSystem} --no-channel-copy --no-root-passwd"); + # Test machinectl start stop of shared-decl + machine.succeed("machinectl start shared-decl"); + machine.wait_until_succeeds("systemctl -M shared-decl is-active default.target"); + machine.succeed("machinectl stop shared-decl"); - # Allow systemd-nspawn to apply user namespace on immutable files - machine.succeed("chattr -i ${containerRoot}/var/empty"); + # create containers root + machine.succeed("mkdir -p ${containerRoot}"); - # Test machinectl start - machine.succeed("machinectl start ${containerName}"); - machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); + # start container with shared nix store by using same arguments as for systemd-nspawn@.service + machine.succeed("systemd-run systemd-nspawn --machine=${containerName} --network-veth -U --bind-ro=/nix/store ${containerSystem}/init") + machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); - # Test systemd-nspawn configured unified cgroup delegation - # see also: - # https://github.com/systemd/systemd/blob/main/docs/CGROUP_DELEGATION.md#three-different-tree-setups- - machine.succeed('systemd-run --pty --wait -M ${containerName} /run/current-system/sw/bin/stat --format="%T" --file-system /sys/fs/cgroup > fstype') - machine.succeed('test $(tr -d "\\r" < fstype) = 
cgroup2fs') + # Test machinectl stop + machine.succeed("machinectl stop ${containerName}"); - # Test if systemd-nspawn provides a working environment for nix to build derivations - # https://nixos.org/guides/nix-pills/07-working-derivation - machine.succeed('systemd-run --pty --wait -M ${containerName} /run/current-system/sw/bin/nix-instantiate --expr \'derivation { name = "myname"; builder = "/bin/sh"; args = [ "-c" "echo foo > $out" ]; system = "${pkgs.system}"; }\' --add-root /tmp/drv') - machine.succeed('systemd-run --pty --wait -M ${containerName} /run/current-system/sw/bin/nix-store --option substitute false --realize /tmp/drv') + # Install container + # Workaround for nixos-install + machine.succeed("chmod o+rx /var/lib/machines"); + machine.succeed("nixos-install --root ${containerRoot} --system ${containerSystem} --no-channel-copy --no-root-passwd"); - # Test nss_mymachines without nscd - machine.succeed('LD_LIBRARY_PATH="/run/current-system/sw/lib" getent -s hosts:mymachines hosts ${containerName}'); + # Allow systemd-nspawn to apply user namespace on immutable files + machine.succeed("chattr -i ${containerRoot}/var/empty"); - # Test nss_mymachines via nscd - machine.succeed("getent hosts ${containerName}"); + # Test machinectl start + machine.succeed("machinectl start ${containerName}"); + machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); - # Test systemd-nspawn network configuration to container - machine.succeed("networkctl --json=short status ve-${containerName} | ${pkgs.jq}/bin/jq -e '.OperationalState == \"routable\"'"); + # Test systemd-nspawn configured unified cgroup delegation + # see also: + # https://github.com/systemd/systemd/blob/main/docs/CGROUP_DELEGATION.md#three-different-tree-setups- + machine.succeed('systemd-run --pty --wait -M ${containerName} /run/current-system/sw/bin/stat --format="%T" --file-system /sys/fs/cgroup > fstype') + machine.succeed('test $(tr -d "\\r" < fstype) = cgroup2fs') - # Test systemd-nspawn network configuration to host - machine.succeed("machinectl shell ${containerName} /run/current-system/sw/bin/networkctl --json=short status host0 | ${pkgs.jq}/bin/jq -r '.OperationalState == \"routable\"'"); + # Test if systemd-nspawn provides a working environment for nix to build derivations + # https://nixos.org/guides/nix-pills/07-working-derivation + machine.succeed('systemd-run --pty --wait -M ${containerName} /run/current-system/sw/bin/nix-instantiate --expr \'derivation { name = "myname"; builder = "/bin/sh"; args = [ "-c" "echo foo > $out" ]; system = "${pkgs.system}"; }\' --add-root /tmp/drv') + machine.succeed('systemd-run --pty --wait -M ${containerName} /run/current-system/sw/bin/nix-store --option substitute false --realize /tmp/drv') - # Test systemd-nspawn network configuration - machine.succeed("ping -n -c 1 ${containerName}"); + # Test nss_mymachines without nscd + machine.succeed('LD_LIBRARY_PATH="/run/current-system/sw/lib" getent -s hosts:mymachines hosts ${containerName}'); - # Test systemd-nspawn uses a user namespace - machine.succeed("test $(machinectl status ${containerName} | grep 'UID Shift: ' | wc -l) = 1") + # Test nss_mymachines via nscd + machine.succeed("getent hosts ${containerName}"); - # Test systemd-nspawn reboot - machine.succeed("machinectl shell ${containerName} /run/current-system/sw/bin/reboot"); - machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); + # Test systemd-nspawn network configuration to container + machine.succeed("networkctl 
--json=short status ve-${containerName} | ${pkgs.jq}/bin/jq -e '.OperationalState == \"routable\"'"); - # Test machinectl reboot - machine.succeed("machinectl reboot ${containerName}"); - machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); + # Test systemd-nspawn network configuration to host + machine.succeed("machinectl shell ${containerName} /run/current-system/sw/bin/networkctl --json=short status host0 | ${pkgs.jq}/bin/jq -r '.OperationalState == \"routable\"'"); - # Restart machine - machine.shutdown() - machine.start() - machine.wait_for_unit("default.target"); + # Test systemd-nspawn network configuration + machine.succeed("ping -n -c 1 ${containerName}"); - # Test auto-start - machine.succeed("machinectl show ${containerName}") + # Test systemd-nspawn uses a user namespace + machine.succeed("test $(machinectl status ${containerName} | grep 'UID Shift: ' | wc -l) = 1") - # Test machinectl stop - machine.succeed("machinectl stop ${containerName}"); - machine.wait_until_succeeds("test $(systemctl is-active systemd-nspawn@${containerName}) = inactive"); + # Test systemd-nspawn reboot + machine.succeed("machinectl shell ${containerName} /run/current-system/sw/bin/reboot"); + machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); - # Test tmpfs for /tmp - machine.fail("mountpoint /tmp"); + # Test machinectl reboot + machine.succeed("machinectl reboot ${containerName}"); + machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); - # Show to to delete the container - machine.succeed("chattr -i ${containerRoot}/var/empty"); - machine.succeed("rm -rf ${containerRoot}"); + # Restart machine + machine.shutdown() + machine.start() + machine.wait_for_unit("default.target"); - # Test import tarball, start, stop and remove - machine.succeed("machinectl import-tar ${containerTarball}/tarball/*.tar* ${containerName}"); - machine.succeed("machinectl start ${containerName}"); - machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); - machine.succeed("machinectl stop ${containerName}"); - machine.wait_until_succeeds("test $(systemctl is-active systemd-nspawn@${containerName}) = inactive"); - machine.succeed("machinectl remove ${containerName}"); - ''; - } -) + # Test auto-start + machine.succeed("machinectl show ${containerName}") + + # Test machinectl stop + machine.succeed("machinectl stop ${containerName}"); + machine.wait_until_succeeds("test $(systemctl is-active systemd-nspawn@${containerName}) = inactive"); + + # Test tmpfs for /tmp + machine.fail("mountpoint /tmp"); + + # Show to to delete the container + machine.succeed("chattr -i ${containerRoot}/var/empty"); + machine.succeed("rm -rf ${containerRoot}"); + + # Test import tarball, start, stop and remove + machine.succeed("machinectl import-tar ${containerTarball}/tarball/*.tar* ${containerName}"); + machine.succeed("machinectl start ${containerName}"); + machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target"); + machine.succeed("machinectl stop ${containerName}"); + machine.wait_until_succeeds("test $(systemctl is-active systemd-nspawn@${containerName}) = inactive"); + machine.succeed("machinectl remove ${containerName}"); + ''; +} diff --git a/nixos/tests/systemd-misc.nix b/nixos/tests/systemd-misc.nix index 632f3f59d47b..2623f78add63 100644 --- a/nixos/tests/systemd-misc.nix +++ b/nixos/tests/systemd-misc.nix @@ -1,66 +1,64 @@ -import ./make-test-python.nix ( - { pkgs, ... 
}: +{ pkgs, ... }: - let - exampleScript = pkgs.writeTextFile { - name = "example.sh"; - text = '' - #! ${pkgs.runtimeShell} -e +let + exampleScript = pkgs.writeTextFile { + name = "example.sh"; + text = '' + #! ${pkgs.runtimeShell} -e - while true; do - echo "Example script running" >&2 - ${pkgs.coreutils}/bin/sleep 1 - done - ''; - executable = true; - }; + while true; do + echo "Example script running" >&2 + ${pkgs.coreutils}/bin/sleep 1 + done + ''; + executable = true; + }; - unitFile = pkgs.writeTextFile { - name = "example.service"; - text = '' - [Unit] - Description=Example systemd service unit file + unitFile = pkgs.writeTextFile { + name = "example.service"; + text = '' + [Unit] + Description=Example systemd service unit file - [Service] - ExecStart=${exampleScript} + [Service] + ExecStart=${exampleScript} - [Install] - WantedBy=multi-user.target - ''; - }; - in - { - name = "systemd-misc"; + [Install] + WantedBy=multi-user.target + ''; + }; +in +{ + name = "systemd-misc"; - nodes.machine = - { pkgs, lib, ... }: - { - boot.extraSystemdUnitPaths = [ "/etc/systemd-rw/system" ]; + nodes.machine = + { pkgs, lib, ... }: + { + boot.extraSystemdUnitPaths = [ "/etc/systemd-rw/system" ]; - users.users.limited = { - isNormalUser = true; - uid = 1000; - }; - - systemd.units."user-1000.slice.d/limits.conf" = { - text = '' - [Slice] - TasksAccounting=yes - TasksMax=100 - ''; - }; + users.users.limited = { + isNormalUser = true; + uid = 1000; }; - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.succeed("mkdir -p /etc/systemd-rw/system") - machine.succeed( - "cp ${unitFile} /etc/systemd-rw/system/example.service" - ) - machine.succeed("systemctl start example.service") - machine.succeed("systemctl status example.service | grep 'Active: active'") + systemd.units."user-1000.slice.d/limits.conf" = { + text = '' + [Slice] + TasksAccounting=yes + TasksMax=100 + ''; + }; + }; - machine.succeed("systemctl show --property TasksMax --value user-1000.slice | grep 100") - ''; - } -) + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.succeed("mkdir -p /etc/systemd-rw/system") + machine.succeed( + "cp ${unitFile} /etc/systemd-rw/system/example.service" + ) + machine.succeed("systemctl start example.service") + machine.succeed("systemctl status example.service | grep 'Active: active'") + + machine.succeed("systemctl show --property TasksMax --value user-1000.slice | grep 100") + ''; +} diff --git a/nixos/tests/systemd-networkd-bridge.nix b/nixos/tests/systemd-networkd-bridge.nix index ec0547ce5cc4..80f2670209c7 100644 --- a/nixos/tests/systemd-networkd-bridge.nix +++ b/nixos/tests/systemd-networkd-bridge.nix @@ -89,57 +89,55 @@ let }; }; in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "networkd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ picnoir ]; +{ pkgs, ... 
}: +{ + name = "networkd"; + meta = with pkgs.lib.maintainers; { + maintainers = [ picnoir ]; + }; + nodes = { + node1 = generateNodeConf { + octet = 1; + vlan = 1; }; - nodes = { - node1 = generateNodeConf { - octet = 1; - vlan = 1; - }; - node2 = generateNodeConf { - octet = 2; - vlan = 3; - }; - node3 = generateNodeConf { - octet = 3; - vlan = 6; - }; - sw1 = generateSwitchConf [ - 1 - 2 - 4 - ]; - sw2 = generateSwitchConf [ - 2 - 3 - 5 - ]; - sw3 = generateSwitchConf [ - 4 - 5 - 6 - ]; + node2 = generateNodeConf { + octet = 2; + vlan = 3; }; - testScript = '' - network_nodes = [node1, node2, node3] - network_switches = [sw1, sw2, sw3] - start_all() + node3 = generateNodeConf { + octet = 3; + vlan = 6; + }; + sw1 = generateSwitchConf [ + 1 + 2 + 4 + ]; + sw2 = generateSwitchConf [ + 2 + 3 + 5 + ]; + sw3 = generateSwitchConf [ + 4 + 5 + 6 + ]; + }; + testScript = '' + network_nodes = [node1, node2, node3] + network_switches = [sw1, sw2, sw3] + start_all() - for n in network_nodes + network_switches: - n.systemctl("start systemd-networkd-wait-online.service") - n.wait_for_unit("systemd-networkd-wait-online.service") + for n in network_nodes + network_switches: + n.systemctl("start systemd-networkd-wait-online.service") + n.wait_for_unit("systemd-networkd-wait-online.service") - node1.succeed("ping 10.0.0.2 -w 10 -c 1") - node1.succeed("ping 10.0.0.3 -w 10 -c 1") - node2.succeed("ping 10.0.0.1 -w 10 -c 1") - node2.succeed("ping 10.0.0.3 -w 10 -c 1") - node3.succeed("ping 10.0.0.1 -w 10 -c 1") - node3.succeed("ping 10.0.0.2 -w 10 -c 1") - ''; - } -) + node1.succeed("ping 10.0.0.2 -w 10 -c 1") + node1.succeed("ping 10.0.0.3 -w 10 -c 1") + node2.succeed("ping 10.0.0.1 -w 10 -c 1") + node2.succeed("ping 10.0.0.3 -w 10 -c 1") + node3.succeed("ping 10.0.0.1 -w 10 -c 1") + node3.succeed("ping 10.0.0.2 -w 10 -c 1") + ''; +} diff --git a/nixos/tests/systemd-networkd-dhcpserver.nix b/nixos/tests/systemd-networkd-dhcpserver.nix index 59cc2cfd5245..ae3ec181654b 100644 --- a/nixos/tests/systemd-networkd-dhcpserver.nix +++ b/nixos/tests/systemd-networkd-dhcpserver.nix @@ -7,121 +7,119 @@ # br0 ----untagged---v # +---PVID 1+VLAN 2---[bridge]---PVID 2---eth1 # vlan2 ---VLAN 2----^ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "systemd-networkd-dhcpserver"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; - nodes = { - router = - { config, pkgs, ... }: - { - virtualisation.vlans = [ 1 ]; - systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug"; - networking = { - useNetworkd = true; - useDHCP = false; - firewall.enable = false; - }; - systemd.network = { - netdevs = { - br0 = { - enable = true; - netdevConfig = { - Name = "br0"; - Kind = "bridge"; - }; - extraConfig = '' - [Bridge] - VLANFiltering=yes - DefaultPVID=none - ''; - }; - vlan2 = { - enable = true; - netdevConfig = { - Name = "vlan2"; - Kind = "vlan"; - }; - vlanConfig.Id = 2; +{ pkgs, ... }: +{ + name = "systemd-networkd-dhcpserver"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; + nodes = { + router = + { config, pkgs, ... 
}: + { + virtualisation.vlans = [ 1 ]; + systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug"; + networking = { + useNetworkd = true; + useDHCP = false; + firewall.enable = false; + }; + systemd.network = { + netdevs = { + br0 = { + enable = true; + netdevConfig = { + Name = "br0"; + Kind = "bridge"; }; + extraConfig = '' + [Bridge] + VLANFiltering=yes + DefaultPVID=none + ''; }; - networks = { - # systemd-networkd will load the first network unit file - # that matches, ordered lexiographically by filename. - # /etc/systemd/network/{40-eth1,99-main}.network already - # exists. This network unit must be loaded for the test, - # however, hence why this network is named such. - "01-eth1" = { - name = "eth1"; - networkConfig.Bridge = "br0"; - bridgeVLANs = [ - { - PVID = 2; - EgressUntagged = 2; - } - ]; + vlan2 = { + enable = true; + netdevConfig = { + Name = "vlan2"; + Kind = "vlan"; }; - "02-br0" = { - name = "br0"; - networkConfig = { - DHCPServer = true; - Address = "10.0.0.1/24"; - VLAN = [ "vlan2" ]; - }; - dhcpServerConfig = { - PoolOffset = 100; - PoolSize = 1; - }; - bridgeVLANs = [ - { - PVID = 1; - EgressUntagged = 1; - } - { VLAN = 2; } - ]; + vlanConfig.Id = 2; + }; + }; + networks = { + # systemd-networkd will load the first network unit file + # that matches, ordered lexiographically by filename. + # /etc/systemd/network/{40-eth1,99-main}.network already + # exists. This network unit must be loaded for the test, + # however, hence why this network is named such. + "01-eth1" = { + name = "eth1"; + networkConfig.Bridge = "br0"; + bridgeVLANs = [ + { + PVID = 2; + EgressUntagged = 2; + } + ]; + }; + "02-br0" = { + name = "br0"; + networkConfig = { + DHCPServer = true; + Address = "10.0.0.1/24"; + VLAN = [ "vlan2" ]; }; - "02-vlan2" = { - name = "vlan2"; - networkConfig = { - DHCPServer = true; - Address = "10.0.2.1/24"; - }; - dhcpServerConfig = { - PoolOffset = 100; - PoolSize = 1; - }; + dhcpServerConfig = { + PoolOffset = 100; + PoolSize = 1; + }; + bridgeVLANs = [ + { + PVID = 1; + EgressUntagged = 1; + } + { VLAN = 2; } + ]; + }; + "02-vlan2" = { + name = "vlan2"; + networkConfig = { + DHCPServer = true; + Address = "10.0.2.1/24"; + }; + dhcpServerConfig = { + PoolOffset = 100; + PoolSize = 1; }; }; }; }; + }; - client = - { config, pkgs, ... }: - { - virtualisation.vlans = [ 1 ]; - systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug"; - networking = { - useNetworkd = true; - useDHCP = false; - firewall.enable = false; - interfaces.eth1.useDHCP = true; - }; + client = + { config, pkgs, ... }: + { + virtualisation.vlans = [ 1 ]; + systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug"; + networking = { + useNetworkd = true; + useDHCP = false; + firewall.enable = false; + interfaces.eth1.useDHCP = true; }; - }; - testScript = - { ... }: - '' - start_all() + }; + }; + testScript = + { ... 
}: + '' + start_all() - router.systemctl("start network-online.target") - client.systemctl("start network-online.target") - router.wait_for_unit("systemd-networkd-wait-online.service") - client.wait_for_unit("systemd-networkd-wait-online.service") - client.wait_until_succeeds("ping -c 5 10.0.2.1") - router.wait_until_succeeds("ping -c 5 10.0.2.100") - ''; - } -) + router.systemctl("start network-online.target") + client.systemctl("start network-online.target") + router.wait_for_unit("systemd-networkd-wait-online.service") + client.wait_for_unit("systemd-networkd-wait-online.service") + client.wait_until_succeeds("ping -c 5 10.0.2.1") + router.wait_until_succeeds("ping -c 5 10.0.2.100") + ''; +} diff --git a/nixos/tests/systemd-networkd-vrf.nix b/nixos/tests/systemd-networkd-vrf.nix index 357431fc297a..05225e457c1b 100644 --- a/nixos/tests/systemd-networkd-vrf.nix +++ b/nixos/tests/systemd-networkd-vrf.nix @@ -1,202 +1,200 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey; +{ pkgs, lib, ... }: +let + inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey; - mkNode = vlan: id: { - virtualisation.vlans = [ vlan ]; - networking = { - useDHCP = false; - useNetworkd = true; - }; + mkNode = vlan: id: { + virtualisation.vlans = [ vlan ]; + networking = { + useDHCP = false; + useNetworkd = true; + }; - systemd.network = { - enable = true; + systemd.network = { + enable = true; - networks."10-eth${toString vlan}" = { - matchConfig.Name = "eth${toString vlan}"; - linkConfig.RequiredForOnline = "no"; - networkConfig = { - Address = "192.168.${toString vlan}.${toString id}/24"; - IPv4Forwarding = "yes"; - IPv6Forwarding = "yes"; - }; + networks."10-eth${toString vlan}" = { + matchConfig.Name = "eth${toString vlan}"; + linkConfig.RequiredForOnline = "no"; + networkConfig = { + Address = "192.168.${toString vlan}.${toString id}/24"; + IPv4Forwarding = "yes"; + IPv6Forwarding = "yes"; }; }; }; - in - { - name = "systemd-networkd-vrf"; - meta.maintainers = with lib.maintainers; [ ma27 ]; + }; +in +{ + name = "systemd-networkd-vrf"; + meta.maintainers = with lib.maintainers; [ ma27 ]; - nodes = { - client = - { pkgs, ... }: - { - virtualisation.vlans = [ - 1 - 2 - ]; + nodes = { + client = + { pkgs, ... 
}: + { + virtualisation.vlans = [ + 1 + 2 + ]; - networking = { - useDHCP = false; - useNetworkd = true; - firewall.checkReversePath = "loose"; + networking = { + useDHCP = false; + useNetworkd = true; + firewall.checkReversePath = "loose"; + }; + + systemd.network = { + enable = true; + + netdevs."10-vrf1" = { + netdevConfig = { + Kind = "vrf"; + Name = "vrf1"; + MTUBytes = "1300"; + }; + vrfConfig.Table = 23; + }; + netdevs."10-vrf2" = { + netdevConfig = { + Kind = "vrf"; + Name = "vrf2"; + MTUBytes = "1300"; + }; + vrfConfig.Table = 42; }; - systemd.network = { - enable = true; + networks."10-vrf1" = { + matchConfig.Name = "vrf1"; + networkConfig.IPv4Forwarding = "yes"; + networkConfig.IPv6Forwarding = "yes"; + routes = [ + { + Destination = "192.168.1.2"; + Metric = 100; + } + ]; + }; + networks."10-vrf2" = { + matchConfig.Name = "vrf2"; + networkConfig.IPv4Forwarding = "yes"; + networkConfig.IPv6Forwarding = "yes"; + routes = [ + { + Destination = "192.168.2.3"; + Metric = 100; + } + ]; + }; - netdevs."10-vrf1" = { - netdevConfig = { - Kind = "vrf"; - Name = "vrf1"; - MTUBytes = "1300"; - }; - vrfConfig.Table = 23; + networks."10-eth1" = { + matchConfig.Name = "eth1"; + linkConfig.RequiredForOnline = "no"; + networkConfig = { + VRF = "vrf1"; + Address = "192.168.1.1/24"; + IPv4Forwarding = "yes"; + IPv6Forwarding = "yes"; }; - netdevs."10-vrf2" = { - netdevConfig = { - Kind = "vrf"; - Name = "vrf2"; - MTUBytes = "1300"; - }; - vrfConfig.Table = 42; - }; - - networks."10-vrf1" = { - matchConfig.Name = "vrf1"; - networkConfig.IPv4Forwarding = "yes"; - networkConfig.IPv6Forwarding = "yes"; - routes = [ - { - Destination = "192.168.1.2"; - Metric = 100; - } - ]; - }; - networks."10-vrf2" = { - matchConfig.Name = "vrf2"; - networkConfig.IPv4Forwarding = "yes"; - networkConfig.IPv6Forwarding = "yes"; - routes = [ - { - Destination = "192.168.2.3"; - Metric = 100; - } - ]; - }; - - networks."10-eth1" = { - matchConfig.Name = "eth1"; - linkConfig.RequiredForOnline = "no"; - networkConfig = { - VRF = "vrf1"; - Address = "192.168.1.1/24"; - IPv4Forwarding = "yes"; - IPv6Forwarding = "yes"; - }; - }; - networks."10-eth2" = { - matchConfig.Name = "eth2"; - linkConfig.RequiredForOnline = "no"; - networkConfig = { - VRF = "vrf2"; - Address = "192.168.2.1/24"; - IPv4Forwarding = "yes"; - IPv6Forwarding = "yes"; - }; + }; + networks."10-eth2" = { + matchConfig.Name = "eth2"; + linkConfig.RequiredForOnline = "no"; + networkConfig = { + VRF = "vrf2"; + Address = "192.168.2.1/24"; + IPv4Forwarding = "yes"; + IPv6Forwarding = "yes"; }; }; }; + }; - node1 = lib.mkMerge [ - (mkNode 1 2) - { - services.openssh.enable = true; - users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - } - ]; + node1 = lib.mkMerge [ + (mkNode 1 2) + { + services.openssh.enable = true; + users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; + } + ]; - node2 = mkNode 2 3; - node3 = mkNode 2 4; - }; + node2 = mkNode 2 3; + node3 = mkNode 2 4; + }; - testScript = '' - import json + testScript = '' + import json - def compare(raw_json, to_compare): - data = json.loads(raw_json) - assert len(raw_json) >= len(to_compare) - for i, row in enumerate(to_compare): - actual = data[i] - assert len(row.keys()) > 0 - for key, value in row.items(): - assert value == actual[key], f""" - In entry {i}, value {key}: got: {actual[key]}, expected {value} - """ + def compare(raw_json, to_compare): + data = json.loads(raw_json) + assert len(raw_json) >= len(to_compare) + for i, row in enumerate(to_compare): + actual = 
data[i] + assert len(row.keys()) > 0 + for key, value in row.items(): + assert value == actual[key], f""" + In entry {i}, value {key}: got: {actual[key]}, expected {value} + """ - start_all() + start_all() - client.wait_for_unit("network.target") - node1.wait_for_unit("network.target") - node2.wait_for_unit("network.target") - node3.wait_for_unit("network.target") + client.wait_for_unit("network.target") + node1.wait_for_unit("network.target") + node2.wait_for_unit("network.target") + node3.wait_for_unit("network.target") - # Check that networkd properly configures the main routing table - # and the routing tables for the VRF. - with subtest("check vrf routing tables"): - compare( - client.succeed("ip --json -4 route list"), - [ - {"dst": "192.168.1.2", "dev": "vrf1", "metric": 100}, - {"dst": "192.168.2.3", "dev": "vrf2", "metric": 100} - ] - ) - compare( - client.succeed("ip --json -4 route list table 23"), - [ - {"dst": "192.168.1.0/24", "dev": "eth1", "prefsrc": "192.168.1.1"}, - {"type": "local", "dst": "192.168.1.1", "dev": "eth1", "prefsrc": "192.168.1.1"}, - {"type": "broadcast", "dev": "eth1", "prefsrc": "192.168.1.1", "dst": "192.168.1.255"} - ] - ) - compare( - client.succeed("ip --json -4 route list table 42"), - [ - {"dst": "192.168.2.0/24", "dev": "eth2", "prefsrc": "192.168.2.1"}, - {"type": "local", "dst": "192.168.2.1", "dev": "eth2", "prefsrc": "192.168.2.1"}, - {"type": "broadcast", "dev": "eth2", "prefsrc": "192.168.2.1", "dst": "192.168.2.255"} - ] - ) + # Check that networkd properly configures the main routing table + # and the routing tables for the VRF. + with subtest("check vrf routing tables"): + compare( + client.succeed("ip --json -4 route list"), + [ + {"dst": "192.168.1.2", "dev": "vrf1", "metric": 100}, + {"dst": "192.168.2.3", "dev": "vrf2", "metric": 100} + ] + ) + compare( + client.succeed("ip --json -4 route list table 23"), + [ + {"dst": "192.168.1.0/24", "dev": "eth1", "prefsrc": "192.168.1.1"}, + {"type": "local", "dst": "192.168.1.1", "dev": "eth1", "prefsrc": "192.168.1.1"}, + {"type": "broadcast", "dev": "eth1", "prefsrc": "192.168.1.1", "dst": "192.168.1.255"} + ] + ) + compare( + client.succeed("ip --json -4 route list table 42"), + [ + {"dst": "192.168.2.0/24", "dev": "eth2", "prefsrc": "192.168.2.1"}, + {"type": "local", "dst": "192.168.2.1", "dev": "eth2", "prefsrc": "192.168.2.1"}, + {"type": "broadcast", "dev": "eth2", "prefsrc": "192.168.2.1", "dst": "192.168.2.255"} + ] + ) - # Ensure that other nodes are reachable via ICMP through the VRF. - with subtest("icmp through vrf works"): - client.succeed("ping -c5 192.168.1.2") - client.succeed("ping -c5 192.168.2.3") + # Ensure that other nodes are reachable via ICMP through the VRF. + with subtest("icmp through vrf works"): + client.succeed("ping -c5 192.168.1.2") + client.succeed("ping -c5 192.168.2.3") - # Test whether TCP through a VRF IP is possible. - with subtest("tcp traffic through vrf works"): - node1.wait_for_open_port(22) - client.succeed( - "cat ${snakeOilPrivateKey} > privkey.snakeoil" - ) - client.succeed("chmod 600 privkey.snakeoil") - client.succeed( - "ulimit -l 2048; ip vrf exec vrf1 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil root@192.168.1.2 true" - ) + # Test whether TCP through a VRF IP is possible. 
+ with subtest("tcp traffic through vrf works"): + node1.wait_for_open_port(22) + client.succeed( + "cat ${snakeOilPrivateKey} > privkey.snakeoil" + ) + client.succeed("chmod 600 privkey.snakeoil") + client.succeed( + "ulimit -l 2048; ip vrf exec vrf1 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil root@192.168.1.2 true" + ) - # Only configured routes through the VRF from the main routing table should - # work. Additional IPs are only reachable when binding to the vrf interface. - with subtest("only routes from main routing table work by default"): - client.fail("ping -c5 192.168.2.4") - client.succeed("ping -I vrf2 -c5 192.168.2.4") + # Only configured routes through the VRF from the main routing table should + # work. Additional IPs are only reachable when binding to the vrf interface. + with subtest("only routes from main routing table work by default"): + client.fail("ping -c5 192.168.2.4") + client.succeed("ping -I vrf2 -c5 192.168.2.4") - client.shutdown() - node1.shutdown() - node2.shutdown() - node3.shutdown() - ''; - } -) + client.shutdown() + node1.shutdown() + node2.shutdown() + node3.shutdown() + ''; +} diff --git a/nixos/tests/systemd-networkd.nix b/nixos/tests/systemd-networkd.nix index 7bde032871d5..940192b1ad7f 100644 --- a/nixos/tests/systemd-networkd.nix +++ b/nixos/tests/systemd-networkd.nix @@ -125,83 +125,81 @@ let }; }; in -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "networkd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ picnoir ]; - }; - nodes = { - node1 = - { pkgs, ... }@attrs: - let - localConf = { - privk = "GDiXWlMQKb379XthwX0haAbK6hTdjblllpjGX0heP00="; - pubk = "iRxpqj42nnY0Qz8MAQbSm7bXxXP5hkPqWYIULmvW+EE="; - systemdCreds = false; - nodeId = "1"; - peerId = "2"; - }; - in - generateNodeConf (attrs // localConf); +{ pkgs, ... }: +{ + name = "networkd"; + meta = with pkgs.lib.maintainers; { + maintainers = [ picnoir ]; + }; + nodes = { + node1 = + { pkgs, ... }@attrs: + let + localConf = { + privk = "GDiXWlMQKb379XthwX0haAbK6hTdjblllpjGX0heP00="; + pubk = "iRxpqj42nnY0Qz8MAQbSm7bXxXP5hkPqWYIULmvW+EE="; + systemdCreds = false; + nodeId = "1"; + peerId = "2"; + }; + in + generateNodeConf (attrs // localConf); - node2 = - { pkgs, ... }@attrs: - let - localConf = { - privk = "eHxSI2jwX/P4AOI0r8YppPw0+4NZnjOxfbS5mt06K2k="; - pubk = "27s0OvaBBdHoJYkH9osZpjpgSOVNw+RaKfboT/Sfq0g="; - systemdCreds = true; - nodeId = "2"; - peerId = "1"; - }; - in - generateNodeConf (attrs // localConf); - }; - testScript = '' - start_all() - node1.systemctl("start systemd-networkd-wait-online@eth1.service") - node1.systemctl("start systemd-networkd-wait-online.service") - node1.wait_for_unit("systemd-networkd-wait-online@eth1.service") - node1.wait_for_unit("systemd-networkd-wait-online.service") - node2.systemctl("start systemd-networkd-wait-online@eth1.service") - node2.systemctl("start systemd-networkd-wait-online.service") - node2.wait_for_unit("systemd-networkd-wait-online@eth1.service") - node2.wait_for_unit("systemd-networkd-wait-online.service") + node2 = + { pkgs, ... 
}@attrs: + let + localConf = { + privk = "eHxSI2jwX/P4AOI0r8YppPw0+4NZnjOxfbS5mt06K2k="; + pubk = "27s0OvaBBdHoJYkH9osZpjpgSOVNw+RaKfboT/Sfq0g="; + systemdCreds = true; + nodeId = "2"; + peerId = "1"; + }; + in + generateNodeConf (attrs // localConf); + }; + testScript = '' + start_all() + node1.systemctl("start systemd-networkd-wait-online@eth1.service") + node1.systemctl("start systemd-networkd-wait-online.service") + node1.wait_for_unit("systemd-networkd-wait-online@eth1.service") + node1.wait_for_unit("systemd-networkd-wait-online.service") + node2.systemctl("start systemd-networkd-wait-online@eth1.service") + node2.systemctl("start systemd-networkd-wait-online.service") + node2.wait_for_unit("systemd-networkd-wait-online@eth1.service") + node2.wait_for_unit("systemd-networkd-wait-online.service") - # ================================ - # Networkd Config - # ================================ - node1.succeed("grep RouteTable=custom:23 /etc/systemd/networkd.conf") - node1.succeed("sudo ip route show table custom | grep '10.0.0.0/24 via 10.0.0.1 dev wg0 proto static'") + # ================================ + # Networkd Config + # ================================ + node1.succeed("grep RouteTable=custom:23 /etc/systemd/networkd.conf") + node1.succeed("sudo ip route show table custom | grep '10.0.0.0/24 via 10.0.0.1 dev wg0 proto static'") - # ================================ - # Wireguard - # ================================ - node1.succeed("ping -c 5 10.0.0.2") - node2.succeed("ping -c 5 10.0.0.1") - # Is the fwmark set? - node2.succeed("wg | grep -q 42") + # ================================ + # Wireguard + # ================================ + node1.succeed("ping -c 5 10.0.0.2") + node2.succeed("ping -c 5 10.0.0.1") + # Is the fwmark set? + node2.succeed("wg | grep -q 42") - # ================================ - # Routing Policies - # ================================ - # Testing all the routingPolicyRuleConfig members: - # Table + IncomingInterface - node1.succeed("sudo ip rule | grep 'from all iif eth1 lookup 10'") - # OutgoingInterface - node1.succeed("sudo ip rule | grep 'from all oif eth1 lookup 20'") - # From + To + SourcePort + DestinationPort - node1.succeed( - "sudo ip rule | grep 'from 192.168.1.1 to 192.168.1.2 sport 666 dport 667 lookup 30'" - ) - # IPProtocol + InvertRule - node1.succeed("sudo ip rule | grep 'not from all ipproto tcp lookup 40'") - # FirewallMark without a mask - node1.succeed("sudo ip rule | grep 'from all fwmark 0x4 lookup 60'") - # FirewallMark with a mask - node1.succeed("sudo ip rule | grep 'from all fwmark 0x10/0x1f lookup 70'") - ''; - } -) + # ================================ + # Routing Policies + # ================================ + # Testing all the routingPolicyRuleConfig members: + # Table + IncomingInterface + node1.succeed("sudo ip rule | grep 'from all iif eth1 lookup 10'") + # OutgoingInterface + node1.succeed("sudo ip rule | grep 'from all oif eth1 lookup 20'") + # From + To + SourcePort + DestinationPort + node1.succeed( + "sudo ip rule | grep 'from 192.168.1.1 to 192.168.1.2 sport 666 dport 667 lookup 30'" + ) + # IPProtocol + InvertRule + node1.succeed("sudo ip rule | grep 'not from all ipproto tcp lookup 40'") + # FirewallMark without a mask + node1.succeed("sudo ip rule | grep 'from all fwmark 0x4 lookup 60'") + # FirewallMark with a mask + node1.succeed("sudo ip rule | grep 'from all fwmark 0x10/0x1f lookup 70'") + ''; +} diff --git a/nixos/tests/systemd-no-tainted.nix b/nixos/tests/systemd-no-tainted.nix index 12b68868415c..8b3059345181 
100644 --- a/nixos/tests/systemd-no-tainted.nix +++ b/nixos/tests/systemd-no-tainted.nix @@ -1,17 +1,15 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "systemd-no-tainted"; +{ pkgs, ... }: +{ + name = "systemd-no-tainted"; - nodes.machine = { }; + nodes.machine = { }; - testScript = '' - machine.wait_for_unit("multi-user.target") - with subtest("systemctl should not report tainted with unmerged-usr"): - output = machine.succeed("systemctl status") - print(output) - assert "Tainted" not in output - assert "unmerged-usr" not in output - ''; - } -) + testScript = '' + machine.wait_for_unit("multi-user.target") + with subtest("systemctl should not report tainted with unmerged-usr"): + output = machine.succeed("systemctl status") + print(output) + assert "Tainted" not in output + assert "unmerged-usr" not in output + ''; +} diff --git a/nixos/tests/systemd-nspawn-configfile.nix b/nixos/tests/systemd-nspawn-configfile.nix index 8dd0260d2aee..d8b9e714a858 100644 --- a/nixos/tests/systemd-nspawn-configfile.nix +++ b/nixos/tests/systemd-nspawn-configfile.nix @@ -1,133 +1,131 @@ -import ./make-test-python.nix ( - { lib, ... }: - let - execOptions = [ - "Boot" - "ProcessTwo" - "Parameters" - "Environment" - "User" - "WorkingDirectory" - "PivotRoot" - "Capability" - "DropCapability" - "NoNewPrivileges" - "KillSignal" - "Personality" - "MachineID" - "PrivateUsers" - "NotifyReady" - "SystemCallFilter" - "LimitCPU" - "LimitFSIZE" - "LimitDATA" - "LimitSTACK" - "LimitCORE" - "LimitRSS" - "LimitNOFILE" - "LimitAS" - "LimitNPROC" - "LimitMEMLOCK" - "LimitLOCKS" - "LimitSIGPENDING" - "LimitMSGQUEUE" - "LimitNICE" - "LimitRTPRIO" - "LimitRTTIME" - "OOMScoreAdjust" - "CPUAffinity" - "Hostname" - "ResolvConf" - "Timezone" - "LinkJournal" - "Ephemeral" - "AmbientCapability" - ]; +{ lib, ... 
}: +let + execOptions = [ + "Boot" + "ProcessTwo" + "Parameters" + "Environment" + "User" + "WorkingDirectory" + "PivotRoot" + "Capability" + "DropCapability" + "NoNewPrivileges" + "KillSignal" + "Personality" + "MachineID" + "PrivateUsers" + "NotifyReady" + "SystemCallFilter" + "LimitCPU" + "LimitFSIZE" + "LimitDATA" + "LimitSTACK" + "LimitCORE" + "LimitRSS" + "LimitNOFILE" + "LimitAS" + "LimitNPROC" + "LimitMEMLOCK" + "LimitLOCKS" + "LimitSIGPENDING" + "LimitMSGQUEUE" + "LimitNICE" + "LimitRTPRIO" + "LimitRTTIME" + "OOMScoreAdjust" + "CPUAffinity" + "Hostname" + "ResolvConf" + "Timezone" + "LinkJournal" + "Ephemeral" + "AmbientCapability" + ]; - filesOptions = [ - "ReadOnly" - "Volatile" - "Bind" - "BindReadOnly" - "TemporaryFileSystem" - "Overlay" - "OverlayReadOnly" - "PrivateUsersChown" - "BindUser" - "Inaccessible" - "PrivateUsersOwnership" - ]; + filesOptions = [ + "ReadOnly" + "Volatile" + "Bind" + "BindReadOnly" + "TemporaryFileSystem" + "Overlay" + "OverlayReadOnly" + "PrivateUsersChown" + "BindUser" + "Inaccessible" + "PrivateUsersOwnership" + ]; - networkOptions = [ - "Private" - "VirtualEthernet" - "VirtualEthernetExtra" - "Interface" - "MACVLAN" - "IPVLAN" - "Bridge" - "Zone" - "Port" - ]; + networkOptions = [ + "Private" + "VirtualEthernet" + "VirtualEthernetExtra" + "Interface" + "MACVLAN" + "IPVLAN" + "Bridge" + "Zone" + "Port" + ]; - optionsToConfig = opts: builtins.listToAttrs (map (n: lib.nameValuePair n "testdata") opts); + optionsToConfig = opts: builtins.listToAttrs (map (n: lib.nameValuePair n "testdata") opts); - grepForOptions = opts: '' - node.succeed( - "for o in ${builtins.concatStringsSep " " opts} ; do grep --quiet $o ${configFile} || exit 1 ; done" - )''; + grepForOptions = opts: '' + node.succeed( + "for o in ${builtins.concatStringsSep " " opts} ; do grep --quiet $o ${configFile} || exit 1 ; done" + )''; - unitName = "options-test"; - configFile = "/etc/systemd/nspawn/${unitName}.nspawn"; + unitName = "options-test"; + configFile = "/etc/systemd/nspawn/${unitName}.nspawn"; - in - { - name = "systemd-nspawn-configfile"; +in +{ + name = "systemd-nspawn-configfile"; - nodes = { - node = - { pkgs, ... }: - { - systemd.nspawn."${unitName}" = { - enable = true; + nodes = { + node = + { pkgs, ... 
}: + { + systemd.nspawn."${unitName}" = { + enable = true; - execConfig = optionsToConfig execOptions // { - Boot = true; - ProcessTwo = true; - NotifyReady = true; - }; + execConfig = optionsToConfig execOptions // { + Boot = true; + ProcessTwo = true; + NotifyReady = true; + }; - filesConfig = optionsToConfig filesOptions // { - ReadOnly = true; - Volatile = "state"; - PrivateUsersChown = true; - PrivateUsersOwnership = "auto"; - }; + filesConfig = optionsToConfig filesOptions // { + ReadOnly = true; + Volatile = "state"; + PrivateUsersChown = true; + PrivateUsersOwnership = "auto"; + }; - networkConfig = optionsToConfig networkOptions // { - Private = true; - VirtualEthernet = true; - }; + networkConfig = optionsToConfig networkOptions // { + Private = true; + VirtualEthernet = true; }; }; - }; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - node.wait_for_file("${configFile}") + node.wait_for_file("${configFile}") - with subtest("Test for presence of all specified options in config file"): - ${grepForOptions execOptions} - ${grepForOptions filesOptions} - ${grepForOptions networkOptions} + with subtest("Test for presence of all specified options in config file"): + ${grepForOptions execOptions} + ${grepForOptions filesOptions} + ${grepForOptions networkOptions} - with subtest("Test for absence of misspelled option 'MachineId' (instead of 'MachineID')"): - node.fail("grep --quiet MachineId ${configFile}") - ''; + with subtest("Test for absence of misspelled option 'MachineId' (instead of 'MachineID')"): + node.fail("grep --quiet MachineId ${configFile}") + ''; - meta.maintainers = [ - lib.maintainers.zi3m5f - ]; - } -) + meta.maintainers = [ + lib.maintainers.zi3m5f + ]; +} diff --git a/nixos/tests/systemd-nspawn.nix b/nixos/tests/systemd-nspawn.nix index feda47bf34ef..d54f3d8b5b31 100644 --- a/nixos/tests/systemd-nspawn.nix +++ b/nixos/tests/systemd-nspawn.nix @@ -1,68 +1,66 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - gpgKeyring = import ./common/gpg-keyring.nix { inherit pkgs; }; +{ pkgs, lib, ... }: +let + gpgKeyring = import ./common/gpg-keyring.nix { inherit pkgs; }; - nspawnImages = ( - pkgs.runCommand "localhost" - { - buildInputs = [ - pkgs.coreutils - pkgs.gnupg - ]; - } - '' - mkdir -p $out - cd $out + nspawnImages = ( + pkgs.runCommand "localhost" + { + buildInputs = [ + pkgs.coreutils + pkgs.gnupg + ]; + } + '' + mkdir -p $out + cd $out - # produce a testimage.raw - dd if=/dev/urandom of=$out/testimage.raw bs=$((1024*1024+7)) count=5 + # produce a testimage.raw + dd if=/dev/urandom of=$out/testimage.raw bs=$((1024*1024+7)) count=5 - # produce a testimage2.tar.xz, containing the hello store path - tar cvJpf testimage2.tar.xz ${pkgs.hello} + # produce a testimage2.tar.xz, containing the hello store path + tar cvJpf testimage2.tar.xz ${pkgs.hello} - # produce signature(s) - sha256sum testimage* > SHA256SUMS - export GNUPGHOME="$(mktemp -d)" - cp -R ${gpgKeyring}/* $GNUPGHOME - gpg --batch --sign --detach-sign --output SHA256SUMS.gpg SHA256SUMS - '' - ); - in - { - name = "systemd-nspawn"; + # produce signature(s) + sha256sum testimage* > SHA256SUMS + export GNUPGHOME="$(mktemp -d)" + cp -R ${gpgKeyring}/* $GNUPGHOME + gpg --batch --sign --detach-sign --output SHA256SUMS.gpg SHA256SUMS + '' + ); +in +{ + name = "systemd-nspawn"; - nodes = { - server = - { pkgs, ... 
}: - { - networking.firewall.allowedTCPPorts = [ 80 ]; - services.nginx = { - enable = true; - virtualHosts."server".root = nspawnImages; - }; + nodes = { + server = + { pkgs, ... }: + { + networking.firewall.allowedTCPPorts = [ 80 ]; + services.nginx = { + enable = true; + virtualHosts."server".root = nspawnImages; }; - client = - { pkgs, ... }: - { - environment.etc."systemd/import-pubring.gpg".source = "${gpgKeyring}/pubkey.gpg"; - }; - }; + }; + client = + { pkgs, ... }: + { + environment.etc."systemd/import-pubring.gpg".source = "${gpgKeyring}/pubkey.gpg"; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - server.wait_for_unit("nginx.service") - client.systemctl("start network-online.target") - client.wait_for_unit("network-online.target") - client.succeed("machinectl pull-raw --verify=signature http://server/testimage.raw") - client.succeed( - "cmp /var/lib/machines/testimage.raw ${nspawnImages}/testimage.raw" - ) - client.succeed("machinectl pull-tar --verify=signature http://server/testimage2.tar.xz") - client.succeed( - "cmp /var/lib/machines/testimage2/${pkgs.hello}/bin/hello ${pkgs.hello}/bin/hello" - ) - ''; - } -) + server.wait_for_unit("nginx.service") + client.systemctl("start network-online.target") + client.wait_for_unit("network-online.target") + client.succeed("machinectl pull-raw --verify=signature http://server/testimage.raw") + client.succeed( + "cmp /var/lib/machines/testimage.raw ${nspawnImages}/testimage.raw" + ) + client.succeed("machinectl pull-tar --verify=signature http://server/testimage2.tar.xz") + client.succeed( + "cmp /var/lib/machines/testimage2/${pkgs.hello}/bin/hello ${pkgs.hello}/bin/hello" + ) + ''; +} diff --git a/nixos/tests/systemd-oomd.nix b/nixos/tests/systemd-oomd.nix index 5c95c6705c71..265fabaf3a30 100644 --- a/nixos/tests/systemd-oomd.nix +++ b/nixos/tests/systemd-oomd.nix @@ -1,58 +1,56 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - { - name = "systemd-oomd"; +{ + name = "systemd-oomd"; - # This test is a simplified version of systemd's testsuite-55. - # https://github.com/systemd/systemd/blob/v251/test/units/testsuite-55.sh - nodes.machine = - { pkgs, ... }: - { - # Limit VM resource usage. - virtualisation.memorySize = 1024; - systemd.oomd.extraConfig.DefaultMemoryPressureDurationSec = "1s"; + # This test is a simplified version of systemd's testsuite-55. + # https://github.com/systemd/systemd/blob/v251/test/units/testsuite-55.sh + nodes.machine = + { pkgs, ... }: + { + # Limit VM resource usage. + virtualisation.memorySize = 1024; + systemd.oomd.extraConfig.DefaultMemoryPressureDurationSec = "1s"; - systemd.slices.workload = { - description = "Test slice for memory pressure kills"; - sliceConfig = { - MemoryAccounting = true; - ManagedOOMMemoryPressure = "kill"; - ManagedOOMMemoryPressureLimit = "10%"; - }; - }; - - systemd.services.testbloat = { - description = "Create a lot of memory pressure"; - serviceConfig = { - Slice = "workload.slice"; - MemoryHigh = "5M"; - ExecStart = "${pkgs.coreutils}/bin/tail /dev/zero"; - }; - }; - - systemd.services.testchill = { - description = "No memory pressure"; - serviceConfig = { - Slice = "workload.slice"; - MemoryHigh = "3M"; - ExecStart = "${pkgs.coreutils}/bin/sleep infinity"; - }; + systemd.slices.workload = { + description = "Test slice for memory pressure kills"; + sliceConfig = { + MemoryAccounting = true; + ManagedOOMMemoryPressure = "kill"; + ManagedOOMMemoryPressureLimit = "10%"; }; }; - testScript = '' - # Start the system. 
- machine.wait_for_unit("multi-user.target") - machine.succeed("oomctl") + systemd.services.testbloat = { + description = "Create a lot of memory pressure"; + serviceConfig = { + Slice = "workload.slice"; + MemoryHigh = "5M"; + ExecStart = "${pkgs.coreutils}/bin/tail /dev/zero"; + }; + }; - machine.succeed("systemctl start testchill.service") - with subtest("OOMd should kill the bad service"): - machine.fail("systemctl start --wait testbloat.service") - assert machine.get_unit_info("testbloat.service")["Result"] == "oom-kill" + systemd.services.testchill = { + description = "No memory pressure"; + serviceConfig = { + Slice = "workload.slice"; + MemoryHigh = "3M"; + ExecStart = "${pkgs.coreutils}/bin/sleep infinity"; + }; + }; + }; - with subtest("Service without memory pressure should be untouched"): - machine.require_unit_state("testchill.service", "active") - ''; - } -) + testScript = '' + # Start the system. + machine.wait_for_unit("multi-user.target") + machine.succeed("oomctl") + + machine.succeed("systemctl start testchill.service") + with subtest("OOMd should kill the bad service"): + machine.fail("systemctl start --wait testbloat.service") + assert machine.get_unit_info("testbloat.service")["Result"] == "oom-kill" + + with subtest("Service without memory pressure should be untouched"): + machine.require_unit_state("testchill.service", "active") + ''; +} diff --git a/nixos/tests/systemd-portabled.nix b/nixos/tests/systemd-portabled.nix index caece9089d22..cde16bcf3652 100644 --- a/nixos/tests/systemd-portabled.nix +++ b/nixos/tests/systemd-portabled.nix @@ -1,58 +1,56 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - demo-program = pkgs.writeShellScriptBin "demo" '' - while ${pkgs.coreutils}/bin/sleep 3; do - echo Hello World > /dev/null - done - ''; - demo-service = pkgs.writeText "demo.service" '' - [Unit] - Description=demo service - Requires=demo.socket - After=demo.socket +{ pkgs, lib, ... 
}: +let + demo-program = pkgs.writeShellScriptBin "demo" '' + while ${pkgs.coreutils}/bin/sleep 3; do + echo Hello World > /dev/null + done + ''; + demo-service = pkgs.writeText "demo.service" '' + [Unit] + Description=demo service + Requires=demo.socket + After=demo.socket - [Service] - Type=simple - ExecStart=${demo-program}/bin/demo - Restart=always + [Service] + Type=simple + ExecStart=${demo-program}/bin/demo + Restart=always - [Install] - WantedBy=multi-user.target - Also=demo.socket - ''; - demo-socket = pkgs.writeText "demo.socket" '' - [Unit] - Description=demo socket + [Install] + WantedBy=multi-user.target + Also=demo.socket + ''; + demo-socket = pkgs.writeText "demo.socket" '' + [Unit] + Description=demo socket - [Socket] - ListenStream=/run/demo.sock - SocketMode=0666 + [Socket] + ListenStream=/run/demo.sock + SocketMode=0666 - [Install] - WantedBy=sockets.target - ''; - demo-portable = pkgs.portableService { - pname = "demo"; - version = "1.0"; - description = ''A demo "Portable Service" for a shell program built with nix''; - units = [ - demo-service - demo-socket - ]; - }; - in - { + [Install] + WantedBy=sockets.target + ''; + demo-portable = pkgs.portableService { + pname = "demo"; + version = "1.0"; + description = ''A demo "Portable Service" for a shell program built with nix''; + units = [ + demo-service + demo-socket + ]; + }; +in +{ - name = "systemd-portabled"; - nodes.machine = { }; - testScript = '' - machine.succeed("portablectl") - machine.wait_for_unit("systemd-portabled.service") - machine.succeed("portablectl attach --now --runtime ${demo-portable}/demo_1.0.raw") - machine.wait_for_unit("demo.service") - machine.succeed("portablectl detach --now --runtime demo_1.0") - machine.fail("systemctl status demo.service") - ''; - } -) + name = "systemd-portabled"; + nodes.machine = { }; + testScript = '' + machine.succeed("portablectl") + machine.wait_for_unit("systemd-portabled.service") + machine.succeed("portablectl attach --now --runtime ${demo-portable}/demo_1.0.raw") + machine.wait_for_unit("demo.service") + machine.succeed("portablectl detach --now --runtime demo_1.0") + machine.fail("systemctl status demo.service") + ''; +} diff --git a/nixos/tests/systemd-resolved.nix b/nixos/tests/systemd-resolved.nix index 6256b7f2e675..25b926c75fdc 100644 --- a/nixos/tests/systemd-resolved.nix +++ b/nixos/tests/systemd-resolved.nix @@ -1,93 +1,91 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "systemd-resolved"; - meta.maintainers = [ lib.maintainers.elvishjerricco ]; +{ pkgs, lib, ... }: +{ + name = "systemd-resolved"; + meta.maintainers = [ lib.maintainers.elvishjerricco ]; - nodes.server = - { lib, config, ... }: - let - exampleZone = pkgs.writeTextDir "example.com.zone" '' - @ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800 - @ A ${(lib.head config.networking.interfaces.eth1.ipv4.addresses).address} - @ AAAA ${(lib.head config.networking.interfaces.eth1.ipv6.addresses).address} - ''; - in - { - networking.firewall.enable = false; - networking.useDHCP = false; - - networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ - { - address = "fd00::1"; - prefixLength = 64; - } - ]; - - services.knot = { - enable = true; - settings = { - server.listen = [ - "0.0.0.0@53" - "::@53" - ]; - template.default.storage = exampleZone; - zone."example.com".file = "example.com.zone"; - }; - }; - }; - - nodes.client = - { nodes, ... 
}: - let - inherit (lib.head nodes.server.networking.interfaces.eth1.ipv4.addresses) address; - in - { - networking.nameservers = [ address ]; - networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ - { - address = "fd00::2"; - prefixLength = 64; - } - ]; - services.resolved.enable = true; - services.resolved.fallbackDns = [ ]; - networking.useNetworkd = true; - networking.useDHCP = false; - systemd.network.networks."40-eth0".enable = false; - - testing.initrdBackdoor = true; - boot.initrd = { - systemd.enable = true; - systemd.initrdBin = [ pkgs.iputils ]; - network.enable = true; - services.resolved.enable = true; - }; - }; - - testScript = - { nodes, ... }: - let - address4 = (lib.head nodes.server.networking.interfaces.eth1.ipv4.addresses).address; - address6 = (lib.head nodes.server.networking.interfaces.eth1.ipv6.addresses).address; - in - '' - start_all() - server.wait_for_unit("multi-user.target") - - def test_client(): - query = client.succeed("resolvectl query example.com") - assert "${address4}" in query - assert "${address6}" in query - client.succeed("ping -4 -c 1 example.com") - client.succeed("ping -6 -c 1 example.com") - - client.wait_for_unit("initrd.target") - test_client() - client.switch_root() - - client.wait_for_unit("multi-user.target") - test_client() + nodes.server = + { lib, config, ... }: + let + exampleZone = pkgs.writeTextDir "example.com.zone" '' + @ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800 + @ A ${(lib.head config.networking.interfaces.eth1.ipv4.addresses).address} + @ AAAA ${(lib.head config.networking.interfaces.eth1.ipv6.addresses).address} ''; - } -) + in + { + networking.firewall.enable = false; + networking.useDHCP = false; + + networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ + { + address = "fd00::1"; + prefixLength = 64; + } + ]; + + services.knot = { + enable = true; + settings = { + server.listen = [ + "0.0.0.0@53" + "::@53" + ]; + template.default.storage = exampleZone; + zone."example.com".file = "example.com.zone"; + }; + }; + }; + + nodes.client = + { nodes, ... }: + let + inherit (lib.head nodes.server.networking.interfaces.eth1.ipv4.addresses) address; + in + { + networking.nameservers = [ address ]; + networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ + { + address = "fd00::2"; + prefixLength = 64; + } + ]; + services.resolved.enable = true; + services.resolved.fallbackDns = [ ]; + networking.useNetworkd = true; + networking.useDHCP = false; + systemd.network.networks."40-eth0".enable = false; + + testing.initrdBackdoor = true; + boot.initrd = { + systemd.enable = true; + systemd.initrdBin = [ pkgs.iputils ]; + network.enable = true; + services.resolved.enable = true; + }; + }; + + testScript = + { nodes, ... 
}: + let + address4 = (lib.head nodes.server.networking.interfaces.eth1.ipv4.addresses).address; + address6 = (lib.head nodes.server.networking.interfaces.eth1.ipv6.addresses).address; + in + '' + start_all() + server.wait_for_unit("multi-user.target") + + def test_client(): + query = client.succeed("resolvectl query example.com") + assert "${address4}" in query + assert "${address6}" in query + client.succeed("ping -4 -c 1 example.com") + client.succeed("ping -6 -c 1 example.com") + + client.wait_for_unit("initrd.target") + test_client() + client.switch_root() + + client.wait_for_unit("multi-user.target") + test_client() + ''; +} diff --git a/nixos/tests/systemd-shutdown.nix b/nixos/tests/systemd-shutdown.nix index fa0105cb90cf..56f9094c681c 100644 --- a/nixos/tests/systemd-shutdown.nix +++ b/nixos/tests/systemd-shutdown.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { - pkgs, - systemdStage1 ? false, - ... - }: - let - msg = "Shutting down NixOS"; - in - { - name = "systemd-shutdown"; - meta = with pkgs.lib.maintainers; { - maintainers = [ das_j ]; - }; +{ + pkgs, + systemdStage1 ? false, + ... +}: +let + msg = "Shutting down NixOS"; +in +{ + name = "systemd-shutdown"; + meta = with pkgs.lib.maintainers; { + maintainers = [ das_j ]; + }; - nodes.machine = { - imports = [ ../modules/profiles/minimal.nix ]; - systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/shutdown-message".source = - pkgs.writeShellScript "shutdown-message" '' - echo "${msg}" > /dev/kmsg - ''; - boot.initrd.systemd.enable = systemdStage1; - }; + nodes.machine = { + imports = [ ../modules/profiles/minimal.nix ]; + systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/shutdown-message".source = + pkgs.writeShellScript "shutdown-message" '' + echo "${msg}" > /dev/kmsg + ''; + boot.initrd.systemd.enable = systemdStage1; + }; - testScript = '' - # Check that 'generate-shutdown-ramfs.service' is started - # automatically and that 'systemd-shutdown' runs our script. - machine.wait_for_unit("multi-user.target") - # .shutdown() would wait for the machine to power off - machine.succeed("systemctl poweroff") - # Message printed by systemd-shutdown - machine.wait_for_console_text("Unmounting '/oldroot'") - machine.wait_for_console_text("${msg}") - # Don't try to sync filesystems - machine.wait_for_shutdown() + testScript = '' + # Check that 'generate-shutdown-ramfs.service' is started + # automatically and that 'systemd-shutdown' runs our script. + machine.wait_for_unit("multi-user.target") + # .shutdown() would wait for the machine to power off + machine.succeed("systemctl poweroff") + # Message printed by systemd-shutdown + machine.wait_for_console_text("Unmounting '/oldroot'") + machine.wait_for_console_text("${msg}") + # Don't try to sync filesystems + machine.wait_for_shutdown() - # In a separate boot, start 'generate-shutdown-ramfs.service' - # manually in order to check the permissions on '/run/initramfs'. - machine.systemctl("start generate-shutdown-ramfs.service") - stat = machine.succeed("stat --printf=%a:%u:%g /run/initramfs") - assert stat == "700:0:0", f"Improper permissions on /run/initramfs: {stat}" - ''; - } -) + # In a separate boot, start 'generate-shutdown-ramfs.service' + # manually in order to check the permissions on '/run/initramfs'. 
+ machine.systemctl("start generate-shutdown-ramfs.service") + stat = machine.succeed("stat --printf=%a:%u:%g /run/initramfs") + assert stat == "700:0:0", f"Improper permissions on /run/initramfs: {stat}" + ''; +} diff --git a/nixos/tests/systemd-timesyncd-nscd-dnssec.nix b/nixos/tests/systemd-timesyncd-nscd-dnssec.nix index fc3002dbf51a..df30751b507e 100644 --- a/nixos/tests/systemd-timesyncd-nscd-dnssec.nix +++ b/nixos/tests/systemd-timesyncd-nscd-dnssec.nix @@ -13,57 +13,55 @@ # server running. For this test to succeed, we only need to ensure that systemd-timesyncd # resolves the IP address of the fake.ntp host. -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - ntpHostname = "fake.ntp"; - ntpIP = "192.0.2.1"; - in - { - name = "systemd-timesyncd"; - nodes.machine = - { - pkgs, - lib, - config, - ... - }: - let - eth1IP = (lib.head config.networking.interfaces.eth1.ipv4.addresses).address; - in - { - # Setup a local DNS server for the NTP domain on the eth1 IP address - services.tinydns = { - enable = true; - ip = eth1IP; - data = '' - .ntp:${eth1IP} - +.${ntpHostname}:${ntpIP} - ''; - }; - - # Enable systemd-resolved with DNSSEC and use the local DNS as a name server - services.resolved.enable = true; - services.resolved.dnssec = "true"; - networking.nameservers = [ eth1IP ]; - - # Configure systemd-timesyncd to use our NTP hostname - services.timesyncd.enable = lib.mkForce true; - services.timesyncd.servers = [ ntpHostname ]; - services.timesyncd.extraConfig = '' - FallbackNTP=${ntpHostname} +let + ntpHostname = "fake.ntp"; + ntpIP = "192.0.2.1"; +in +{ + name = "systemd-timesyncd"; + nodes.machine = + { + pkgs, + lib, + config, + ... + }: + let + eth1IP = (lib.head config.networking.interfaces.eth1.ipv4.addresses).address; + in + { + # Setup a local DNS server for the NTP domain on the eth1 IP address + services.tinydns = { + enable = true; + ip = eth1IP; + data = '' + .ntp:${eth1IP} + +.${ntpHostname}:${ntpIP} ''; - - # The debug output is necessary to determine whether systemd-timesyncd successfully resolves our NTP hostname or not - systemd.services.systemd-timesyncd.environment.SYSTEMD_LOG_LEVEL = "debug"; }; - testScript = '' - machine.wait_for_unit("tinydns.service") - machine.wait_for_unit("systemd-timesyncd.service") - machine.fail("resolvectl query ${ntpHostname}") - machine.wait_until_succeeds("journalctl -u systemd-timesyncd.service --grep='Resolved address ${ntpIP}:123 for ${ntpHostname}'") - ''; - } -) + # Enable systemd-resolved with DNSSEC and use the local DNS as a name server + services.resolved.enable = true; + services.resolved.dnssec = "true"; + networking.nameservers = [ eth1IP ]; + + # Configure systemd-timesyncd to use our NTP hostname + services.timesyncd.enable = lib.mkForce true; + services.timesyncd.servers = [ ntpHostname ]; + services.timesyncd.extraConfig = '' + FallbackNTP=${ntpHostname} + ''; + + # The debug output is necessary to determine whether systemd-timesyncd successfully resolves our NTP hostname or not + systemd.services.systemd-timesyncd.environment.SYSTEMD_LOG_LEVEL = "debug"; + }; + + testScript = '' + machine.wait_for_unit("tinydns.service") + machine.wait_for_unit("systemd-timesyncd.service") + machine.fail("resolvectl query ${ntpHostname}") + machine.wait_until_succeeds("journalctl -u systemd-timesyncd.service --grep='Resolved address ${ntpIP}:123 for ${ntpHostname}'") + ''; +} diff --git a/nixos/tests/systemd-timesyncd.nix b/nixos/tests/systemd-timesyncd.nix index b1f618d947cd..83a3d820c50b 100644 --- 
a/nixos/tests/systemd-timesyncd.nix +++ b/nixos/tests/systemd-timesyncd.nix @@ -1,72 +1,70 @@ # Regression test for systemd-timesync having moved the state directory without # upstream providing a migration path. https://github.com/systemd/systemd/issues/12131 -import ./make-test-python.nix ( - let - common = +let + common = + { lib, ... }: + { + # override the `false` value from the qemu-vm base profile + services.timesyncd.enable = lib.mkForce true; + }; + mkVM = conf: { + imports = [ + conf + common + ]; + }; +in +{ + name = "systemd-timesyncd"; + nodes = { + current = mkVM { }; + pre1909 = mkVM ( { lib, ... }: { - # override the `false` value from the qemu-vm base profile - services.timesyncd.enable = lib.mkForce true; - }; - mkVM = conf: { - imports = [ - conf - common - ]; - }; - in - { - name = "systemd-timesyncd"; - nodes = { - current = mkVM { }; - pre1909 = mkVM ( - { lib, ... }: - { - # create the path that should be migrated by our activation script when - # upgrading to a newer nixos version - system.stateVersion = "19.03"; - systemd.services.old-timesync-state-dir = { - requiredBy = [ "sysinit.target" ]; - before = [ "systemd-timesyncd.service" ]; - after = [ "local-fs.target" ]; - unitConfig.DefaultDependencies = false; - serviceConfig.Type = "oneshot"; - script = '' - rm -rf /var/lib/systemd/timesync - mkdir -p /var/lib/systemd /var/lib/private/systemd/timesync - ln -s /var/lib/private/systemd/timesync /var/lib/systemd/timesync - chown systemd-timesync: /var/lib/private/systemd/timesync - ''; - }; - } - ); - }; + # create the path that should be migrated by our activation script when + # upgrading to a newer nixos version + system.stateVersion = "19.03"; + systemd.services.old-timesync-state-dir = { + requiredBy = [ "sysinit.target" ]; + before = [ "systemd-timesyncd.service" ]; + after = [ "local-fs.target" ]; + unitConfig.DefaultDependencies = false; + serviceConfig.Type = "oneshot"; + script = '' + rm -rf /var/lib/systemd/timesync + mkdir -p /var/lib/systemd /var/lib/private/systemd/timesync + ln -s /var/lib/private/systemd/timesync /var/lib/systemd/timesync + chown systemd-timesync: /var/lib/private/systemd/timesync + ''; + }; + } + ); + }; - testScript = '' - start_all() - current.succeed("systemctl status systemd-timesyncd.service") - # on a new install with a recent systemd there should not be any - # leftovers from the dynamic user mess - current.succeed("test -e /var/lib/systemd/timesync") - current.succeed("test ! -L /var/lib/systemd/timesync") + testScript = '' + start_all() + current.succeed("systemctl status systemd-timesyncd.service") + # on a new install with a recent systemd there should not be any + # leftovers from the dynamic user mess + current.succeed("test -e /var/lib/systemd/timesync") + current.succeed("test ! -L /var/lib/systemd/timesync") - # timesyncd should be running on the upgrading system since we fixed the - # file bits in the activation script - pre1909.succeed("systemctl status systemd-timesyncd.service") + # timesyncd should be running on the upgrading system since we fixed the + # file bits in the activation script + pre1909.succeed("systemctl status systemd-timesyncd.service") - # the path should be gone after the migration - pre1909.succeed("test ! -e /var/lib/private/systemd/timesync") + # the path should be gone after the migration + pre1909.succeed("test ! -e /var/lib/private/systemd/timesync") - # and the new path should no longer be a symlink - pre1909.succeed("test -e /var/lib/systemd/timesync") - pre1909.succeed("test ! 
-L /var/lib/systemd/timesync") + # and the new path should no longer be a symlink + pre1909.succeed("test -e /var/lib/systemd/timesync") + pre1909.succeed("test ! -L /var/lib/systemd/timesync") - # after a restart things should still work and not fail in the activation - # scripts and cause the boot to fail.. - pre1909.shutdown() - pre1909.start() - pre1909.succeed("systemctl status systemd-timesyncd.service") - ''; - } -) + # after a restart things should still work and not fail in the activation + # scripts and cause the boot to fail.. + pre1909.shutdown() + pre1909.start() + pre1909.succeed("systemctl status systemd-timesyncd.service") + ''; +} diff --git a/nixos/tests/systemd-user-linger.nix b/nixos/tests/systemd-user-linger.nix index 2c3d71668979..32094c74fa56 100644 --- a/nixos/tests/systemd-user-linger.nix +++ b/nixos/tests/systemd-user-linger.nix @@ -1,39 +1,37 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "systemd-user-linger"; +{ lib, ... }: +{ + name = "systemd-user-linger"; - nodes.machine = - { ... }: - { - users.users = { - alice = { - isNormalUser = true; - linger = true; - uid = 1000; - }; + nodes.machine = + { ... }: + { + users.users = { + alice = { + isNormalUser = true; + linger = true; + uid = 1000; + }; - bob = { - isNormalUser = true; - linger = false; - uid = 10001; - }; + bob = { + isNormalUser = true; + linger = false; + uid = 10001; }; }; + }; - testScript = - { ... }: - '' - machine.wait_for_file("/var/lib/systemd/linger/alice") - machine.succeed("systemctl status user-1000.slice") + testScript = + { ... }: + '' + machine.wait_for_file("/var/lib/systemd/linger/alice") + machine.succeed("systemctl status user-1000.slice") - machine.fail("test -e /var/lib/systemd/linger/bob") - machine.fail("systemctl status user-1001.slice") + machine.fail("test -e /var/lib/systemd/linger/bob") + machine.fail("systemctl status user-1001.slice") - with subtest("missing users have linger purged"): - machine.succeed("touch /var/lib/systemd/linger/missing") - machine.systemctl("restart linger-users") - machine.succeed("test ! -e /var/lib/systemd/linger/missing") - ''; - } -) + with subtest("missing users have linger purged"): + machine.succeed("touch /var/lib/systemd/linger/missing") + machine.systemctl("restart linger-users") + machine.succeed("test ! -e /var/lib/systemd/linger/missing") + ''; +} diff --git a/nixos/tests/systemd-user-tmpfiles-rules.nix b/nixos/tests/systemd-user-tmpfiles-rules.nix index 6e11e9d4e5f6..c74a52c4f169 100644 --- a/nixos/tests/systemd-user-tmpfiles-rules.nix +++ b/nixos/tests/systemd-user-tmpfiles-rules.nix @@ -1,42 +1,40 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "systemd-user-tmpfiles-rules"; +{ lib, ... }: +{ + name = "systemd-user-tmpfiles-rules"; - meta = with lib.maintainers; { - maintainers = [ schnusch ]; - }; + meta = with lib.maintainers; { + maintainers = [ schnusch ]; + }; - nodes.machine = - { ... }: - { - users.users = { - alice.isNormalUser = true; - bob.isNormalUser = true; - }; - - systemd.user.tmpfiles = { - rules = [ - "d %h/user_tmpfiles_created" - ]; - users.alice.rules = [ - "d %h/only_alice" - ]; - }; + nodes.machine = + { ... }: + { + users.users = { + alice.isNormalUser = true; + bob.isNormalUser = true; }; - testScript = - { ... 
}: - '' - machine.succeed("loginctl enable-linger alice bob") + systemd.user.tmpfiles = { + rules = [ + "d %h/user_tmpfiles_created" + ]; + users.alice.rules = [ + "d %h/only_alice" + ]; + }; + }; - machine.wait_until_succeeds("systemctl --user --machine=alice@ is-active systemd-tmpfiles-setup.service") - machine.succeed("[ -d ~alice/user_tmpfiles_created ]") - machine.succeed("[ -d ~alice/only_alice ]") + testScript = + { ... }: + '' + machine.succeed("loginctl enable-linger alice bob") - machine.wait_until_succeeds("systemctl --user --machine=bob@ is-active systemd-tmpfiles-setup.service") - machine.succeed("[ -d ~bob/user_tmpfiles_created ]") - machine.succeed("[ ! -e ~bob/only_alice ]") - ''; - } -) + machine.wait_until_succeeds("systemctl --user --machine=alice@ is-active systemd-tmpfiles-setup.service") + machine.succeed("[ -d ~alice/user_tmpfiles_created ]") + machine.succeed("[ -d ~alice/only_alice ]") + + machine.wait_until_succeeds("systemctl --user --machine=bob@ is-active systemd-tmpfiles-setup.service") + machine.succeed("[ -d ~bob/user_tmpfiles_created ]") + machine.succeed("[ ! -e ~bob/only_alice ]") + ''; +} diff --git a/nixos/tests/systemd-userdbd.nix b/nixos/tests/systemd-userdbd.nix index 75fe469ed963..4a505432e321 100644 --- a/nixos/tests/systemd-userdbd.nix +++ b/nixos/tests/systemd-userdbd.nix @@ -1,37 +1,35 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "systemd-userdbd"; - nodes.machine = - { config, pkgs, ... }: - { - services.userdbd.enable = true; +{ pkgs, lib, ... }: +{ + name = "systemd-userdbd"; + nodes.machine = + { config, pkgs, ... }: + { + services.userdbd.enable = true; - users.users.test-user-nss = { - isNormalUser = true; - }; - - environment.etc."userdb/test-user-dropin.user".text = builtins.toJSON { - userName = "test-user-dropin"; - }; - - environment.systemPackages = with pkgs; [ libvarlink ]; + users.users.test-user-nss = { + isNormalUser = true; }; - testScript = '' - import json - from shlex import quote - def getUserRecord(name): - Interface = "unix:/run/systemd/userdb/io.systemd.Multiplexer/io.systemd.UserDatabase" - payload = json.dumps({ - "service": "io.systemd.Multiplexer", - "userName": name - }) - return json.loads(machine.succeed(f"varlink call {Interface}.GetUserRecord {quote(payload)}")) + environment.etc."userdb/test-user-dropin.user".text = builtins.toJSON { + userName = "test-user-dropin"; + }; - machine.wait_for_unit("systemd-userdbd.socket") - getUserRecord("test-user-nss") - getUserRecord("test-user-dropin") - ''; - } -) + environment.systemPackages = with pkgs; [ libvarlink ]; + }; + testScript = '' + import json + from shlex import quote + + def getUserRecord(name): + Interface = "unix:/run/systemd/userdb/io.systemd.Multiplexer/io.systemd.UserDatabase" + payload = json.dumps({ + "service": "io.systemd.Multiplexer", + "userName": name + }) + return json.loads(machine.succeed(f"varlink call {Interface}.GetUserRecord {quote(payload)}")) + + machine.wait_for_unit("systemd-userdbd.socket") + getUserRecord("test-user-nss") + getUserRecord("test-user-dropin") + ''; +} diff --git a/nixos/tests/systemd.nix b/nixos/tests/systemd.nix index 9e01aedca406..f2ea77f2ed48 100644 --- a/nixos/tests/systemd.nix +++ b/nixos/tests/systemd.nix @@ -1,252 +1,250 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "systemd"; +{ pkgs, ... }: +{ + name = "systemd"; - nodes.machine = - { config, lib, ... }: - { - imports = [ - common/user-account.nix - common/x11.nix - ]; + nodes.machine = + { config, lib, ... 
}: + { + imports = [ + common/user-account.nix + common/x11.nix + ]; - virtualisation.emptyDiskImages = [ - 512 - 512 - ]; + virtualisation.emptyDiskImages = [ + 512 + 512 + ]; - environment.systemPackages = [ pkgs.cryptsetup ]; + environment.systemPackages = [ pkgs.cryptsetup ]; - virtualisation.fileSystems = { - "/test-x-initrd-mount" = { - device = "/dev/vdb"; - fsType = "ext2"; - autoFormat = true; - noCheck = true; - options = [ "x-initrd.mount" ]; - }; + virtualisation.fileSystems = { + "/test-x-initrd-mount" = { + device = "/dev/vdb"; + fsType = "ext2"; + autoFormat = true; + noCheck = true; + options = [ "x-initrd.mount" ]; }; + }; - systemd.extraConfig = "DefaultEnvironment=\"XXX_SYSTEM=foo\""; - systemd.user.extraConfig = "DefaultEnvironment=\"XXX_USER=bar\""; - services.journald.extraConfig = "Storage=volatile"; - test-support.displayManager.auto.user = "alice"; + systemd.extraConfig = "DefaultEnvironment=\"XXX_SYSTEM=foo\""; + systemd.user.extraConfig = "DefaultEnvironment=\"XXX_USER=bar\""; + services.journald.extraConfig = "Storage=volatile"; + test-support.displayManager.auto.user = "alice"; - systemd.shutdown.test = pkgs.writeScript "test.shutdown" '' - #!${pkgs.runtimeShell} - PATH=${ - lib.makeBinPath ( - with pkgs; - [ - util-linux - coreutils - ] - ) - } - mount -t 9p shared -o trans=virtio,version=9p2000.L /tmp/shared - touch /tmp/shared/shutdown-test - umount /tmp/shared - ''; + systemd.shutdown.test = pkgs.writeScript "test.shutdown" '' + #!${pkgs.runtimeShell} + PATH=${ + lib.makeBinPath ( + with pkgs; + [ + util-linux + coreutils + ] + ) + } + mount -t 9p shared -o trans=virtio,version=9p2000.L /tmp/shared + touch /tmp/shared/shutdown-test + umount /tmp/shared + ''; - systemd.services.oncalendar-test = { - description = "calendar test"; - # Japan does not have DST which makes the test a little bit simpler - startAt = "Wed 10:00 Asia/Tokyo"; - script = "true"; - }; + systemd.services.oncalendar-test = { + description = "calendar test"; + # Japan does not have DST which makes the test a little bit simpler + startAt = "Wed 10:00 Asia/Tokyo"; + script = "true"; + }; - systemd.services.testDependency1 = { - description = "Test Dependency 1"; - wantedBy = [ config.systemd.services."testservice1".name ]; - serviceConfig.Type = "oneshot"; - script = '' - true - ''; - }; - - systemd.services.testservice1 = { - description = "Test Service 1"; - wantedBy = [ config.systemd.targets.multi-user.name ]; - serviceConfig.Type = "oneshot"; - script = '' - if [ "$XXX_SYSTEM" = foo ]; then - touch /system_conf_read - fi - ''; - }; - - systemd.user.services.testservice2 = { - description = "Test Service 2"; - wantedBy = [ "default.target" ]; - serviceConfig.Type = "oneshot"; - script = '' - if [ "$XXX_USER" = bar ]; then - touch "$HOME/user_conf_read" - fi - ''; - }; - - systemd.watchdog = { - device = "/dev/watchdog"; - runtimeTime = "30s"; - rebootTime = "10min"; - kexecTime = "5min"; - }; - - environment.etc."systemd/system-preset/10-testservice.preset".text = '' - disable ${config.systemd.services.testservice1.name} + systemd.services.testDependency1 = { + description = "Test Dependency 1"; + wantedBy = [ config.systemd.services."testservice1".name ]; + serviceConfig.Type = "oneshot"; + script = '' + true ''; }; - testScript = - { nodes, ... 
}: - '' - import re - import subprocess + systemd.services.testservice1 = { + description = "Test Service 1"; + wantedBy = [ config.systemd.targets.multi-user.name ]; + serviceConfig.Type = "oneshot"; + script = '' + if [ "$XXX_SYSTEM" = foo ]; then + touch /system_conf_read + fi + ''; + }; - machine.start(allow_reboot=True) + systemd.user.services.testservice2 = { + description = "Test Service 2"; + wantedBy = [ "default.target" ]; + serviceConfig.Type = "oneshot"; + script = '' + if [ "$XXX_USER" = bar ]; then + touch "$HOME/user_conf_read" + fi + ''; + }; - # Will not succeed unless ConditionFirstBoot=yes - machine.wait_for_unit("first-boot-complete.target") + systemd.watchdog = { + device = "/dev/watchdog"; + runtimeTime = "30s"; + rebootTime = "10min"; + kexecTime = "5min"; + }; - # Make sure, a subsequent boot isn't a ConditionFirstBoot=yes. - machine.reboot() - machine.wait_for_x() - state = machine.get_unit_info("first-boot-complete.target")['ActiveState'] - assert state == 'inactive', "Detected first boot despite first-boot-completed.target was already reached on a previous boot." - - # wait for user services - machine.wait_for_unit("default.target", "alice") - - with subtest("systemctl edit suggests --runtime"): - # --runtime is suggested when using `systemctl edit` - ret, out = machine.execute("systemctl edit testservice1.service 2>&1") - assert ret == 1 - assert out.rstrip("\n") == "The unit-directory '/etc/systemd/system' is read-only on NixOS, so it's not possible to edit system-units directly. Use 'systemctl edit --runtime' instead." - # editing w/o `--runtime` is possible for user-services, however - # it's not possible because we're not in a tty when grepping - # (i.e. hacky way to ensure that the error from above doesn't appear here). - _, out = machine.execute("systemctl --user edit testservice2.service 2>&1") - assert out.rstrip("\n") == "Cannot edit units interactively if not on a tty." - - # Regression test for https://github.com/NixOS/nixpkgs/issues/105049 - with subtest("systemd reads timezone database in /etc/zoneinfo"): - timer = machine.succeed("TZ=UTC systemctl show --property=TimersCalendar oncalendar-test.timer") - assert re.search("next_elapse=Wed ....-..-.. 
01:00:00 UTC", timer), f"got {timer.strip()}" - - # Regression test for https://github.com/NixOS/nixpkgs/issues/35415 - with subtest("configuration files are recognized by systemd"): - machine.succeed("test -e /system_conf_read") - machine.succeed("test -e /home/alice/user_conf_read") - machine.succeed("test -z $(ls -1 /var/log/journal)") - - with subtest("regression test for https://bugs.freedesktop.org/show_bug.cgi?id=77507"): - retcode, output = machine.execute("systemctl status testservice1.service") - assert retcode in [0, 3] # https://bugs.freedesktop.org/show_bug.cgi?id=77507 - - # Regression test for https://github.com/NixOS/nixpkgs/issues/35268 - with subtest("file system with x-initrd.mount is not unmounted"): - machine.succeed("mountpoint -q /test-x-initrd-mount") - machine.shutdown() - - subprocess.check_call( - [ - "qemu-img", - "convert", - "-O", - "raw", - "vm-state-machine/empty0.qcow2", - "x-initrd-mount.raw", - ] - ) - extinfo = subprocess.check_output( - [ - "${pkgs.e2fsprogs}/bin/dumpe2fs", - "x-initrd-mount.raw", - ] - ).decode("utf-8") - assert ( - re.search(r"^Filesystem state: *clean$", extinfo, re.MULTILINE) is not None - ), ("File system was not cleanly unmounted: " + extinfo) - - # Regression test for https://github.com/NixOS/nixpkgs/pull/91232 - with subtest("setting transient hostnames works"): - machine.succeed("hostnamectl set-hostname --transient machine-transient") - machine.fail("hostnamectl set-hostname machine-all") - - with subtest("systemd-shutdown works"): - machine.shutdown() - machine.wait_for_unit("multi-user.target") - machine.succeed("test -e /tmp/shared/shutdown-test") - - # Test settings from /etc/sysctl.d/50-default.conf are applied - with subtest("systemd sysctl settings are applied"): - machine.wait_for_unit("multi-user.target") - assert "fq_codel" in machine.succeed("sysctl net.core.default_qdisc") - - # Test systemd is configured to manage a watchdog - with subtest("systemd manages hardware watchdog"): - machine.wait_for_unit("multi-user.target") - - # It seems that the device's path doesn't appear in 'systemctl show' so - # check it separately. - assert "WatchdogDevice=/dev/watchdog" in machine.succeed( - "cat /etc/systemd/system.conf" - ) - - output = machine.succeed("systemctl show | grep Watchdog") - # assert "RuntimeWatchdogUSec=30s" in output - # for some reason RuntimeWatchdogUSec, doesn't seem to be updated in here. 
- assert "RebootWatchdogUSec=10min" in output - assert "KExecWatchdogUSec=5min" in output - - # Test systemd cryptsetup support - with subtest("systemd successfully reads /etc/crypttab and unlocks volumes"): - # create a luks volume and put a filesystem on it - machine.succeed( - "echo -n supersecret | cryptsetup luksFormat -q /dev/vdc -", - "echo -n supersecret | cryptsetup luksOpen --key-file - /dev/vdc foo", - "mkfs.ext3 /dev/mapper/foo", - ) - - # create a keyfile and /etc/crypttab - machine.succeed("echo -n supersecret > /var/lib/luks-keyfile") - machine.succeed("chmod 600 /var/lib/luks-keyfile") - machine.succeed("echo 'luks1 /dev/vdc /var/lib/luks-keyfile luks' > /etc/crypttab") - - # after a reboot, systemd should unlock the volume and we should be able to mount it - machine.shutdown() - machine.succeed("systemctl status systemd-cryptsetup@luks1.service") - machine.succeed("mkdir -p /tmp/luks1") - machine.succeed("mount /dev/mapper/luks1 /tmp/luks1") - - # Do some IP traffic - output_ping = machine.succeed( - "systemd-run --wait -- ping -c 1 127.0.0.1 2>&1" - ) - - with subtest("systemd reports accounting data on system.slice"): - output = machine.succeed("systemctl status system.slice") - assert "CPU:" in output - assert "Memory:" in output - - assert "IP:" in output - assert "0B in, 0B out" not in output - - assert "IO:" in output - assert "0B read, 0B written" not in output - - with subtest("systemd per-unit accounting works"): - assert "IP traffic received: 84B sent: 84B" in output_ping - - with subtest("systemd environment is properly set"): - machine.systemctl("daemon-reexec") # Rewrites /proc/1/environ - machine.succeed("grep -q TZDIR=/etc/zoneinfo /proc/1/environ") - - with subtest("systemd presets are ignored"): - machine.succeed("systemctl preset ${nodes.machine.systemd.services.testservice1.name}") - machine.succeed("test -e /etc/systemd/system/${nodes.machine.systemd.services.testservice1.name}") + environment.etc."systemd/system-preset/10-testservice.preset".text = '' + disable ${config.systemd.services.testservice1.name} ''; - } -) + }; + + testScript = + { nodes, ... }: + '' + import re + import subprocess + + machine.start(allow_reboot=True) + + # Will not succeed unless ConditionFirstBoot=yes + machine.wait_for_unit("first-boot-complete.target") + + # Make sure, a subsequent boot isn't a ConditionFirstBoot=yes. + machine.reboot() + machine.wait_for_x() + state = machine.get_unit_info("first-boot-complete.target")['ActiveState'] + assert state == 'inactive', "Detected first boot despite first-boot-completed.target was already reached on a previous boot." + + # wait for user services + machine.wait_for_unit("default.target", "alice") + + with subtest("systemctl edit suggests --runtime"): + # --runtime is suggested when using `systemctl edit` + ret, out = machine.execute("systemctl edit testservice1.service 2>&1") + assert ret == 1 + assert out.rstrip("\n") == "The unit-directory '/etc/systemd/system' is read-only on NixOS, so it's not possible to edit system-units directly. Use 'systemctl edit --runtime' instead." + # editing w/o `--runtime` is possible for user-services, however + # it's not possible because we're not in a tty when grepping + # (i.e. hacky way to ensure that the error from above doesn't appear here). + _, out = machine.execute("systemctl --user edit testservice2.service 2>&1") + assert out.rstrip("\n") == "Cannot edit units interactively if not on a tty." 
+ + # Regression test for https://github.com/NixOS/nixpkgs/issues/105049 + with subtest("systemd reads timezone database in /etc/zoneinfo"): + timer = machine.succeed("TZ=UTC systemctl show --property=TimersCalendar oncalendar-test.timer") + assert re.search("next_elapse=Wed ....-..-.. 01:00:00 UTC", timer), f"got {timer.strip()}" + + # Regression test for https://github.com/NixOS/nixpkgs/issues/35415 + with subtest("configuration files are recognized by systemd"): + machine.succeed("test -e /system_conf_read") + machine.succeed("test -e /home/alice/user_conf_read") + machine.succeed("test -z $(ls -1 /var/log/journal)") + + with subtest("regression test for https://bugs.freedesktop.org/show_bug.cgi?id=77507"): + retcode, output = machine.execute("systemctl status testservice1.service") + assert retcode in [0, 3] # https://bugs.freedesktop.org/show_bug.cgi?id=77507 + + # Regression test for https://github.com/NixOS/nixpkgs/issues/35268 + with subtest("file system with x-initrd.mount is not unmounted"): + machine.succeed("mountpoint -q /test-x-initrd-mount") + machine.shutdown() + + subprocess.check_call( + [ + "qemu-img", + "convert", + "-O", + "raw", + "vm-state-machine/empty0.qcow2", + "x-initrd-mount.raw", + ] + ) + extinfo = subprocess.check_output( + [ + "${pkgs.e2fsprogs}/bin/dumpe2fs", + "x-initrd-mount.raw", + ] + ).decode("utf-8") + assert ( + re.search(r"^Filesystem state: *clean$", extinfo, re.MULTILINE) is not None + ), ("File system was not cleanly unmounted: " + extinfo) + + # Regression test for https://github.com/NixOS/nixpkgs/pull/91232 + with subtest("setting transient hostnames works"): + machine.succeed("hostnamectl set-hostname --transient machine-transient") + machine.fail("hostnamectl set-hostname machine-all") + + with subtest("systemd-shutdown works"): + machine.shutdown() + machine.wait_for_unit("multi-user.target") + machine.succeed("test -e /tmp/shared/shutdown-test") + + # Test settings from /etc/sysctl.d/50-default.conf are applied + with subtest("systemd sysctl settings are applied"): + machine.wait_for_unit("multi-user.target") + assert "fq_codel" in machine.succeed("sysctl net.core.default_qdisc") + + # Test systemd is configured to manage a watchdog + with subtest("systemd manages hardware watchdog"): + machine.wait_for_unit("multi-user.target") + + # It seems that the device's path doesn't appear in 'systemctl show' so + # check it separately. + assert "WatchdogDevice=/dev/watchdog" in machine.succeed( + "cat /etc/systemd/system.conf" + ) + + output = machine.succeed("systemctl show | grep Watchdog") + # assert "RuntimeWatchdogUSec=30s" in output + # for some reason RuntimeWatchdogUSec, doesn't seem to be updated in here. 
+ assert "RebootWatchdogUSec=10min" in output + assert "KExecWatchdogUSec=5min" in output + + # Test systemd cryptsetup support + with subtest("systemd successfully reads /etc/crypttab and unlocks volumes"): + # create a luks volume and put a filesystem on it + machine.succeed( + "echo -n supersecret | cryptsetup luksFormat -q /dev/vdc -", + "echo -n supersecret | cryptsetup luksOpen --key-file - /dev/vdc foo", + "mkfs.ext3 /dev/mapper/foo", + ) + + # create a keyfile and /etc/crypttab + machine.succeed("echo -n supersecret > /var/lib/luks-keyfile") + machine.succeed("chmod 600 /var/lib/luks-keyfile") + machine.succeed("echo 'luks1 /dev/vdc /var/lib/luks-keyfile luks' > /etc/crypttab") + + # after a reboot, systemd should unlock the volume and we should be able to mount it + machine.shutdown() + machine.succeed("systemctl status systemd-cryptsetup@luks1.service") + machine.succeed("mkdir -p /tmp/luks1") + machine.succeed("mount /dev/mapper/luks1 /tmp/luks1") + + # Do some IP traffic + output_ping = machine.succeed( + "systemd-run --wait -- ping -c 1 127.0.0.1 2>&1" + ) + + with subtest("systemd reports accounting data on system.slice"): + output = machine.succeed("systemctl status system.slice") + assert "CPU:" in output + assert "Memory:" in output + + assert "IP:" in output + assert "0B in, 0B out" not in output + + assert "IO:" in output + assert "0B read, 0B written" not in output + + with subtest("systemd per-unit accounting works"): + assert "IP traffic received: 84B sent: 84B" in output_ping + + with subtest("systemd environment is properly set"): + machine.systemctl("daemon-reexec") # Rewrites /proc/1/environ + machine.succeed("grep -q TZDIR=/etc/zoneinfo /proc/1/environ") + + with subtest("systemd presets are ignored"): + machine.succeed("systemctl preset ${nodes.machine.systemd.services.testservice1.name}") + machine.succeed("test -e /etc/systemd/system/${nodes.machine.systemd.services.testservice1.name}") + ''; +} diff --git a/nixos/tests/tandoor-recipes-script-name.nix b/nixos/tests/tandoor-recipes-script-name.nix index 6216d67b8084..b2a73771dddd 100644 --- a/nixos/tests/tandoor-recipes-script-name.nix +++ b/nixos/tests/tandoor-recipes-script-name.nix @@ -1,95 +1,93 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "tandoor-recipes-script-name"; +{ pkgs, lib, ... }: +{ + name = "tandoor-recipes-script-name"; - nodes.machine = - { pkgs, nodes, ... }: - { - services.tandoor-recipes = { - enable = true; - extraConfig = { - SCRIPT_NAME = "/any/path"; - STATIC_URL = "${nodes.machine.services.tandoor-recipes.extraConfig.SCRIPT_NAME}/static/"; - }; + nodes.machine = + { pkgs, nodes, ... }: + { + services.tandoor-recipes = { + enable = true; + extraConfig = { + SCRIPT_NAME = "/any/path"; + STATIC_URL = "${nodes.machine.services.tandoor-recipes.extraConfig.SCRIPT_NAME}/static/"; }; }; + }; - testScript = - { nodes, ... }: - let - inherit (nodes.machine.services.tandoor-recipes) address port; - inherit (nodes.machine.services.tandoor-recipes.extraConfig) SCRIPT_NAME; - in - '' - from html.parser import HTMLParser + testScript = + { nodes, ... 
}: + let + inherit (nodes.machine.services.tandoor-recipes) address port; + inherit (nodes.machine.services.tandoor-recipes.extraConfig) SCRIPT_NAME; + in + '' + from html.parser import HTMLParser - origin_url = "http://${address}:${toString port}" - base_url = f"{origin_url}${SCRIPT_NAME}" - login_path = "/admin/login/" - login_url = f"{base_url}{login_path}" + origin_url = "http://${address}:${toString port}" + base_url = f"{origin_url}${SCRIPT_NAME}" + login_path = "/admin/login/" + login_url = f"{base_url}{login_path}" - cookie_jar_path = "/tmp/cookies.txt" - curl = f"curl --cookie {cookie_jar_path} --cookie-jar {cookie_jar_path} --fail --header 'Origin: {origin_url}' --show-error --silent" + cookie_jar_path = "/tmp/cookies.txt" + curl = f"curl --cookie {cookie_jar_path} --cookie-jar {cookie_jar_path} --fail --header 'Origin: {origin_url}' --show-error --silent" - print("Wait for the service to respond") - machine.wait_for_unit("tandoor-recipes.service") - machine.wait_until_succeeds(f"{curl} {login_url}") + print("Wait for the service to respond") + machine.wait_for_unit("tandoor-recipes.service") + machine.wait_until_succeeds(f"{curl} {login_url}") - username = "username" - password = "password" + username = "username" + password = "password" - print("Create admin user") - machine.succeed( - f"DJANGO_SUPERUSER_PASSWORD='{password}' /var/lib/tandoor-recipes/tandoor-recipes-manage createsuperuser --no-input --username='{username}' --email=nobody@example.com" - ) + print("Create admin user") + machine.succeed( + f"DJANGO_SUPERUSER_PASSWORD='{password}' /var/lib/tandoor-recipes/tandoor-recipes-manage createsuperuser --no-input --username='{username}' --email=nobody@example.com" + ) - print("Get CSRF token for later requests") - csrf_token = machine.succeed(f"grep csrftoken {cookie_jar_path} | cut --fields=7").rstrip() + print("Get CSRF token for later requests") + csrf_token = machine.succeed(f"grep csrftoken {cookie_jar_path} | cut --fields=7").rstrip() - print("Log in as admin user") - machine.succeed( - f"{curl} --data 'csrfmiddlewaretoken={csrf_token}' --data 'username={username}' --data 'password={password}' {login_url}" - ) + print("Log in as admin user") + machine.succeed( + f"{curl} --data 'csrfmiddlewaretoken={csrf_token}' --data 'username={username}' --data 'password={password}' {login_url}" + ) - print("Get the contents of the logged in main page") - logged_in_page = machine.succeed(f"{curl} --location {base_url}") + print("Get the contents of the logged in main page") + logged_in_page = machine.succeed(f"{curl} --location {base_url}") - class UrlParser(HTMLParser): - def __init__(self): - super().__init__() + class UrlParser(HTMLParser): + def __init__(self): + super().__init__() - self.urls: list[str] = [] + self.urls: list[str] = [] - def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None: - if tag == "form": - for name, value in attrs: - if name == "action" and value is not None: - assert not value.endswith(login_path) - break + def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None: + if tag == "form": + for name, value in attrs: + if name == "action" and value is not None: + assert not value.endswith(login_path) + break - if tag != "a": - return + if tag != "a": + return - for name, value in attrs: - if name == "href" and value is not None: - if value.startswith(base_url): - self.urls.append(value) - elif value.startswith("/"): - self.urls.append(f"{origin_url}{value}") - else: - print(f"Ignoring non-path URL: {value}") 
+ for name, value in attrs: + if name == "href" and value is not None: + if value.startswith(base_url): + self.urls.append(value) + elif value.startswith("/"): + self.urls.append(f"{origin_url}{value}") + else: + print(f"Ignoring non-path URL: {value}") - break + break - parser = UrlParser() - parser.feed(logged_in_page) + parser = UrlParser() + parser.feed(logged_in_page) - for url in parser.urls: - with subtest(f"Verify that {url} can be reached"): - machine.succeed(f"{curl} {url}") - ''; + for url in parser.urls: + with subtest(f"Verify that {url} can be reached"): + machine.succeed(f"{curl} {url}") + ''; - meta.maintainers = with lib.maintainers; [ l0b0 ]; - } -) + meta.maintainers = with lib.maintainers; [ l0b0 ]; +} diff --git a/nixos/tests/tandoor-recipes.nix b/nixos/tests/tandoor-recipes.nix index 536746e092b9..0b2211af22fa 100644 --- a/nixos/tests/tandoor-recipes.nix +++ b/nixos/tests/tandoor-recipes.nix @@ -1,46 +1,44 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "tandoor-recipes"; - meta.maintainers = with lib.maintainers; [ ]; +{ lib, ... }: +{ + name = "tandoor-recipes"; + meta.maintainers = with lib.maintainers; [ ]; - nodes.machine = - { pkgs, ... }: - { - services.tandoor-recipes = { - enable = true; - extraConfig = { - DB_ENGINE = "django.db.backends.postgresql"; - POSTGRES_HOST = "/run/postgresql"; - POSTGRES_USER = "tandoor_recipes"; - POSTGRES_DB = "tandoor_recipes"; - }; - }; - - services.postgresql = { - enable = true; - ensureDatabases = [ "tandoor_recipes" ]; - ensureUsers = [ - { - name = "tandoor_recipes"; - ensureDBOwnership = true; - } - ]; - }; - - systemd.services = { - tandoor-recipes = { - after = [ "postgresql.service" ]; - }; + nodes.machine = + { pkgs, ... }: + { + services.tandoor-recipes = { + enable = true; + extraConfig = { + DB_ENGINE = "django.db.backends.postgresql"; + POSTGRES_HOST = "/run/postgresql"; + POSTGRES_USER = "tandoor_recipes"; + POSTGRES_DB = "tandoor_recipes"; }; }; - testScript = '' - machine.wait_for_unit("tandoor-recipes.service") + services.postgresql = { + enable = true; + ensureDatabases = [ "tandoor_recipes" ]; + ensureUsers = [ + { + name = "tandoor_recipes"; + ensureDBOwnership = true; + } + ]; + }; - with subtest("Web interface gets ready"): - # Wait until server accepts connections - machine.wait_until_succeeds("curl -fs localhost:8080") - ''; - } -) + systemd.services = { + tandoor-recipes = { + after = [ "postgresql.service" ]; + }; + }; + }; + + testScript = '' + machine.wait_for_unit("tandoor-recipes.service") + + with subtest("Web interface gets ready"): + # Wait until server accepts connections + machine.wait_until_succeeds("curl -fs localhost:8080") + ''; +} diff --git a/nixos/tests/tang.nix b/nixos/tests/tang.nix index 58d351061791..0241b62dd3c7 100644 --- a/nixos/tests/tang.nix +++ b/nixos/tests/tang.nix @@ -1,93 +1,91 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "tang"; - meta = with pkgs.lib.maintainers; { - maintainers = [ jfroche ]; - }; +{ pkgs, ... }: +{ + name = "tang"; + meta = with pkgs.lib.maintainers; { + maintainers = [ jfroche ]; + }; - nodes.server = - { - config, - pkgs, - modulesPath, - ... - }: - { - imports = [ - "${modulesPath}/../tests/common/auto-format-root-device.nix" - ]; - virtualisation = { - emptyDiskImages = [ 512 ]; - useBootLoader = true; - useEFIBoot = true; - # This requires to have access - # to a host Nix store as - # the new root device is /dev/vdb - # an empty 512MiB drive, containing no Nix store. 
- mountHostNixStore = true; - }; - - boot.loader.systemd-boot.enable = true; - - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "192.168.0.1"; - prefixLength = 24; - } - ]; - - environment.systemPackages = with pkgs; [ - clevis - tang - cryptsetup - ]; - services.tang = { - enable = true; - ipAddressAllow = [ "127.0.0.1/32" ]; - }; + nodes.server = + { + config, + pkgs, + modulesPath, + ... + }: + { + imports = [ + "${modulesPath}/../tests/common/auto-format-root-device.nix" + ]; + virtualisation = { + emptyDiskImages = [ 512 ]; + useBootLoader = true; + useEFIBoot = true; + # This requires to have access + # to a host Nix store as + # the new root device is /dev/vdb + # an empty 512MiB drive, containing no Nix store. + mountHostNixStore = true; }; - testScript = '' - start_all() - machine.wait_for_unit("sockets.target") - with subtest("Check keys are generated"): - machine.wait_until_succeeds("curl -v http://127.0.0.1:7654/adv") - key = machine.wait_until_succeeds("tang-show-keys 7654") + boot.loader.systemd-boot.enable = true; - with subtest("Check systemd access list"): - machine.succeed("ping -c 3 192.168.0.1") - machine.fail("curl -v --connect-timeout 3 http://192.168.0.1:7654/adv") + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "192.168.0.1"; + prefixLength = 24; + } + ]; - with subtest("Check basic encrypt and decrypt message"): - machine.wait_until_succeeds(f"""echo 'Hello World' | clevis encrypt tang '{{ "url": "http://127.0.0.1:7654", "thp":"{key}"}}' > /tmp/encrypted""") - decrypted = machine.wait_until_succeeds("clevis decrypt < /tmp/encrypted") - assert decrypted.strip() == "Hello World" - machine.wait_until_succeeds("tang-show-keys 7654") + environment.systemPackages = with pkgs; [ + clevis + tang + cryptsetup + ]; + services.tang = { + enable = true; + ipAddressAllow = [ "127.0.0.1/32" ]; + }; + }; + testScript = '' + start_all() + machine.wait_for_unit("sockets.target") - with subtest("Check encrypt and decrypt disk"): - machine.succeed("cryptsetup luksFormat --force-password --batch-mode /dev/vdb <<<'password'") - machine.succeed(f"""clevis luks bind -s1 -y -f -d /dev/vdb tang '{{ "url": "http://127.0.0.1:7654", "thp":"{key}" }}' <<< 'password' """) - clevis_luks = machine.succeed("clevis luks list -d /dev/vdb") - assert clevis_luks.strip() == """1: tang '{"url":"http://127.0.0.1:7654"}'""" - machine.succeed("clevis luks unlock -d /dev/vdb") - machine.succeed("find /dev/mapper -name 'luks*' -exec cryptsetup close {} +") - machine.succeed("clevis luks unlock -d /dev/vdb") - machine.succeed("find /dev/mapper -name 'luks*' -exec cryptsetup close {} +") - # without tang available, unlock should fail - machine.succeed("systemctl stop tangd.socket") - machine.fail("clevis luks unlock -d /dev/vdb") - machine.succeed("systemctl start tangd.socket") + with subtest("Check keys are generated"): + machine.wait_until_succeeds("curl -v http://127.0.0.1:7654/adv") + key = machine.wait_until_succeeds("tang-show-keys 7654") - with subtest("Rotate server keys"): - machine.succeed("${pkgs.tang}/libexec/tangd-rotate-keys -d /var/lib/tang") - machine.succeed("clevis luks unlock -d /dev/vdb") - machine.succeed("find /dev/mapper -name 'luks*' -exec cryptsetup close {} +") + with subtest("Check systemd access list"): + machine.succeed("ping -c 3 192.168.0.1") + machine.fail("curl -v --connect-timeout 3 http://192.168.0.1:7654/adv") - with subtest("Test systemd service security"): - output = machine.succeed("systemd-analyze security tangd@.service") - 
machine.log(output) - assert output[-9:-1] == "SAFE :-}" - ''; - } -) + with subtest("Check basic encrypt and decrypt message"): + machine.wait_until_succeeds(f"""echo 'Hello World' | clevis encrypt tang '{{ "url": "http://127.0.0.1:7654", "thp":"{key}"}}' > /tmp/encrypted""") + decrypted = machine.wait_until_succeeds("clevis decrypt < /tmp/encrypted") + assert decrypted.strip() == "Hello World" + machine.wait_until_succeeds("tang-show-keys 7654") + + with subtest("Check encrypt and decrypt disk"): + machine.succeed("cryptsetup luksFormat --force-password --batch-mode /dev/vdb <<<'password'") + machine.succeed(f"""clevis luks bind -s1 -y -f -d /dev/vdb tang '{{ "url": "http://127.0.0.1:7654", "thp":"{key}" }}' <<< 'password' """) + clevis_luks = machine.succeed("clevis luks list -d /dev/vdb") + assert clevis_luks.strip() == """1: tang '{"url":"http://127.0.0.1:7654"}'""" + machine.succeed("clevis luks unlock -d /dev/vdb") + machine.succeed("find /dev/mapper -name 'luks*' -exec cryptsetup close {} +") + machine.succeed("clevis luks unlock -d /dev/vdb") + machine.succeed("find /dev/mapper -name 'luks*' -exec cryptsetup close {} +") + # without tang available, unlock should fail + machine.succeed("systemctl stop tangd.socket") + machine.fail("clevis luks unlock -d /dev/vdb") + machine.succeed("systemctl start tangd.socket") + + with subtest("Rotate server keys"): + machine.succeed("${pkgs.tang}/libexec/tangd-rotate-keys -d /var/lib/tang") + machine.succeed("clevis luks unlock -d /dev/vdb") + machine.succeed("find /dev/mapper -name 'luks*' -exec cryptsetup close {} +") + + with subtest("Test systemd service security"): + output = machine.succeed("systemd-analyze security tangd@.service") + machine.log(output) + assert output[-9:-1] == "SAFE :-}" + ''; +} diff --git a/nixos/tests/taskchampion-sync-server.nix b/nixos/tests/taskchampion-sync-server.nix index 659a900fd2c2..474c0bd8834e 100644 --- a/nixos/tests/taskchampion-sync-server.nix +++ b/nixos/tests/taskchampion-sync-server.nix @@ -1,49 +1,47 @@ -import ./make-test-python.nix ( - { ... }: - { - name = "taskchampion-sync-server"; +{ ... }: +{ + name = "taskchampion-sync-server"; - nodes = { - server = { - services.taskchampion-sync-server.enable = true; - services.taskchampion-sync-server.host = "0.0.0.0"; - services.taskchampion-sync-server.openFirewall = true; - }; - client = - { pkgs, ... }: - { - environment.systemPackages = [ pkgs.taskwarrior3 ]; - }; + nodes = { + server = { + services.taskchampion-sync-server.enable = true; + services.taskchampion-sync-server.host = "0.0.0.0"; + services.taskchampion-sync-server.openFirewall = true; }; - testScript = - { nodes, ... }: - let - cfg = nodes.server.services.taskchampion-sync-server; - port = builtins.toString cfg.port; - # Generated with uuidgen - uuid = "bf01376e-04a4-435a-9263-608567531af3"; - password = "nixos-test"; - in - '' - # Explicitly start the VMs so that we don't accidentally start newServer - server.start() - client.start() + client = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.taskwarrior3 ]; + }; + }; + testScript = + { nodes, ... 
}: + let + cfg = nodes.server.services.taskchampion-sync-server; + port = builtins.toString cfg.port; + # Generated with uuidgen + uuid = "bf01376e-04a4-435a-9263-608567531af3"; + password = "nixos-test"; + in + '' + # Explicitly start the VMs so that we don't accidentally start newServer + server.start() + client.start() - server.wait_for_unit("taskchampion-sync-server.service") - server.wait_for_open_port(${port}) + server.wait_for_unit("taskchampion-sync-server.service") + server.wait_for_open_port(${port}) - # See man task-sync(5) - client.succeed("mkdir ~/.task") - client.succeed("touch ~/.taskrc") - client.succeed("echo sync.server.origin=http://server:${port} >> ~/.taskrc") - client.succeed("echo sync.server.client_id=${uuid} >> ~/.taskrc") - client.succeed("echo sync.encryption_secret=${password} >> ~/.taskrc") - client.succeed("task add hello world") - client.succeed("task sync") + # See man task-sync(5) + client.succeed("mkdir ~/.task") + client.succeed("touch ~/.taskrc") + client.succeed("echo sync.server.origin=http://server:${port} >> ~/.taskrc") + client.succeed("echo sync.server.client_id=${uuid} >> ~/.taskrc") + client.succeed("echo sync.encryption_secret=${password} >> ~/.taskrc") + client.succeed("task add hello world") + client.succeed("task sync") - # Useful for debugging - client.copy_from_vm("/root/.task", "client") - server.copy_from_vm("${cfg.dataDir}", "server") - ''; - } -) + # Useful for debugging + client.copy_from_vm("/root/.task", "client") + server.copy_from_vm("${cfg.dataDir}", "server") + ''; +} diff --git a/nixos/tests/taskserver.nix b/nixos/tests/taskserver.nix index 31dfea00435c..b1e2113f2f09 100644 --- a/nixos/tests/taskserver.nix +++ b/nixos/tests/taskserver.nix @@ -1,299 +1,297 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - snakeOil = - pkgs.runCommand "snakeoil-certs" - { - outputs = [ - "out" - "cacert" - "cert" - "key" - "crl" - ]; - buildInputs = [ pkgs.gnutls.bin ]; - caTemplate = pkgs.writeText "snakeoil-ca.template" '' - cn = server - expiration_days = -1 - cert_signing_key - ca - ''; - certTemplate = pkgs.writeText "snakeoil-cert.template" '' - cn = server - expiration_days = -1 - tls_www_server - encryption_key - signing_key - ''; - crlTemplate = pkgs.writeText "snakeoil-crl.template" '' - expiration_days = -1 - ''; - userCertTemplate = pkgs.writeText "snakeoil-user-cert.template" '' - organization = snakeoil - cn = server - expiration_days = -1 - tls_www_client - encryption_key - signing_key - ''; - } - '' - certtool -p --bits 4096 --outfile ca.key - certtool -s --template "$caTemplate" --load-privkey ca.key \ - --outfile "$cacert" - certtool -p --bits 4096 --outfile "$key" - certtool -c --template "$certTemplate" \ - --load-ca-privkey ca.key \ - --load-ca-certificate "$cacert" \ - --load-privkey "$key" \ - --outfile "$cert" - certtool --generate-crl --template "$crlTemplate" \ - --load-ca-privkey ca.key \ - --load-ca-certificate "$cacert" \ - --outfile "$crl" - - mkdir "$out" - - # Stripping key information before the actual PEM-encoded values is solely - # to make test output a bit less verbose when copying the client key to the - # actual client. - certtool -p --bits 4096 | sed -n \ - -e '/^----* *BEGIN/,/^----* *END/p' > "$out/alice.key" - - certtool -c --template "$userCertTemplate" \ - --load-privkey "$out/alice.key" \ - --load-ca-privkey ca.key \ - --load-ca-certificate "$cacert" \ - --outfile "$out/alice.cert" +{ pkgs, ... 
}: +let + snakeOil = + pkgs.runCommand "snakeoil-certs" + { + outputs = [ + "out" + "cacert" + "cert" + "key" + "crl" + ]; + buildInputs = [ pkgs.gnutls.bin ]; + caTemplate = pkgs.writeText "snakeoil-ca.template" '' + cn = server + expiration_days = -1 + cert_signing_key + ca ''; + certTemplate = pkgs.writeText "snakeoil-cert.template" '' + cn = server + expiration_days = -1 + tls_www_server + encryption_key + signing_key + ''; + crlTemplate = pkgs.writeText "snakeoil-crl.template" '' + expiration_days = -1 + ''; + userCertTemplate = pkgs.writeText "snakeoil-user-cert.template" '' + organization = snakeoil + cn = server + expiration_days = -1 + tls_www_client + encryption_key + signing_key + ''; + } + '' + certtool -p --bits 4096 --outfile ca.key + certtool -s --template "$caTemplate" --load-privkey ca.key \ + --outfile "$cacert" + certtool -p --bits 4096 --outfile "$key" + certtool -c --template "$certTemplate" \ + --load-ca-privkey ca.key \ + --load-ca-certificate "$cacert" \ + --load-privkey "$key" \ + --outfile "$cert" + certtool --generate-crl --template "$crlTemplate" \ + --load-ca-privkey ca.key \ + --load-ca-certificate "$cacert" \ + --outfile "$crl" - in - { - name = "taskserver"; + mkdir "$out" - nodes = rec { - server = { - services.taskserver.enable = true; - services.taskserver.listenHost = "::"; - services.taskserver.openFirewall = true; - services.taskserver.fqdn = "server"; - services.taskserver.organisations = { - testOrganisation.users = [ - "alice" - "foo" - ]; - anotherOrganisation.users = [ "bob" ]; - }; + # Stripping key information before the actual PEM-encoded values is solely + # to make test output a bit less verbose when copying the client key to the + # actual client. + certtool -p --bits 4096 | sed -n \ + -e '/^----* *BEGIN/,/^----* *END/p' > "$out/alice.key" - specialisation.manual_config.configuration = { - services.taskserver.pki.manual = { - ca.cert = snakeOil.cacert; - server.cert = snakeOil.cert; - server.key = snakeOil.key; - server.crl = snakeOil.crl; - }; - }; + certtool -c --template "$userCertTemplate" \ + --load-privkey "$out/alice.key" \ + --load-ca-privkey ca.key \ + --load-ca-certificate "$cacert" \ + --outfile "$out/alice.cert" + ''; + +in +{ + name = "taskserver"; + + nodes = rec { + server = { + services.taskserver.enable = true; + services.taskserver.listenHost = "::"; + services.taskserver.openFirewall = true; + services.taskserver.fqdn = "server"; + services.taskserver.organisations = { + testOrganisation.users = [ + "alice" + "foo" + ]; + anotherOrganisation.users = [ "bob" ]; }; - client1 = - { pkgs, ... }: - { - environment.systemPackages = [ - pkgs.taskwarrior2 - pkgs.gnutls - ]; - users.users.alice.isNormalUser = true; - users.users.bob.isNormalUser = true; - users.users.foo.isNormalUser = true; - users.users.bar.isNormalUser = true; + specialisation.manual_config.configuration = { + services.taskserver.pki.manual = { + ca.cert = snakeOil.cacert; + server.cert = snakeOil.cert; + server.key = snakeOil.key; + server.crl = snakeOil.crl; }; - - client2 = client1; + }; }; - testScript = - { nodes, ... }: - let - cfg = nodes.server.services.taskserver; - portStr = toString cfg.listenPort; - specialisations = "${nodes.server.system.build.toplevel}/specialisation"; - newServerSystem = "${specialisations}/manual_config"; - switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test"; - in - '' - from shlex import quote + client1 = + { pkgs, ... 
}: + { + environment.systemPackages = [ + pkgs.taskwarrior2 + pkgs.gnutls + ]; + users.users.alice.isNormalUser = true; + users.users.bob.isNormalUser = true; + users.users.foo.isNormalUser = true; + users.users.bar.isNormalUser = true; + }; + + client2 = client1; + }; + + testScript = + { nodes, ... }: + let + cfg = nodes.server.services.taskserver; + portStr = toString cfg.listenPort; + specialisations = "${nodes.server.system.build.toplevel}/specialisation"; + newServerSystem = "${specialisations}/manual_config"; + switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test"; + in + '' + from shlex import quote - def su(user, cmd): - return f"su - {user} -c {quote(cmd)}" + def su(user, cmd): + return f"su - {user} -c {quote(cmd)}" - def no_extra_init(client, org, user): - pass + def no_extra_init(client, org, user): + pass - def setup_clients_for(org, user, extra_init=no_extra_init): - for client in [client1, client2]: - with client.nested(f"initialize client for user {user}"): - client.succeed( - su(user, f"rm -rf /home/{user}/.task"), - su(user, "task rc.confirmation=no config confirmation no"), - ) + def setup_clients_for(org, user, extra_init=no_extra_init): + for client in [client1, client2]: + with client.nested(f"initialize client for user {user}"): + client.succeed( + su(user, f"rm -rf /home/{user}/.task"), + su(user, "task rc.confirmation=no config confirmation no"), + ) - exportinfo = server.succeed(f"nixos-taskserver user export {org} {user}") + exportinfo = server.succeed(f"nixos-taskserver user export {org} {user}") - with client.nested("importing taskwarrior configuration"): - client.succeed(su(user, f"eval {quote(exportinfo)} >&2")) + with client.nested("importing taskwarrior configuration"): + client.succeed(su(user, f"eval {quote(exportinfo)} >&2")) - extra_init(client, org, user) + extra_init(client, org, user) - client.succeed(su(user, "task config taskd.server server:${portStr} >&2")) + client.succeed(su(user, "task config taskd.server server:${portStr} >&2")) - client.succeed(su(user, "task sync init >&2")) + client.succeed(su(user, "task sync init >&2")) - def restart_server(): - server.systemctl("restart taskserver.service") - server.wait_for_open_port(${portStr}) + def restart_server(): + server.systemctl("restart taskserver.service") + server.wait_for_open_port(${portStr}) - def re_add_imperative_user(): - with server.nested("(re-)add imperative user bar"): - server.execute("nixos-taskserver org remove imperativeOrg") - server.succeed( - "nixos-taskserver org add imperativeOrg", - "nixos-taskserver user add imperativeOrg bar", - ) - setup_clients_for("imperativeOrg", "bar") + def re_add_imperative_user(): + with server.nested("(re-)add imperative user bar"): + server.execute("nixos-taskserver org remove imperativeOrg") + server.succeed( + "nixos-taskserver org add imperativeOrg", + "nixos-taskserver user add imperativeOrg bar", + ) + setup_clients_for("imperativeOrg", "bar") - def test_sync(user): - with subtest(f"sync for user {user}"): - client1.succeed(su(user, "task add foo >&2")) - client1.succeed(su(user, "task sync >&2")) - client2.fail(su(user, "task list >&2")) - client2.succeed(su(user, "task sync >&2")) - client2.succeed(su(user, "task list >&2")) + def test_sync(user): + with subtest(f"sync for user {user}"): + client1.succeed(su(user, "task add foo >&2")) + client1.succeed(su(user, "task sync >&2")) + client2.fail(su(user, "task list >&2")) + client2.succeed(su(user, "task sync >&2")) + client2.succeed(su(user, "task list >&2")) - def 
check_client_cert(user): - # debug level 3 is a workaround for gnutls issue https://gitlab.com/gnutls/gnutls/-/issues/1040 - cmd = ( - f"gnutls-cli -d 3" - f" --x509cafile=/home/{user}/.task/keys/ca.cert" - f" --x509keyfile=/home/{user}/.task/keys/private.key" - f" --x509certfile=/home/{user}/.task/keys/public.cert" - f" --port=${portStr} server < /dev/null" - ) - return su(user, cmd) + def check_client_cert(user): + # debug level 3 is a workaround for gnutls issue https://gitlab.com/gnutls/gnutls/-/issues/1040 + cmd = ( + f"gnutls-cli -d 3" + f" --x509cafile=/home/{user}/.task/keys/ca.cert" + f" --x509keyfile=/home/{user}/.task/keys/private.key" + f" --x509certfile=/home/{user}/.task/keys/public.cert" + f" --port=${portStr} server < /dev/null" + ) + return su(user, cmd) - # Explicitly start the VMs so that we don't accidentally start newServer - server.start() - client1.start() - client2.start() + # Explicitly start the VMs so that we don't accidentally start newServer + server.start() + client1.start() + client2.start() - server.wait_for_unit("taskserver.service") + server.wait_for_unit("taskserver.service") - server.succeed( - "nixos-taskserver user list testOrganisation | grep -qxF alice", - "nixos-taskserver user list testOrganisation | grep -qxF foo", - "nixos-taskserver user list anotherOrganisation | grep -qxF bob", - ) + server.succeed( + "nixos-taskserver user list testOrganisation | grep -qxF alice", + "nixos-taskserver user list testOrganisation | grep -qxF foo", + "nixos-taskserver user list anotherOrganisation | grep -qxF bob", + ) - server.wait_for_open_port(${portStr}) + server.wait_for_open_port(${portStr}) - client1.wait_for_unit("multi-user.target") - client2.wait_for_unit("multi-user.target") + client1.wait_for_unit("multi-user.target") + client2.wait_for_unit("multi-user.target") - setup_clients_for("testOrganisation", "alice") - setup_clients_for("testOrganisation", "foo") - setup_clients_for("anotherOrganisation", "bob") + setup_clients_for("testOrganisation", "alice") + setup_clients_for("testOrganisation", "foo") + setup_clients_for("anotherOrganisation", "bob") - for user in ["alice", "bob", "foo"]: - test_sync(user) + for user in ["alice", "bob", "foo"]: + test_sync(user) - server.fail("nixos-taskserver user add imperativeOrg bar") - re_add_imperative_user() + server.fail("nixos-taskserver user add imperativeOrg bar") + re_add_imperative_user() - test_sync("bar") + test_sync("bar") - with subtest("checking certificate revocation of user bar"): - client1.succeed(check_client_cert("bar")) + with subtest("checking certificate revocation of user bar"): + client1.succeed(check_client_cert("bar")) - server.succeed("nixos-taskserver user remove imperativeOrg bar") - restart_server() + server.succeed("nixos-taskserver user remove imperativeOrg bar") + restart_server() - client1.fail(check_client_cert("bar")) + client1.fail(check_client_cert("bar")) - client1.succeed(su("bar", "task add destroy everything >&2")) - client1.fail(su("bar", "task sync >&2")) + client1.succeed(su("bar", "task add destroy everything >&2")) + client1.fail(su("bar", "task sync >&2")) - re_add_imperative_user() + re_add_imperative_user() - with subtest("checking certificate revocation of org imperativeOrg"): - client1.succeed(check_client_cert("bar")) + with subtest("checking certificate revocation of org imperativeOrg"): + client1.succeed(check_client_cert("bar")) - server.succeed("nixos-taskserver org remove imperativeOrg") - restart_server() + server.succeed("nixos-taskserver org remove 
imperativeOrg") + restart_server() - client1.fail(check_client_cert("bar")) + client1.fail(check_client_cert("bar")) - client1.succeed(su("bar", "task add destroy even more >&2")) - client1.fail(su("bar", "task sync >&2")) + client1.succeed(su("bar", "task add destroy even more >&2")) + client1.fail(su("bar", "task sync >&2")) - re_add_imperative_user() + re_add_imperative_user() - with subtest("check whether declarative config overrides user bar"): - restart_server() - test_sync("bar") + with subtest("check whether declarative config overrides user bar"): + restart_server() + test_sync("bar") - def init_manual_config(client, org, user): - cfgpath = f"/home/{user}/.task" + def init_manual_config(client, org, user): + cfgpath = f"/home/{user}/.task" - client.copy_from_host( - "${snakeOil.cacert}", - f"{cfgpath}/ca.cert", - ) - for file in ["alice.key", "alice.cert"]: - client.copy_from_host( - f"${snakeOil}/{file}", - f"{cfgpath}/{file}", - ) + client.copy_from_host( + "${snakeOil.cacert}", + f"{cfgpath}/ca.cert", + ) + for file in ["alice.key", "alice.cert"]: + client.copy_from_host( + f"${snakeOil}/{file}", + f"{cfgpath}/{file}", + ) - for file in [f"{user}.key", f"{user}.cert"]: - client.copy_from_host( - f"${snakeOil}/{file}", - f"{cfgpath}/{file}", - ) + for file in [f"{user}.key", f"{user}.cert"]: + client.copy_from_host( + f"${snakeOil}/{file}", + f"{cfgpath}/{file}", + ) - client.succeed( - su("alice", f"task config taskd.ca {cfgpath}/ca.cert"), - su("alice", f"task config taskd.key {cfgpath}/{user}.key"), - su(user, f"task config taskd.certificate {cfgpath}/{user}.cert"), - ) + client.succeed( + su("alice", f"task config taskd.ca {cfgpath}/ca.cert"), + su("alice", f"task config taskd.key {cfgpath}/{user}.key"), + su(user, f"task config taskd.certificate {cfgpath}/{user}.cert"), + ) - with subtest("check manual configuration"): - # Remove the keys from automatic CA creation, to make sure the new - # generation doesn't use keys from before. - server.succeed("rm -rf ${cfg.dataDir}/keys/* >&2") + with subtest("check manual configuration"): + # Remove the keys from automatic CA creation, to make sure the new + # generation doesn't use keys from before. + server.succeed("rm -rf ${cfg.dataDir}/keys/* >&2") - server.succeed( - "${switchToNewServer} >&2" - ) - server.wait_for_unit("taskserver.service") - server.wait_for_open_port(${portStr}) + server.succeed( + "${switchToNewServer} >&2" + ) + server.wait_for_unit("taskserver.service") + server.wait_for_open_port(${portStr}) - server.succeed( - "nixos-taskserver org add manualOrg", - "nixos-taskserver user add manualOrg alice", - ) + server.succeed( + "nixos-taskserver org add manualOrg", + "nixos-taskserver user add manualOrg alice", + ) - setup_clients_for("manualOrg", "alice", init_manual_config) + setup_clients_for("manualOrg", "alice", init_manual_config) - test_sync("alice") - ''; - } -) + test_sync("alice") + ''; +} diff --git a/nixos/tests/tayga.nix b/nixos/tests/tayga.nix index bd60b51449b8..32512224bb58 100644 --- a/nixos/tests/tayga.nix +++ b/nixos/tests/tayga.nix @@ -22,257 +22,255 @@ # | Route: 192.0.2.0/24 via 100.64.0.1 # +------ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - { - name = "tayga"; - meta = with pkgs.lib.maintainers; { - maintainers = [ hax404 ]; +{ + name = "tayga"; + meta = with pkgs.lib.maintainers; { + maintainers = [ hax404 ]; + }; + + nodes = { + # The server is configured with static IPv4 addresses. 
RFC 6052 Section 3.1 + # disallows the mapping of non-global IPv4 addresses like RFC 1918 into the + # Well-Known Prefix 64:ff9b::/96. TAYGA also does not allow the mapping of + # documentation space (RFC 5737). To circumvent this, 100.64.0.2/24 from + # RFC 6589 (Carrier Grade NAT) is used here. + # To reach the IPv4 address pool of the NAT64 gateway, there is a static + # route configured. In normal cases, where the router would also source NAT + # the pool addresses to one IPv4 addresses, this would not be needed. + server = { + virtualisation.vlans = [ + 2 # towards router + ]; + networking = { + useDHCP = false; + interfaces.eth1 = lib.mkForce { }; + }; + systemd.network = { + enable = true; + networks."vlan1" = { + matchConfig.Name = "eth1"; + address = [ + "100.64.0.2/24" + ]; + routes = [ + { + Destination = "192.0.2.0/24"; + Gateway = "100.64.0.1"; + } + ]; + }; + }; + programs.mtr.enable = true; }; - nodes = { - # The server is configured with static IPv4 addresses. RFC 6052 Section 3.1 - # disallows the mapping of non-global IPv4 addresses like RFC 1918 into the - # Well-Known Prefix 64:ff9b::/96. TAYGA also does not allow the mapping of - # documentation space (RFC 5737). To circumvent this, 100.64.0.2/24 from - # RFC 6589 (Carrier Grade NAT) is used here. - # To reach the IPv4 address pool of the NAT64 gateway, there is a static - # route configured. In normal cases, where the router would also source NAT - # the pool addresses to one IPv4 addresses, this would not be needed. - server = { - virtualisation.vlans = [ - 2 # towards router - ]; - networking = { - useDHCP = false; - interfaces.eth1 = lib.mkForce { }; - }; - systemd.network = { - enable = true; - networks."vlan1" = { - matchConfig.Name = "eth1"; - address = [ - "100.64.0.2/24" - ]; - routes = [ + # The router is configured with static IPv4 addresses towards the server + # and IPv6 addresses towards the client. For NAT64, the Well-Known prefix + # 64:ff9b::/96 is used. NAT64 is done with TAYGA which provides the + # tun-interface nat64 and does the translation over it. The IPv6 packets + # are sent to this interfaces and received as IPv4 packets and vice versa. + # As TAYGA only translates IPv6 addresses to dedicated IPv4 addresses, it + # needs a pool of IPv4 addresses which must be at least as big as the + # expected amount of clients. In this test, the packets from the pool are + # directly routed towards the client. In normal cases, there would be a + # second source NAT44 to map all clients behind one IPv4 address. + router_systemd = { + boot.kernel.sysctl = { + "net.ipv4.ip_forward" = 1; + "net.ipv6.conf.all.forwarding" = 1; + }; + + virtualisation.vlans = [ + 2 # towards server + 3 # towards client + ]; + + networking = { + useDHCP = false; + useNetworkd = true; + firewall.enable = false; + interfaces.eth1 = lib.mkForce { + ipv4 = { + addresses = [ { - Destination = "192.0.2.0/24"; - Gateway = "100.64.0.1"; + address = "100.64.0.1"; + prefixLength = 24; } ]; }; }; - programs.mtr.enable = true; - }; - - # The router is configured with static IPv4 addresses towards the server - # and IPv6 addresses towards the client. For NAT64, the Well-Known prefix - # 64:ff9b::/96 is used. NAT64 is done with TAYGA which provides the - # tun-interface nat64 and does the translation over it. The IPv6 packets - # are sent to this interfaces and received as IPv4 packets and vice versa. 
- # As TAYGA only translates IPv6 addresses to dedicated IPv4 addresses, it - # needs a pool of IPv4 addresses which must be at least as big as the - # expected amount of clients. In this test, the packets from the pool are - # directly routed towards the client. In normal cases, there would be a - # second source NAT44 to map all clients behind one IPv4 address. - router_systemd = { - boot.kernel.sysctl = { - "net.ipv4.ip_forward" = 1; - "net.ipv6.conf.all.forwarding" = 1; - }; - - virtualisation.vlans = [ - 2 # towards server - 3 # towards client - ]; - - networking = { - useDHCP = false; - useNetworkd = true; - firewall.enable = false; - interfaces.eth1 = lib.mkForce { - ipv4 = { - addresses = [ - { - address = "100.64.0.1"; - prefixLength = 24; - } - ]; - }; - }; - interfaces.eth2 = lib.mkForce { - ipv6 = { - addresses = [ - { - address = "2001:db8::1"; - prefixLength = 64; - } - ]; - }; - }; - }; - - services.tayga = { - enable = true; - ipv4 = { - address = "192.0.2.0"; - router = { - address = "192.0.2.1"; - }; - pool = { - address = "192.0.2.0"; - prefixLength = 24; - }; - }; + interfaces.eth2 = lib.mkForce { ipv6 = { - address = "2001:db8::1"; - router = { - address = "64:ff9b::1"; - }; - pool = { - address = "64:ff9b::"; - prefixLength = 96; - }; - }; - mappings = { - "192.0.2.42" = "2001:db8::2"; - }; - }; - }; - - router_nixos = { - boot.kernel.sysctl = { - "net.ipv4.ip_forward" = 1; - "net.ipv6.conf.all.forwarding" = 1; - }; - - virtualisation.vlans = [ - 2 # towards server - 3 # towards client - ]; - - networking = { - useDHCP = false; - firewall.enable = false; - interfaces.eth1 = lib.mkForce { - ipv4 = { - addresses = [ - { - address = "100.64.0.1"; - prefixLength = 24; - } - ]; - }; - }; - interfaces.eth2 = lib.mkForce { - ipv6 = { - addresses = [ - { - address = "2001:db8::1"; - prefixLength = 64; - } - ]; - }; - }; - }; - - services.tayga = { - enable = true; - ipv4 = { - address = "192.0.2.0"; - router = { - address = "192.0.2.1"; - }; - pool = { - address = "192.0.2.0"; - prefixLength = 24; - }; - }; - ipv6 = { - address = "2001:db8::1"; - router = { - address = "64:ff9b::1"; - }; - pool = { - address = "64:ff9b::"; - prefixLength = 96; - }; - }; - mappings = { - "192.0.2.42" = "2001:db8::2"; - }; - }; - }; - - # The client is configured with static IPv6 addresses. It has also a static - # route for the NAT64 IP space where the IPv4 addresses are mapped in. In - # normal cases, there would be only a default route. 
- client = { - virtualisation.vlans = [ - 3 # towards router - ]; - - networking = { - useDHCP = false; - interfaces.eth1 = lib.mkForce { }; - }; - - systemd.network = { - enable = true; - networks."vlan1" = { - matchConfig.Name = "eth1"; - address = [ - "2001:db8::2/64" - ]; - routes = [ + addresses = [ { - Destination = "64:ff9b::/96"; - Gateway = "2001:db8::1"; + address = "2001:db8::1"; + prefixLength = 64; } ]; }; }; - programs.mtr.enable = true; + }; + + services.tayga = { + enable = true; + ipv4 = { + address = "192.0.2.0"; + router = { + address = "192.0.2.1"; + }; + pool = { + address = "192.0.2.0"; + prefixLength = 24; + }; + }; + ipv6 = { + address = "2001:db8::1"; + router = { + address = "64:ff9b::1"; + }; + pool = { + address = "64:ff9b::"; + prefixLength = 96; + }; + }; + mappings = { + "192.0.2.42" = "2001:db8::2"; + }; }; }; - testScript = '' - # start client and server - for machine in client, server: - machine.systemctl("start network-online.target") - machine.wait_for_unit("network-online.target") - machine.log(machine.execute("ip addr")[1]) - machine.log(machine.execute("ip route")[1]) - machine.log(machine.execute("ip -6 route")[1]) + router_nixos = { + boot.kernel.sysctl = { + "net.ipv4.ip_forward" = 1; + "net.ipv6.conf.all.forwarding" = 1; + }; - # test systemd-networkd and nixos-scripts based router - for router in router_systemd, router_nixos: - router.start() - router.systemctl("start network-online.target") - router.wait_for_unit("network-online.target") + virtualisation.vlans = [ + 2 # towards server + 3 # towards client + ]; + + networking = { + useDHCP = false; + firewall.enable = false; + interfaces.eth1 = lib.mkForce { + ipv4 = { + addresses = [ + { + address = "100.64.0.1"; + prefixLength = 24; + } + ]; + }; + }; + interfaces.eth2 = lib.mkForce { + ipv6 = { + addresses = [ + { + address = "2001:db8::1"; + prefixLength = 64; + } + ]; + }; + }; + }; + + services.tayga = { + enable = true; + ipv4 = { + address = "192.0.2.0"; + router = { + address = "192.0.2.1"; + }; + pool = { + address = "192.0.2.0"; + prefixLength = 24; + }; + }; + ipv6 = { + address = "2001:db8::1"; + router = { + address = "64:ff9b::1"; + }; + pool = { + address = "64:ff9b::"; + prefixLength = 96; + }; + }; + mappings = { + "192.0.2.42" = "2001:db8::2"; + }; + }; + }; + + # The client is configured with static IPv6 addresses. It has also a static + # route for the NAT64 IP space where the IPv4 addresses are mapped in. In + # normal cases, there would be only a default route. 
+ client = { + virtualisation.vlans = [ + 3 # towards router + ]; + + networking = { + useDHCP = false; + interfaces.eth1 = lib.mkForce { }; + }; + + systemd.network = { + enable = true; + networks."vlan1" = { + matchConfig.Name = "eth1"; + address = [ + "2001:db8::2/64" + ]; + routes = [ + { + Destination = "64:ff9b::/96"; + Gateway = "2001:db8::1"; + } + ]; + }; + }; + programs.mtr.enable = true; + }; + }; + + testScript = '' + # start client and server + for machine in client, server: + machine.systemctl("start network-online.target") + machine.wait_for_unit("network-online.target") + machine.log(machine.execute("ip addr")[1]) + machine.log(machine.execute("ip route")[1]) + machine.log(machine.execute("ip -6 route")[1]) + + # test systemd-networkd and nixos-scripts based router + for router in router_systemd, router_nixos: + router.start() + router.systemctl("start network-online.target") + router.wait_for_unit("network-online.target") + router.wait_for_unit("tayga.service") + router.log(machine.execute("ip addr")[1]) + router.log(machine.execute("ip route")[1]) + router.log(machine.execute("ip -6 route")[1]) + + with subtest("Wait for tayga"): router.wait_for_unit("tayga.service") - router.log(machine.execute("ip addr")[1]) - router.log(machine.execute("ip route")[1]) - router.log(machine.execute("ip -6 route")[1]) - with subtest("Wait for tayga"): - router.wait_for_unit("tayga.service") + with subtest("Test ICMP server -> client"): + server.wait_until_succeeds("ping -c 3 192.0.2.42 >&2") - with subtest("Test ICMP server -> client"): - server.wait_until_succeeds("ping -c 3 192.0.2.42 >&2") + with subtest("Test ICMP and show a traceroute server -> client"): + server.wait_until_succeeds("mtr --show-ips --report-wide 192.0.2.42 >&2") - with subtest("Test ICMP and show a traceroute server -> client"): - server.wait_until_succeeds("mtr --show-ips --report-wide 192.0.2.42 >&2") + with subtest("Test ICMP client -> server"): + client.wait_until_succeeds("ping -c 3 64:ff9b::100.64.0.2 >&2") - with subtest("Test ICMP client -> server"): - client.wait_until_succeeds("ping -c 3 64:ff9b::100.64.0.2 >&2") + with subtest("Test ICMP and show a traceroute client -> server"): + client.wait_until_succeeds("mtr --show-ips --report-wide 64:ff9b::100.64.0.2 >&2") - with subtest("Test ICMP and show a traceroute client -> server"): - client.wait_until_succeeds("mtr --show-ips --report-wide 64:ff9b::100.64.0.2 >&2") - - router.log(router.execute("systemd-analyze security tayga.service")[1]) - router.shutdown() - ''; - } -) + router.log(router.execute("systemd-analyze security tayga.service")[1]) + router.shutdown() + ''; +} diff --git a/nixos/tests/technitium-dns-server.nix b/nixos/tests/technitium-dns-server.nix index dbe8fac5d354..7ac06371ead9 100644 --- a/nixos/tests/technitium-dns-server.nix +++ b/nixos/tests/technitium-dns-server.nix @@ -1,31 +1,29 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "technitium-dns-server"; +{ pkgs, lib, ... }: +{ + name = "technitium-dns-server"; - nodes = { - machine = - { pkgs, ... }: - { - services.technitium-dns-server = { - enable = true; - openFirewall = true; - }; + nodes = { + machine = + { pkgs, ... 
}: + { + services.technitium-dns-server = { + enable = true; + openFirewall = true; }; - }; + }; + }; - testScript = '' - import json + testScript = '' + import json - start_all() - machine.wait_for_unit("technitium-dns-server.service") - machine.wait_for_open_port(53) - curl_cmd = 'curl --fail-with-body -X GET "http://localhost:5380/api/user/login?user=admin&pass=admin"' - output = json.loads(machine.wait_until_succeeds(curl_cmd, timeout=10)) - print(output) - assert "ok" == output['status'], "status not ok" - ''; + start_all() + machine.wait_for_unit("technitium-dns-server.service") + machine.wait_for_open_port(53) + curl_cmd = 'curl --fail-with-body -X GET "http://localhost:5380/api/user/login?user=admin&pass=admin"' + output = json.loads(machine.wait_until_succeeds(curl_cmd, timeout=10)) + print(output) + assert "ok" == output['status'], "status not ok" + ''; - meta.maintainers = with lib.maintainers; [ fabianrig ]; - } -) + meta.maintainers = with lib.maintainers; [ fabianrig ]; +} diff --git a/nixos/tests/teeworlds.nix b/nixos/tests/teeworlds.nix index 7c94f66a2b20..9b0c39d12235 100644 --- a/nixos/tests/teeworlds.nix +++ b/nixos/tests/teeworlds.nix @@ -1,58 +1,56 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - client = - { pkgs, ... }: +let + client = + { pkgs, ... }: - { - imports = [ ./common/x11.nix ]; - environment.systemPackages = [ pkgs.teeworlds ]; - }; - - in - { - name = "teeworlds"; - meta = with pkgs.lib.maintainers; { - maintainers = [ hax404 ]; + { + imports = [ ./common/x11.nix ]; + environment.systemPackages = [ pkgs.teeworlds ]; }; - nodes = { - server = { - services.teeworlds = { - enable = true; - openPorts = true; - }; - }; +in +{ + name = "teeworlds"; + meta = with pkgs.lib.maintainers; { + maintainers = [ hax404 ]; + }; - client1 = client; - client2 = client; + nodes = { + server = { + services.teeworlds = { + enable = true; + openPorts = true; + }; }; - testScript = '' - start_all() + client1 = client; + client2 = client; + }; - server.wait_for_unit("teeworlds.service") - server.wait_until_succeeds("ss --numeric --udp --listening | grep -q 8303") + testScript = '' + start_all() - client1.wait_for_x() - client2.wait_for_x() + server.wait_for_unit("teeworlds.service") + server.wait_until_succeeds("ss --numeric --udp --listening | grep -q 8303") - client1.execute("teeworlds 'player_name Alice;connect server' >&2 &") - server.wait_until_succeeds( - 'journalctl -u teeworlds -e | grep --extended-regexp -q "team_join player=\'[0-9]:Alice"' - ) + client1.wait_for_x() + client2.wait_for_x() - client2.execute("teeworlds 'player_name Bob;connect server' >&2 &") - server.wait_until_succeeds( - 'journalctl -u teeworlds -e | grep --extended-regexp -q "team_join player=\'[0-9]:Bob"' - ) + client1.execute("teeworlds 'player_name Alice;connect server' >&2 &") + server.wait_until_succeeds( + 'journalctl -u teeworlds -e | grep --extended-regexp -q "team_join player=\'[0-9]:Alice"' + ) - server.sleep(10) # wait for a while to get a nice screenshot + client2.execute("teeworlds 'player_name Bob;connect server' >&2 &") + server.wait_until_succeeds( + 'journalctl -u teeworlds -e | grep --extended-regexp -q "team_join player=\'[0-9]:Bob"' + ) - client1.screenshot("screen_client1") - client2.screenshot("screen_client2") - ''; + server.sleep(10) # wait for a while to get a nice screenshot - } -) + client1.screenshot("screen_client1") + client2.screenshot("screen_client2") + ''; + +} diff --git a/nixos/tests/tiddlywiki.nix b/nixos/tests/tiddlywiki.nix index 
b52bc385f7f3..3473c8a8e910 100644 --- a/nixos/tests/tiddlywiki.nix +++ b/nixos/tests/tiddlywiki.nix @@ -1,71 +1,69 @@ -import ./make-test-python.nix ( - { ... }: - { - name = "tiddlywiki"; - nodes = { - default = { - services.tiddlywiki.enable = true; - }; - configured = { - boot.postBootCommands = '' - echo "username,password - somelogin,somesecret" > /var/lib/wikiusers.csv - ''; - services.tiddlywiki = { - enable = true; - listenOptions = { - port = 3000; - credentials = "../wikiusers.csv"; - readers = "(authenticated)"; - }; +{ ... }: +{ + name = "tiddlywiki"; + nodes = { + default = { + services.tiddlywiki.enable = true; + }; + configured = { + boot.postBootCommands = '' + echo "username,password + somelogin,somesecret" > /var/lib/wikiusers.csv + ''; + services.tiddlywiki = { + enable = true; + listenOptions = { + port = 3000; + credentials = "../wikiusers.csv"; + readers = "(authenticated)"; }; }; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - with subtest("by default works without configuration"): - default.wait_for_unit("tiddlywiki.service") + with subtest("by default works without configuration"): + default.wait_for_unit("tiddlywiki.service") - with subtest("by default available on port 8080 without auth"): - default.wait_for_unit("tiddlywiki.service") - default.wait_for_open_port(8080) - # we output to /dev/null here to avoid a python UTF-8 decode error - # but the check will still fail if the service doesn't respond - default.succeed("curl --fail -o /dev/null 127.0.0.1:8080") + with subtest("by default available on port 8080 without auth"): + default.wait_for_unit("tiddlywiki.service") + default.wait_for_open_port(8080) + # we output to /dev/null here to avoid a python UTF-8 decode error + # but the check will still fail if the service doesn't respond + default.succeed("curl --fail -o /dev/null 127.0.0.1:8080") - with subtest("by default creates empty wiki"): - default.succeed("test -f /var/lib/tiddlywiki/tiddlywiki.info") + with subtest("by default creates empty wiki"): + default.succeed("test -f /var/lib/tiddlywiki/tiddlywiki.info") - with subtest("configured on port 3000 with basic auth"): - configured.wait_for_unit("tiddlywiki.service") - configured.wait_for_open_port(3000) - configured.fail("curl --fail -o /dev/null 127.0.0.1:3000") - configured.succeed( - "curl --fail -o /dev/null 127.0.0.1:3000 --user somelogin:somesecret" - ) + with subtest("configured on port 3000 with basic auth"): + configured.wait_for_unit("tiddlywiki.service") + configured.wait_for_open_port(3000) + configured.fail("curl --fail -o /dev/null 127.0.0.1:3000") + configured.succeed( + "curl --fail -o /dev/null 127.0.0.1:3000 --user somelogin:somesecret" + ) - with subtest("restart preserves changes"): - # given running wiki - default.wait_for_unit("tiddlywiki.service") - # with some changes - default.succeed( - 'curl --fail --request PUT --header \'X-Requested-With:TiddlyWiki\' \ - --data \'{ "title": "title", "text": "content" }\' \ - --url 127.0.0.1:8080/recipes/default/tiddlers/somepage ' - ) - default.succeed("sleep 2") + with subtest("restart preserves changes"): + # given running wiki + default.wait_for_unit("tiddlywiki.service") + # with some changes + default.succeed( + 'curl --fail --request PUT --header \'X-Requested-With:TiddlyWiki\' \ + --data \'{ "title": "title", "text": "content" }\' \ + --url 127.0.0.1:8080/recipes/default/tiddlers/somepage ' + ) + default.succeed("sleep 2") - # when wiki is cycled - default.systemctl("restart tiddlywiki.service") - 
default.wait_for_unit("tiddlywiki.service") - default.wait_for_open_port(8080) + # when wiki is cycled + default.systemctl("restart tiddlywiki.service") + default.wait_for_unit("tiddlywiki.service") + default.wait_for_open_port(8080) - # the change is preserved - default.succeed( - "curl --fail -o /dev/null 127.0.0.1:8080/recipes/default/tiddlers/somepage" - ) - ''; - } -) + # the change is preserved + default.succeed( + "curl --fail -o /dev/null 127.0.0.1:8080/recipes/default/tiddlers/somepage" + ) + ''; +} diff --git a/nixos/tests/timezone.nix b/nixos/tests/timezone.nix index 86de47204ae8..3a16c9de4887 100644 --- a/nixos/tests/timezone.nix +++ b/nixos/tests/timezone.nix @@ -1,59 +1,57 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "timezone"; - meta.maintainers = with pkgs.lib.maintainers; [ ]; +{ pkgs, ... }: +{ + name = "timezone"; + meta.maintainers = with pkgs.lib.maintainers; [ ]; - nodes = { - node_eutz = - { pkgs, ... }: - { - time.timeZone = "Europe/Amsterdam"; - }; + nodes = { + node_eutz = + { pkgs, ... }: + { + time.timeZone = "Europe/Amsterdam"; + }; - node_nulltz = - { pkgs, ... }: - { - time.timeZone = null; - }; - }; + node_nulltz = + { pkgs, ... }: + { + time.timeZone = null; + }; + }; - testScript = - { nodes, ... }: - '' - node_eutz.wait_for_unit("dbus.socket") + testScript = + { nodes, ... }: + '' + node_eutz.wait_for_unit("dbus.socket") - with subtest("static - Ensure timezone change gives the correct result"): - node_eutz.fail("timedatectl set-timezone Asia/Tokyo") - date_result = node_eutz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"') - assert date_result == "1970-01-01 01:00:00\n", "Timezone seems to be wrong" + with subtest("static - Ensure timezone change gives the correct result"): + node_eutz.fail("timedatectl set-timezone Asia/Tokyo") + date_result = node_eutz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"') + assert date_result == "1970-01-01 01:00:00\n", "Timezone seems to be wrong" - node_nulltz.wait_for_unit("dbus.socket") + node_nulltz.wait_for_unit("dbus.socket") - with subtest("imperative - Ensure timezone defaults to UTC"): - date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"') - print(date_result) - assert ( - date_result == "1970-01-01 00:00:00\n" - ), "Timezone seems to be wrong (not UTC)" + with subtest("imperative - Ensure timezone defaults to UTC"): + date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"') + print(date_result) + assert ( + date_result == "1970-01-01 00:00:00\n" + ), "Timezone seems to be wrong (not UTC)" - with subtest("imperative - Ensure timezone adjustment produces expected result"): - node_nulltz.succeed("timedatectl set-timezone Asia/Tokyo") + with subtest("imperative - Ensure timezone adjustment produces expected result"): + node_nulltz.succeed("timedatectl set-timezone Asia/Tokyo") - # Adjustment should be taken into account - date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"') - print(date_result) - assert date_result == "1970-01-01 09:00:00\n", "Timezone was not adjusted" + # Adjustment should be taken into account + date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"') + print(date_result) + assert date_result == "1970-01-01 09:00:00\n", "Timezone was not adjusted" - with subtest("imperative - Ensure timezone adjustment persists across reboot"): - # Adjustment should persist across a reboot - node_nulltz.shutdown() - node_nulltz.wait_for_unit("dbus.socket") - date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"') - print(date_result) - 
assert ( - date_result == "1970-01-01 09:00:00\n" - ), "Timezone adjustment was not persisted" - ''; - } -) + with subtest("imperative - Ensure timezone adjustment persists across reboot"): + # Adjustment should persist across a reboot + node_nulltz.shutdown() + node_nulltz.wait_for_unit("dbus.socket") + date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"') + print(date_result) + assert ( + date_result == "1970-01-01 09:00:00\n" + ), "Timezone adjustment was not persisted" + ''; +} diff --git a/nixos/tests/tinydns.nix b/nixos/tests/tinydns.nix index 74f5a9413752..fc07983f529a 100644 --- a/nixos/tests/tinydns.nix +++ b/nixos/tests/tinydns.nix @@ -1,46 +1,44 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "tinydns"; - meta = { - maintainers = with lib.maintainers; [ basvandijk ]; - }; - nodes = { - nameserver = - { config, lib, ... }: - let - ip = (lib.head config.networking.interfaces.eth1.ipv4.addresses).address; - in - { - networking.nameservers = [ ip ]; - services.tinydns = { - enable = true; - inherit ip; - data = '' - .foo.bar:${ip} - +.bla.foo.bar:1.2.3.4:300 - ''; - }; +{ lib, ... }: +{ + name = "tinydns"; + meta = { + maintainers = with lib.maintainers; [ basvandijk ]; + }; + nodes = { + nameserver = + { config, lib, ... }: + let + ip = (lib.head config.networking.interfaces.eth1.ipv4.addresses).address; + in + { + networking.nameservers = [ ip ]; + services.tinydns = { + enable = true; + inherit ip; + data = '' + .foo.bar:${ip} + +.bla.foo.bar:1.2.3.4:300 + ''; }; - }; - testScript = '' - nameserver.start() - nameserver.wait_for_unit("tinydns.service") + }; + }; + testScript = '' + nameserver.start() + nameserver.wait_for_unit("tinydns.service") - # We query tinydns a few times to trigger the bug: - # - # nameserver # [ 6.105872] mmap: tinydns (842): VmData 331776 exceed data ulimit 300000. Update limits or use boot option ignore_rlimit_data. - # - # which was reported in https://github.com/NixOS/nixpkgs/issues/119066. - # Without the patch - # it fails on the 10th iteration. - nameserver.succeed( - """ - for i in {1..100}; do - host bla.foo.bar 192.168.1.1 | grep '1\.2\.3\.4' - done - """ - ) - ''; - } -) + # We query tinydns a few times to trigger the bug: + # + # nameserver # [ 6.105872] mmap: tinydns (842): VmData 331776 exceed data ulimit 300000. Update limits or use boot option ignore_rlimit_data. + # + # which was reported in https://github.com/NixOS/nixpkgs/issues/119066. + # Without the patch + # it fails on the 10th iteration. + nameserver.succeed( + """ + for i in {1..100}; do + host bla.foo.bar 192.168.1.1 | grep '1\.2\.3\.4' + done + """ + ) + ''; +} diff --git a/nixos/tests/tinyproxy.nix b/nixos/tests/tinyproxy.nix index 4184f68fd181..c1bcd0428503 100644 --- a/nixos/tests/tinyproxy.nix +++ b/nixos/tests/tinyproxy.nix @@ -1,25 +1,23 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "tinyproxy"; +{ pkgs, ... }: +{ + name = "tinyproxy"; - nodes.machine = - { config, pkgs, ... }: - { - services.tinyproxy = { - enable = true; - settings = { - Listen = "127.0.0.1"; - Port = 8080; - }; + nodes.machine = + { config, pkgs, ... 
}: + { + services.tinyproxy = { + enable = true; + settings = { + Listen = "127.0.0.1"; + Port = 8080; }; }; + }; - testScript = '' - machine.wait_for_unit("tinyproxy.service") - machine.wait_for_open_port(8080) + testScript = '' + machine.wait_for_unit("tinyproxy.service") + machine.wait_for_open_port(8080) - machine.succeed('curl -s http://localhost:8080 |grep -i tinyproxy') - ''; - } -) + machine.succeed('curl -s http://localhost:8080 |grep -i tinyproxy') + ''; +} diff --git a/nixos/tests/tinywl.nix b/nixos/tests/tinywl.nix index 85788d74ec89..aa0df6b6b1df 100644 --- a/nixos/tests/tinywl.nix +++ b/nixos/tests/tinywl.nix @@ -1,69 +1,67 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - { - name = "tinywl"; - meta = { - maintainers = with lib.maintainers; [ primeos ]; - }; +{ + name = "tinywl"; + meta = { + maintainers = with lib.maintainers; [ primeos ]; + }; - nodes.machine = - { config, ... }: - { - # Automatically login on tty1 as a normal user: - imports = [ ./common/user-account.nix ]; - services.getty.autologinUser = "alice"; - security.polkit.enable = true; + nodes.machine = + { config, ... }: + { + # Automatically login on tty1 as a normal user: + imports = [ ./common/user-account.nix ]; + services.getty.autologinUser = "alice"; + security.polkit.enable = true; - environment = { - systemPackages = with pkgs; [ - tinywl - foot - wayland-utils - ]; - }; - - hardware.graphics.enable = true; - - # Automatically start TinyWL when logging in on tty1: - programs.bash.loginShellInit = '' - if [ "$(tty)" = "/dev/tty1" ]; then - set -e - test ! -e /tmp/tinywl.log # Only start tinywl once - readonly TEST_CMD="wayland-info |& tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok; read" - readonly FOOT_CMD="foot sh -c '$TEST_CMD'" - tinywl -s "$FOOT_CMD" |& tee /tmp/tinywl.log - touch /tmp/tinywl-exit-ok - fi - ''; - - # Switch to a different GPU driver (default: -vga std), otherwise TinyWL segfaults: - virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; + environment = { + systemPackages = with pkgs; [ + tinywl + foot + wayland-utils + ]; }; - testScript = - { nodes, ... }: - '' - start_all() - machine.wait_for_unit("multi-user.target") + hardware.graphics.enable = true; - # Wait for complete startup: - machine.wait_until_succeeds("pgrep tinywl") - machine.wait_for_file("/run/user/1000/wayland-0") - machine.wait_until_succeeds("pgrep foot") - machine.wait_for_file("/tmp/test-wayland-exit-ok") - - # Make a screenshot and save the result: - machine.screenshot("tinywl_foot") - print(machine.succeed("cat /tmp/test-wayland.out")) - machine.copy_from_vm("/tmp/test-wayland.out") - - # Terminate cleanly: - machine.send_key("alt-esc") - machine.wait_until_fails("pgrep foot") - machine.wait_until_fails("pgrep tinywl") - machine.wait_for_file("/tmp/tinywl-exit-ok") - machine.copy_from_vm("/tmp/tinywl.log") + # Automatically start TinyWL when logging in on tty1: + programs.bash.loginShellInit = '' + if [ "$(tty)" = "/dev/tty1" ]; then + set -e + test ! -e /tmp/tinywl.log # Only start tinywl once + readonly TEST_CMD="wayland-info |& tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok; read" + readonly FOOT_CMD="foot sh -c '$TEST_CMD'" + tinywl -s "$FOOT_CMD" |& tee /tmp/tinywl.log + touch /tmp/tinywl-exit-ok + fi ''; - } -) + + # Switch to a different GPU driver (default: -vga std), otherwise TinyWL segfaults: + virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ]; + }; + + testScript = + { nodes, ... 
}: + '' + start_all() + machine.wait_for_unit("multi-user.target") + + # Wait for complete startup: + machine.wait_until_succeeds("pgrep tinywl") + machine.wait_for_file("/run/user/1000/wayland-0") + machine.wait_until_succeeds("pgrep foot") + machine.wait_for_file("/tmp/test-wayland-exit-ok") + + # Make a screenshot and save the result: + machine.screenshot("tinywl_foot") + print(machine.succeed("cat /tmp/test-wayland.out")) + machine.copy_from_vm("/tmp/test-wayland.out") + + # Terminate cleanly: + machine.send_key("alt-esc") + machine.wait_until_fails("pgrep foot") + machine.wait_until_fails("pgrep tinywl") + machine.wait_for_file("/tmp/tinywl-exit-ok") + machine.copy_from_vm("/tmp/tinywl.log") + ''; +} diff --git a/nixos/tests/tmate-ssh-server.nix b/nixos/tests/tmate-ssh-server.nix index 0d529d6e6812..daf0321c6971 100644 --- a/nixos/tests/tmate-ssh-server.nix +++ b/nixos/tests/tmate-ssh-server.nix @@ -1,85 +1,83 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - inherit (import ./ssh-keys.nix pkgs) - snakeOilPrivateKey - snakeOilPublicKey - ; +{ pkgs, lib, ... }: +let + inherit (import ./ssh-keys.nix pkgs) + snakeOilPrivateKey + snakeOilPublicKey + ; - setUpPrivateKey = name: '' - ${name}.succeed( - "mkdir -p /root/.ssh", - "chmod 700 /root/.ssh", - "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil", - "chmod 600 /root/.ssh/id_snakeoil", - ) - ${name}.wait_for_file("/root/.ssh/id_snakeoil") - ''; + setUpPrivateKey = name: '' + ${name}.succeed( + "mkdir -p /root/.ssh", + "chmod 700 /root/.ssh", + "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil", + "chmod 600 /root/.ssh/id_snakeoil", + ) + ${name}.wait_for_file("/root/.ssh/id_snakeoil") + ''; - sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil"; + sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil"; - in - { - name = "tmate-ssh-server"; - nodes = { - server = - { ... }: - { - services.tmate-ssh-server = { - enable = true; - port = 2223; - openFirewall = true; - }; +in +{ + name = "tmate-ssh-server"; + nodes = { + server = + { ... }: + { + services.tmate-ssh-server = { + enable = true; + port = 2223; + openFirewall = true; }; - client = - { ... }: - { - environment.systemPackages = [ pkgs.tmate ]; - services.openssh.enable = true; - users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; - }; - client2 = - { ... }: - { - environment.systemPackages = [ pkgs.openssh ]; - }; - }; - testScript = '' - start_all() + }; + client = + { ... }: + { + environment.systemPackages = [ pkgs.tmate ]; + services.openssh.enable = true; + users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; + }; + client2 = + { ... 
}: + { + environment.systemPackages = [ pkgs.openssh ]; + }; + }; + testScript = '' + start_all() - server.wait_for_unit("tmate-ssh-server.service") - server.wait_for_open_port(2223) - server.wait_for_file("/etc/tmate-ssh-server-keys/ssh_host_ed25519_key.pub") - server.wait_for_file("/etc/tmate-ssh-server-keys/ssh_host_rsa_key.pub") - server.succeed("tmate-client-config > /tmp/tmate.conf") - server.wait_for_file("/tmp/tmate.conf") + server.wait_for_unit("tmate-ssh-server.service") + server.wait_for_open_port(2223) + server.wait_for_file("/etc/tmate-ssh-server-keys/ssh_host_ed25519_key.pub") + server.wait_for_file("/etc/tmate-ssh-server-keys/ssh_host_rsa_key.pub") + server.succeed("tmate-client-config > /tmp/tmate.conf") + server.wait_for_file("/tmp/tmate.conf") - ${setUpPrivateKey "server"} - client.wait_for_unit("sshd.service") - client.wait_for_open_port(22) - server.succeed("scp ${sshOpts} /tmp/tmate.conf client:/tmp/tmate.conf") + ${setUpPrivateKey "server"} + client.wait_for_unit("sshd.service") + client.wait_for_open_port(22) + server.succeed("scp ${sshOpts} /tmp/tmate.conf client:/tmp/tmate.conf") - client.wait_for_file("/tmp/tmate.conf") - client.wait_until_tty_matches("1", "login:") - client.send_chars("root\n") - client.sleep(2) - client.send_chars("tmate -f /tmp/tmate.conf\n") - client.sleep(2) - client.send_chars("q") - client.sleep(2) - client.send_chars("tmate display -p '#{tmate_ssh}' > /tmp/ssh_command\n") - client.wait_for_file("/tmp/ssh_command") - ssh_cmd = client.succeed("cat /tmp/ssh_command") + client.wait_for_file("/tmp/tmate.conf") + client.wait_until_tty_matches("1", "login:") + client.send_chars("root\n") + client.sleep(2) + client.send_chars("tmate -f /tmp/tmate.conf\n") + client.sleep(2) + client.send_chars("q") + client.sleep(2) + client.send_chars("tmate display -p '#{tmate_ssh}' > /tmp/ssh_command\n") + client.wait_for_file("/tmp/ssh_command") + ssh_cmd = client.succeed("cat /tmp/ssh_command") - client2.succeed("mkdir -p ~/.ssh; ssh-keyscan -4 -p 2223 server > ~/.ssh/known_hosts") - client2.wait_until_tty_matches("1", "login:") - client2.send_chars("root\n") - client2.sleep(2) - client2.send_chars(ssh_cmd.strip() + "\n") - client2.sleep(2) - client2.send_chars("touch /tmp/client_2\n") + client2.succeed("mkdir -p ~/.ssh; ssh-keyscan -4 -p 2223 server > ~/.ssh/known_hosts") + client2.wait_until_tty_matches("1", "login:") + client2.send_chars("root\n") + client2.sleep(2) + client2.send_chars(ssh_cmd.strip() + "\n") + client2.sleep(2) + client2.send_chars("touch /tmp/client_2\n") - client.wait_for_file("/tmp/client_2") - ''; - } -) + client.wait_for_file("/tmp/client_2") + ''; +} diff --git a/nixos/tests/tomcat.nix b/nixos/tests/tomcat.nix index 6d8b9f496454..a025f54b8035 100644 --- a/nixos/tests/tomcat.nix +++ b/nixos/tests/tomcat.nix @@ -1,33 +1,31 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "tomcat"; - meta.maintainers = [ lib.maintainers.anthonyroussel ]; +{ lib, pkgs, ... }: +{ + name = "tomcat"; + meta.maintainers = [ lib.maintainers.anthonyroussel ]; - nodes.machine = - { pkgs, ... }: - { - services.tomcat = { - enable = true; - port = 8001; - axis2.enable = true; - }; + nodes.machine = + { pkgs, ... 
}: + { + services.tomcat = { + enable = true; + port = 8001; + axis2.enable = true; }; + }; - testScript = '' - machine.wait_for_unit("tomcat.service") - machine.wait_for_open_port(8001) - machine.wait_for_file("/var/tomcat/webapps/examples"); + testScript = '' + machine.wait_for_unit("tomcat.service") + machine.wait_for_open_port(8001) + machine.wait_for_file("/var/tomcat/webapps/examples"); - machine.succeed( - "curl -sS --fail http://localhost:8001/examples/servlets/servlet/HelloWorldExample | grep 'Hello World!'" - ) - machine.succeed( - "curl -sS --fail http://localhost:8001/examples/jsp/jsp2/simpletag/hello.jsp | grep 'Hello, world!'" - ) - machine.succeed( - "curl -sS --fail http://localhost:8001/axis2/axis2-web/HappyAxis.jsp | grep 'Found Axis2'" - ) - ''; - } -) + machine.succeed( + "curl -sS --fail http://localhost:8001/examples/servlets/servlet/HelloWorldExample | grep 'Hello World!'" + ) + machine.succeed( + "curl -sS --fail http://localhost:8001/examples/jsp/jsp2/simpletag/hello.jsp | grep 'Hello, world!'" + ) + machine.succeed( + "curl -sS --fail http://localhost:8001/axis2/axis2-web/HappyAxis.jsp | grep 'Found Axis2'" + ) + ''; +} diff --git a/nixos/tests/tor.nix b/nixos/tests/tor.nix index 6eff3c7e260a..ce35a38c7e0b 100644 --- a/nixos/tests/tor.nix +++ b/nixos/tests/tor.nix @@ -1,32 +1,30 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "tor"; - meta.maintainers = with lib.maintainers; [ joachifm ]; +{ lib, ... }: +{ + name = "tor"; + meta.maintainers = with lib.maintainers; [ joachifm ]; - nodes.client = - { pkgs, ... }: - { - boot.kernelParams = [ - "audit=0" - "apparmor=0" - "quiet" - ]; - networking.firewall.enable = false; - networking.useDHCP = false; + nodes.client = + { pkgs, ... }: + { + boot.kernelParams = [ + "audit=0" + "apparmor=0" + "quiet" + ]; + networking.firewall.enable = false; + networking.useDHCP = false; - environment.systemPackages = [ pkgs.netcat ]; - services.tor.enable = true; - services.tor.client.enable = true; - services.tor.settings.ControlPort = 9051; - }; + environment.systemPackages = [ pkgs.netcat ]; + services.tor.enable = true; + services.tor.client.enable = true; + services.tor.settings.ControlPort = 9051; + }; - testScript = '' - client.wait_for_unit("tor.service") - client.wait_for_open_port(9051) - assert "514 Authentication required." in client.succeed( - "echo GETINFO version | nc 127.0.0.1 9051" - ) - ''; - } -) + testScript = '' + client.wait_for_unit("tor.service") + client.wait_for_open_port(9051) + assert "514 Authentication required." in client.succeed( + "echo GETINFO version | nc 127.0.0.1 9051" + ) + ''; +} diff --git a/nixos/tests/trafficserver.nix b/nixos/tests/trafficserver.nix index 9a64534e4357..e8a6fdb1f8fb 100644 --- a/nixos/tests/trafficserver.nix +++ b/nixos/tests/trafficserver.nix @@ -19,188 +19,186 @@ # - bin/traffic_logcat # - bin/traffic_logstats # - bin/tspush -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "trafficserver"; - meta = with pkgs.lib.maintainers; { - maintainers = [ midchildan ]; - }; +{ pkgs, ... }: +{ + name = "trafficserver"; + meta = with pkgs.lib.maintainers; { + maintainers = [ midchildan ]; + }; - nodes = { - ats = - { - pkgs, - lib, - config, - ... 
- }: - let - user = config.users.users.trafficserver.name; - group = config.users.groups.trafficserver.name; - healthchecks = pkgs.writeText "healthchecks.conf" '' - /status /tmp/ats.status text/plain 200 500 - ''; - in - { - services.trafficserver.enable = true; - - services.trafficserver.records = { - proxy.config.http.server_ports = "80 80:ipv6"; - proxy.config.hostdb.host_file.path = "/etc/hosts"; - proxy.config.log.max_space_mb_headroom = 0; - proxy.config.http.push_method_enabled = 1; - - # check that cache storage is usable before accepting traffic - proxy.config.http.wait_for_cache = 2; - }; - - services.trafficserver.plugins = [ - { - path = "healthchecks.so"; - arg = toString healthchecks; - } - { path = "xdebug.so"; } - ]; - - services.trafficserver.remap = '' - map http://httpbin.test http://httpbin - map http://pristine-host-hdr.test http://httpbin \ - @plugin=conf_remap.so \ - @pparam=proxy.config.url_remap.pristine_host_hdr=1 - map http://ats/tspush http://httpbin/cache \ - @plugin=conf_remap.so \ - @pparam=proxy.config.http.cache.required_headers=0 - ''; - - services.trafficserver.storage = '' - /dev/vdb volume=1 - ''; - - networking.firewall.allowedTCPPorts = [ 80 ]; - virtualisation.emptyDiskImages = [ 256 ]; - services.udev.extraRules = '' - KERNEL=="vdb", OWNER="${user}", GROUP="${group}" - ''; - }; - - httpbin = - { pkgs, lib, ... }: - let - python = pkgs.python3.withPackages ( - ps: with ps; [ - httpbin - gunicorn - gevent - ] - ); - in - { - systemd.services.httpbin = { - enable = true; - after = [ "network.target" ]; - wantedBy = [ "multi-user.target" ]; - serviceConfig = { - ExecStart = "${python}/bin/gunicorn -b 0.0.0.0:80 httpbin:app -k gevent"; - }; - }; - - networking.firewall.allowedTCPPorts = [ 80 ]; - }; - - client = - { pkgs, lib, ... }: - { - environment.systemPackages = with pkgs; [ curl ]; - }; - }; - - testScript = - { nodes, ... }: + nodes = { + ats = + { + pkgs, + lib, + config, + ... + }: let - sampleFile = pkgs.writeText "sample.txt" '' - It's the season of White Album. 
+ user = config.users.users.trafficserver.name; + group = config.users.groups.trafficserver.name; + healthchecks = pkgs.writeText "healthchecks.conf" '' + /status /tmp/ats.status text/plain 200 500 ''; in - '' - import json - import re + { + services.trafficserver.enable = true; - ats.wait_for_unit("trafficserver") - ats.wait_for_open_port(80) - httpbin.wait_for_unit("httpbin") - httpbin.wait_for_open_port(80) - client.systemctl("start network-online.target") - client.wait_for_unit("network-online.target") + services.trafficserver.records = { + proxy.config.http.server_ports = "80 80:ipv6"; + proxy.config.hostdb.host_file.path = "/etc/hosts"; + proxy.config.log.max_space_mb_headroom = 0; + proxy.config.http.push_method_enabled = 1; - with subtest("Traffic Server is running"): - out = ats.succeed("traffic_ctl server status") - assert out.strip() == "Proxy -- on" + # check that cache storage is usable before accepting traffic + proxy.config.http.wait_for_cache = 2; + }; - with subtest("traffic_crashlog is running"): - ats.succeed("pgrep -f traffic_crashlog") + services.trafficserver.plugins = [ + { + path = "healthchecks.so"; + arg = toString healthchecks; + } + { path = "xdebug.so"; } + ]; - with subtest("basic remapping works"): - out = client.succeed("curl -vv -H 'Host: httpbin.test' http://ats/headers") - assert json.loads(out)["headers"]["Host"] == "httpbin" + services.trafficserver.remap = '' + map http://httpbin.test http://httpbin + map http://pristine-host-hdr.test http://httpbin \ + @plugin=conf_remap.so \ + @pparam=proxy.config.url_remap.pristine_host_hdr=1 + map http://ats/tspush http://httpbin/cache \ + @plugin=conf_remap.so \ + @pparam=proxy.config.http.cache.required_headers=0 + ''; - with subtest("conf_remap plugin works"): - out = client.succeed( - "curl -vv -H 'Host: pristine-host-hdr.test' http://ats/headers" - ) - assert json.loads(out)["headers"]["Host"] == "pristine-host-hdr.test" + services.trafficserver.storage = '' + /dev/vdb volume=1 + ''; - with subtest("caching works"): - out = client.succeed( - "curl -vv -D - -H 'Host: httpbin.test' -H 'X-Debug: X-Cache' http://ats/cache/60 -o /dev/null" - ) - assert "X-Cache: miss" in out + networking.firewall.allowedTCPPorts = [ 80 ]; + virtualisation.emptyDiskImages = [ 256 ]; + services.udev.extraRules = '' + KERNEL=="vdb", OWNER="${user}", GROUP="${group}" + ''; + }; - out = client.succeed( - "curl -vv -D - -H 'Host: httpbin.test' -H 'X-Debug: X-Cache' http://ats/cache/60 -o /dev/null" - ) - assert "X-Cache: hit-fresh" in out + httpbin = + { pkgs, lib, ... }: + let + python = pkgs.python3.withPackages ( + ps: with ps; [ + httpbin + gunicorn + gevent + ] + ); + in + { + systemd.services.httpbin = { + enable = true; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + ExecStart = "${python}/bin/gunicorn -b 0.0.0.0:80 httpbin:app -k gevent"; + }; + }; - with subtest("pushing to cache works"): - url = "http://ats/tspush" + networking.firewall.allowedTCPPorts = [ 80 ]; + }; - ats.succeed(f"echo {url} > /tmp/urls.txt") - out = ats.succeed( - f"tspush -f '${sampleFile}' -u {url}" - ) - assert "HTTP/1.0 201 Created" in out, "cache push failed" + client = + { pkgs, lib, ... 
}: + { + environment.systemPackages = with pkgs; [ curl ]; + }; + }; - out = ats.succeed( - "traffic_cache_tool --spans /etc/trafficserver/storage.config find --input /tmp/urls.txt" - ) - assert "Span: /dev/vdb" in out, "cache not stored on disk" - - out = client.succeed(f"curl {url}").strip() - expected = ( - open("${sampleFile}").read().strip() - ) - assert out == expected, "cache content mismatch" - - with subtest("healthcheck plugin works"): - out = client.succeed("curl -vv http://ats/status -o /dev/null -w '%{http_code}'") - assert out.strip() == "500" - - ats.succeed("touch /tmp/ats.status") - - out = client.succeed("curl -vv http://ats/status -o /dev/null -w '%{http_code}'") - assert out.strip() == "200" - - with subtest("logging works"): - access_log_path = "/var/log/trafficserver/squid.blog" - ats.wait_for_file(access_log_path) - - out = ats.succeed(f"traffic_logcat {access_log_path}").split("\n")[0] - expected = "^\S+ \S+ \S+ TCP_MISS/200 \S+ GET http://httpbin/headers - DIRECT/httpbin application/json$" - assert re.fullmatch(expected, out) is not None, "no matching logs" - - out = json.loads(ats.succeed(f"traffic_logstats -jf {access_log_path}")) - assert isinstance(out, dict) - assert out["total"]["error.total"]["req"] == "0", "unexpected log stat" + testScript = + { nodes, ... }: + let + sampleFile = pkgs.writeText "sample.txt" '' + It's the season of White Album. ''; - } -) + in + '' + import json + import re + + ats.wait_for_unit("trafficserver") + ats.wait_for_open_port(80) + httpbin.wait_for_unit("httpbin") + httpbin.wait_for_open_port(80) + client.systemctl("start network-online.target") + client.wait_for_unit("network-online.target") + + with subtest("Traffic Server is running"): + out = ats.succeed("traffic_ctl server status") + assert out.strip() == "Proxy -- on" + + with subtest("traffic_crashlog is running"): + ats.succeed("pgrep -f traffic_crashlog") + + with subtest("basic remapping works"): + out = client.succeed("curl -vv -H 'Host: httpbin.test' http://ats/headers") + assert json.loads(out)["headers"]["Host"] == "httpbin" + + with subtest("conf_remap plugin works"): + out = client.succeed( + "curl -vv -H 'Host: pristine-host-hdr.test' http://ats/headers" + ) + assert json.loads(out)["headers"]["Host"] == "pristine-host-hdr.test" + + with subtest("caching works"): + out = client.succeed( + "curl -vv -D - -H 'Host: httpbin.test' -H 'X-Debug: X-Cache' http://ats/cache/60 -o /dev/null" + ) + assert "X-Cache: miss" in out + + out = client.succeed( + "curl -vv -D - -H 'Host: httpbin.test' -H 'X-Debug: X-Cache' http://ats/cache/60 -o /dev/null" + ) + assert "X-Cache: hit-fresh" in out + + with subtest("pushing to cache works"): + url = "http://ats/tspush" + + ats.succeed(f"echo {url} > /tmp/urls.txt") + out = ats.succeed( + f"tspush -f '${sampleFile}' -u {url}" + ) + assert "HTTP/1.0 201 Created" in out, "cache push failed" + + out = ats.succeed( + "traffic_cache_tool --spans /etc/trafficserver/storage.config find --input /tmp/urls.txt" + ) + assert "Span: /dev/vdb" in out, "cache not stored on disk" + + out = client.succeed(f"curl {url}").strip() + expected = ( + open("${sampleFile}").read().strip() + ) + assert out == expected, "cache content mismatch" + + with subtest("healthcheck plugin works"): + out = client.succeed("curl -vv http://ats/status -o /dev/null -w '%{http_code}'") + assert out.strip() == "500" + + ats.succeed("touch /tmp/ats.status") + + out = client.succeed("curl -vv http://ats/status -o /dev/null -w '%{http_code}'") + assert out.strip() == "200" + + 
with subtest("logging works"): + access_log_path = "/var/log/trafficserver/squid.blog" + ats.wait_for_file(access_log_path) + + out = ats.succeed(f"traffic_logcat {access_log_path}").split("\n")[0] + expected = "^\S+ \S+ \S+ TCP_MISS/200 \S+ GET http://httpbin/headers - DIRECT/httpbin application/json$" + assert re.fullmatch(expected, out) is not None, "no matching logs" + + out = json.loads(ats.succeed(f"traffic_logstats -jf {access_log_path}")) + assert isinstance(out, dict) + assert out["total"]["error.total"]["req"] == "0", "unexpected log stat" + ''; +} diff --git a/nixos/tests/transfer-sh.nix b/nixos/tests/transfer-sh.nix index 32750b1fdc25..dc628db33f1e 100644 --- a/nixos/tests/transfer-sh.nix +++ b/nixos/tests/transfer-sh.nix @@ -1,25 +1,23 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "transfer-sh"; +{ pkgs, lib, ... }: +{ + name = "transfer-sh"; - meta = { - maintainers = with lib.maintainers; [ ocfox ]; + meta = { + maintainers = with lib.maintainers; [ ocfox ]; + }; + + nodes.machine = + { pkgs, ... }: + { + services.transfer-sh = { + enable = true; + settings.LISTENER = ":1234"; + }; }; - nodes.machine = - { pkgs, ... }: - { - services.transfer-sh = { - enable = true; - settings.LISTENER = ":1234"; - }; - }; - - testScript = '' - machine.wait_for_unit("transfer-sh.service") - machine.wait_for_open_port(1234) - machine.succeed("curl --fail http://localhost:1234/") - ''; - } -) + testScript = '' + machine.wait_for_unit("transfer-sh.service") + machine.wait_for_open_port(1234) + machine.succeed("curl --fail http://localhost:1234/") + ''; +} diff --git a/nixos/tests/trezord.nix b/nixos/tests/trezord.nix index d8b85d99f09c..4955ecb9c714 100644 --- a/nixos/tests/trezord.nix +++ b/nixos/tests/trezord.nix @@ -1,27 +1,25 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "trezord"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ - mmahut - _1000101 - ]; - }; - nodes = { - machine = - { ... }: - { - services.trezord.enable = true; - services.trezord.emulator.enable = true; - }; - }; +{ pkgs, ... }: +{ + name = "trezord"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ + mmahut + _1000101 + ]; + }; + nodes = { + machine = + { ... }: + { + services.trezord.enable = true; + services.trezord.emulator.enable = true; + }; + }; - testScript = '' - start_all() - machine.wait_for_unit("trezord.service") - machine.wait_for_open_port(21325) - machine.wait_until_succeeds("curl -fL http://localhost:21325/status/ | grep Version") - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("trezord.service") + machine.wait_for_open_port(21325) + machine.wait_until_succeeds("curl -fL http://localhost:21325/status/ | grep Version") + ''; +} diff --git a/nixos/tests/trickster.nix b/nixos/tests/trickster.nix index 97ee1855496e..e987341bc7de 100644 --- a/nixos/tests/trickster.nix +++ b/nixos/tests/trickster.nix @@ -1,44 +1,42 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "trickster"; - meta = with pkgs.lib; { - maintainers = with maintainers; [ _1000101 ]; - }; +{ pkgs, ... }: +{ + name = "trickster"; + meta = with pkgs.lib; { + maintainers = with maintainers; [ _1000101 ]; + }; - nodes = { - prometheus = - { ... }: - { - services.prometheus.enable = true; - networking.firewall.allowedTCPPorts = [ 9090 ]; - }; - trickster = - { ... }: - { - services.trickster.enable = true; - }; - }; + nodes = { + prometheus = + { ... 
}: + { + services.prometheus.enable = true; + networking.firewall.allowedTCPPorts = [ 9090 ]; + }; + trickster = + { ... }: + { + services.trickster.enable = true; + }; + }; - testScript = '' - start_all() - prometheus.wait_for_unit("prometheus.service") - prometheus.wait_for_open_port(9090) - prometheus.wait_until_succeeds( - "curl -fL http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'" - ) - trickster.wait_for_unit("trickster.service") - trickster.wait_for_open_port(8082) - trickster.wait_for_open_port(9090) - trickster.wait_until_succeeds( - "curl -fL http://localhost:8082/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'" - ) - trickster.wait_until_succeeds( - "curl -fL http://prometheus:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'" - ) - trickster.wait_until_succeeds( - "curl -fL http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'" - ) - ''; - } -) + testScript = '' + start_all() + prometheus.wait_for_unit("prometheus.service") + prometheus.wait_for_open_port(9090) + prometheus.wait_until_succeeds( + "curl -fL http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'" + ) + trickster.wait_for_unit("trickster.service") + trickster.wait_for_open_port(8082) + trickster.wait_for_open_port(9090) + trickster.wait_until_succeeds( + "curl -fL http://localhost:8082/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'" + ) + trickster.wait_until_succeeds( + "curl -fL http://prometheus:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'" + ) + trickster.wait_until_succeeds( + "curl -fL http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'" + ) + ''; +} diff --git a/nixos/tests/tsm-client-gui.nix b/nixos/tests/tsm-client-gui.nix index edae66f67028..7e3dbb6f116a 100644 --- a/nixos/tests/tsm-client-gui.nix +++ b/nixos/tests/tsm-client-gui.nix @@ -5,58 +5,56 @@ # to show its main application window # and verifies some configuration information. -import ./make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "tsm-client"; +{ lib, pkgs, ... }: +{ + name = "tsm-client"; - enableOCR = true; + enableOCR = true; - nodes.machine = - { pkgs, ... }: - { - imports = [ ./common/x11.nix ]; - programs.tsmClient = { - enable = true; - package = pkgs.tsm-client-withGui; - defaultServername = "testserver"; - servers.testserver = { - # 192.0.0.8 is a "dummy address" according to RFC 7600 - tcpserveraddress = "192.0.0.8"; - nodename = "SOME-NODE"; - passworddir = "/tmp"; - }; + nodes.machine = + { pkgs, ... }: + { + imports = [ ./common/x11.nix ]; + programs.tsmClient = { + enable = true; + package = pkgs.tsm-client-withGui; + defaultServername = "testserver"; + servers.testserver = { + # 192.0.0.8 is a "dummy address" according to RFC 7600 + tcpserveraddress = "192.0.0.8"; + nodename = "SOME-NODE"; + passworddir = "/tmp"; }; }; + }; - testScript = '' - machine.succeed("which dsmj") # fail early if this is missing - machine.wait_for_x() - machine.execute("DSM_LOG=/tmp dsmj -optfile=/dev/null >&2 &") + testScript = '' + machine.succeed("which dsmj") # fail early if this is missing + machine.wait_for_x() + machine.execute("DSM_LOG=/tmp dsmj -optfile=/dev/null >&2 &") - # does it report the "TCP/IP connection failure" error code? 
- machine.wait_for_window("IBM Storage Protect") - machine.wait_for_text("ANS2610S") - machine.send_key("esc") + # does it report the "TCP/IP connection failure" error code? + machine.wait_for_window("IBM Storage Protect") + machine.wait_for_text("ANS2610S") + machine.send_key("esc") - # it asks to continue to restore a local backupset now; - # "yes" (return) leads to the main application window - machine.wait_for_text("backupset") - machine.send_key("ret") + # it asks to continue to restore a local backupset now; + # "yes" (return) leads to the main application window + machine.wait_for_text("backupset") + machine.send_key("ret") - # main window: navigate to "Connection Information" - machine.wait_for_text("Welcome") - machine.send_key("alt-f") # "File" menu - machine.send_key("c") # "Connection Information" + # main window: navigate to "Connection Information" + machine.wait_for_text("Welcome") + machine.send_key("alt-f") # "File" menu + machine.send_key("c") # "Connection Information" - # "Connection Information" dialog box - machine.wait_for_window("Connection Information") - machine.wait_for_text("SOME-NODE") - machine.wait_for_text("${pkgs.tsm-client.passthru.unwrapped.version}") + # "Connection Information" dialog box + machine.wait_for_window("Connection Information") + machine.wait_for_text("SOME-NODE") + machine.wait_for_text("${pkgs.tsm-client.passthru.unwrapped.version}") - machine.shutdown() - ''; + machine.shutdown() + ''; - meta.maintainers = [ lib.maintainers.yarny ]; - } -) + meta.maintainers = [ lib.maintainers.yarny ]; +} diff --git a/nixos/tests/tuptime.nix b/nixos/tests/tuptime.nix index a879781c55c0..b5bde1280e89 100644 --- a/nixos/tests/tuptime.nix +++ b/nixos/tests/tuptime.nix @@ -1,32 +1,30 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "tuptime"; - meta = with pkgs.lib.maintainers; { - maintainers = [ evils ]; +{ pkgs, ... }: +{ + name = "tuptime"; + meta = with pkgs.lib.maintainers; { + maintainers = [ evils ]; + }; + + nodes.machine = + { pkgs, ... }: + { + imports = [ ../modules/profiles/minimal.nix ]; + services.tuptime.enable = true; }; - nodes.machine = - { pkgs, ... 
}: - { - imports = [ ../modules/profiles/minimal.nix ]; - services.tuptime.enable = true; - }; + testScript = '' + # see if it starts + start_all() + machine.wait_for_unit("multi-user.target") + machine.succeed("tuptime | grep 'System startups:[[:blank:]]*1'") + machine.succeed("tuptime | grep 'System uptime:[[:blank:]]*100.0%'") + machine.shutdown() - testScript = '' - # see if it starts - start_all() - machine.wait_for_unit("multi-user.target") - machine.succeed("tuptime | grep 'System startups:[[:blank:]]*1'") - machine.succeed("tuptime | grep 'System uptime:[[:blank:]]*100.0%'") - machine.shutdown() - - # restart machine and see if it correctly reports the reboot - machine.start() - machine.wait_for_unit("multi-user.target") - machine.succeed("tuptime | grep 'System startups:[[:blank:]]*2'") - machine.succeed("tuptime | grep 'System shutdowns:[[:blank:]]*1 ok'") - machine.shutdown() - ''; - } -) + # restart machine and see if it correctly reports the reboot + machine.start() + machine.wait_for_unit("multi-user.target") + machine.succeed("tuptime | grep 'System startups:[[:blank:]]*2'") + machine.succeed("tuptime | grep 'System shutdowns:[[:blank:]]*1 ok'") + machine.shutdown() + ''; +} diff --git a/nixos/tests/turbovnc-headless-server.nix b/nixos/tests/turbovnc-headless-server.nix index 6b3ade7179ff..2c670fc60ee7 100644 --- a/nixos/tests/turbovnc-headless-server.nix +++ b/nixos/tests/turbovnc-headless-server.nix @@ -1,157 +1,155 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "turbovnc-headless-server"; - meta = { - maintainers = with lib.maintainers; [ nh2 ]; - }; +{ pkgs, lib, ... }: +{ + name = "turbovnc-headless-server"; + meta = { + maintainers = with lib.maintainers; [ nh2 ]; + }; - nodes.machine = - { pkgs, ... }: - { + nodes.machine = + { pkgs, ... }: + { - environment.systemPackages = with pkgs; [ - mesa-demos - procps # for `pkill`, `pidof` in the test - scrot # for screenshotting Xorg - turbovnc + environment.systemPackages = with pkgs; [ + mesa-demos + procps # for `pkill`, `pidof` in the test + scrot # for screenshotting Xorg + turbovnc + ]; + + programs.turbovnc.ensureHeadlessSoftwareOpenGL = true; + + networking.firewall = { + # Reject instead of drop, for failures instead of hangs. + rejectPackets = true; + allowedTCPPorts = [ + 5900 # VNC :0, for seeing what's going on in the server ]; - - programs.turbovnc.ensureHeadlessSoftwareOpenGL = true; - - networking.firewall = { - # Reject instead of drop, for failures instead of hangs. - rejectPackets = true; - allowedTCPPorts = [ - 5900 # VNC :0, for seeing what's going on in the server - ]; - }; - - # So that we can ssh into the VM, see e.g. - # https://nixos.org/manual/nixos/stable/#sec-nixos-test-port-forwarding - services.openssh.enable = true; - users.mutableUsers = false; - # `test-instrumentation.nix` already sets an empty root password. - # The following have to all be set to allow an empty SSH login password. - services.openssh.settings.PermitRootLogin = "yes"; - services.openssh.settings.PermitEmptyPasswords = "yes"; - security.pam.services.sshd.allowNullPassword = true; # the default `UsePam yes` makes this necessary }; - testScript = '' - def wait_until_terminated_or_succeeds( - termination_check_shell_command, - success_check_shell_command, - get_detail_message_fn, - retries=60, - retry_sleep=0.5, - ): - def check_success(): - command_exit_code, _output = machine.execute(success_check_shell_command) - return command_exit_code == 0 + # So that we can ssh into the VM, see e.g. 
+ # https://nixos.org/manual/nixos/stable/#sec-nixos-test-port-forwarding + services.openssh.enable = true; + users.mutableUsers = false; + # `test-instrumentation.nix` already sets an empty root password. + # The following have to all be set to allow an empty SSH login password. + services.openssh.settings.PermitRootLogin = "yes"; + services.openssh.settings.PermitEmptyPasswords = "yes"; + security.pam.services.sshd.allowNullPassword = true; # the default `UsePam yes` makes this necessary + }; - for _ in range(retries): - exit_check_exit_code, _output = machine.execute(termination_check_shell_command) - is_terminated = exit_check_exit_code != 0 - if is_terminated: - if check_success(): - return - else: - details = get_detail_message_fn() - raise Exception( - f"termination check ({termination_check_shell_command}) triggered without command succeeding ({success_check_shell_command}); details: {details}" - ) - else: - if check_success(): - return - import time - time.sleep(retry_sleep) + testScript = '' + def wait_until_terminated_or_succeeds( + termination_check_shell_command, + success_check_shell_command, + get_detail_message_fn, + retries=60, + retry_sleep=0.5, + ): + def check_success(): + command_exit_code, _output = machine.execute(success_check_shell_command) + return command_exit_code == 0 - if not check_success(): - details = get_detail_message_fn() - raise Exception( - f"action timed out ({success_check_shell_command}); details: {details}" - ) + for _ in range(retries): + exit_check_exit_code, _output = machine.execute(termination_check_shell_command) + is_terminated = exit_check_exit_code != 0 + if is_terminated: + if check_success(): + return + else: + details = get_detail_message_fn() + raise Exception( + f"termination check ({termination_check_shell_command}) triggered without command succeeding ({success_check_shell_command}); details: {details}" + ) + else: + if check_success(): + return + import time + time.sleep(retry_sleep) + + if not check_success(): + details = get_detail_message_fn() + raise Exception( + f"action timed out ({success_check_shell_command}); details: {details}" + ) - # Below we use the pattern: - # (cmd | tee stdout.log) 3>&1 1>&2 2>&3 | tee stderr.log - # to capture both stderr and stdout while also teeing them, see: - # https://unix.stackexchange.com/questions/6430/how-to-redirect-stderr-and-stdout-to-different-files-and-also-display-in-termina/6431#6431 + # Below we use the pattern: + # (cmd | tee stdout.log) 3>&1 1>&2 2>&3 | tee stderr.log + # to capture both stderr and stdout while also teeing them, see: + # https://unix.stackexchange.com/questions/6430/how-to-redirect-stderr-and-stdout-to-different-files-and-also-display-in-termina/6431#6431 - # Starts headless VNC server, backgrounding it. - def start_xvnc(): - xvnc_command = " ".join( - [ - "Xvnc", - ":0", - "-iglx", - "-auth /root/.Xauthority", - "-geometry 1240x900", - "-depth 24", - "-rfbwait 5000", - "-deferupdate 1", - "-verbose", - "-securitytypes none", - # We don't enforce localhost listening such that we - # can connect from outside the VM using - # env QEMU_NET_OPTS=hostfwd=tcp::5900-:5900 $(nix-build nixos/tests/turbovnc-headless-server.nix -A driver)/bin/nixos-test-driver - # for testing purposes, and so that we can in the future - # add another test case that connects the TurboVNC client. - # "-localhost", - ] - ) - machine.execute( - # Note trailing & for backgrounding. 
- f"({xvnc_command} | tee /tmp/Xvnc.stdout) 3>&1 1>&2 2>&3 | tee /tmp/Xvnc.stderr >&2 &", - ) + # Starts headless VNC server, backgrounding it. + def start_xvnc(): + xvnc_command = " ".join( + [ + "Xvnc", + ":0", + "-iglx", + "-auth /root/.Xauthority", + "-geometry 1240x900", + "-depth 24", + "-rfbwait 5000", + "-deferupdate 1", + "-verbose", + "-securitytypes none", + # We don't enforce localhost listening such that we + # can connect from outside the VM using + # env QEMU_NET_OPTS=hostfwd=tcp::5900-:5900 $(nix-build nixos/tests/turbovnc-headless-server.nix -A driver)/bin/nixos-test-driver + # for testing purposes, and so that we can in the future + # add another test case that connects the TurboVNC client. + # "-localhost", + ] + ) + machine.execute( + # Note trailing & for backgrounding. + f"({xvnc_command} | tee /tmp/Xvnc.stdout) 3>&1 1>&2 2>&3 | tee /tmp/Xvnc.stderr >&2 &", + ) - # Waits until the server log message that tells us that GLX is ready - # (requires `-verbose` above), avoiding screenshoting racing below. - def wait_until_xvnc_glx_ready(): - machine.wait_until_succeeds("test -f /tmp/Xvnc.stderr") - wait_until_terminated_or_succeeds( - termination_check_shell_command="pidof Xvnc", - success_check_shell_command="grep 'GLX: Initialized DRISWRAST' /tmp/Xvnc.stderr", - get_detail_message_fn=lambda: "Contents of /tmp/Xvnc.stderr:\n" - + machine.succeed("cat /tmp/Xvnc.stderr"), - ) + # Waits until the server log message that tells us that GLX is ready + # (requires `-verbose` above), avoiding screenshoting racing below. + def wait_until_xvnc_glx_ready(): + machine.wait_until_succeeds("test -f /tmp/Xvnc.stderr") + wait_until_terminated_or_succeeds( + termination_check_shell_command="pidof Xvnc", + success_check_shell_command="grep 'GLX: Initialized DRISWRAST' /tmp/Xvnc.stderr", + get_detail_message_fn=lambda: "Contents of /tmp/Xvnc.stderr:\n" + + machine.succeed("cat /tmp/Xvnc.stderr"), + ) - # Starts glxgears, backgrounding it. Waits until it prints the `GL_RENDERER`. - # Does not quit glxgears. - def test_glxgears_prints_renderer(): - machine.execute( - # Note trailing & for backgrounding. - "(env DISPLAY=:0 glxgears -info | tee /tmp/glxgears.stdout) 3>&1 1>&2 2>&3 | tee /tmp/glxgears.stderr >&2 &" - ) - machine.wait_until_succeeds("test -f /tmp/glxgears.stderr") - wait_until_terminated_or_succeeds( - termination_check_shell_command="pidof glxgears", - success_check_shell_command="grep 'GL_RENDERER' /tmp/glxgears.stdout", - get_detail_message_fn=lambda: "Contents of /tmp/glxgears.stderr:\n" - + machine.succeed("cat /tmp/glxgears.stderr"), - ) + # Starts glxgears, backgrounding it. Waits until it prints the `GL_RENDERER`. + # Does not quit glxgears. + def test_glxgears_prints_renderer(): + machine.execute( + # Note trailing & for backgrounding. 
+ "(env DISPLAY=:0 glxgears -info | tee /tmp/glxgears.stdout) 3>&1 1>&2 2>&3 | tee /tmp/glxgears.stderr >&2 &" + ) + machine.wait_until_succeeds("test -f /tmp/glxgears.stderr") + wait_until_terminated_or_succeeds( + termination_check_shell_command="pidof glxgears", + success_check_shell_command="grep 'GL_RENDERER' /tmp/glxgears.stdout", + get_detail_message_fn=lambda: "Contents of /tmp/glxgears.stderr:\n" + + machine.succeed("cat /tmp/glxgears.stderr"), + ) - with subtest("Start Xvnc"): - start_xvnc() - wait_until_xvnc_glx_ready() + with subtest("Start Xvnc"): + start_xvnc() + wait_until_xvnc_glx_ready() - with subtest("Run 3D application (glxgears)"): - test_glxgears_prints_renderer() + with subtest("Run 3D application (glxgears)"): + test_glxgears_prints_renderer() - # Take screenshot; should display the glxgears. - machine.succeed("scrot --display :0 /tmp/glxgears.png") + # Take screenshot; should display the glxgears. + machine.succeed("scrot --display :0 /tmp/glxgears.png") - # Copy files down. - machine.copy_from_vm("/tmp/glxgears.png") - machine.copy_from_vm("/tmp/glxgears.stdout") - machine.copy_from_vm("/tmp/Xvnc.stdout") - machine.copy_from_vm("/tmp/Xvnc.stderr") - ''; + # Copy files down. + machine.copy_from_vm("/tmp/glxgears.png") + machine.copy_from_vm("/tmp/glxgears.stdout") + machine.copy_from_vm("/tmp/Xvnc.stdout") + machine.copy_from_vm("/tmp/Xvnc.stderr") + ''; - } -) +} diff --git a/nixos/tests/turn-rs.nix b/nixos/tests/turn-rs.nix index 750a141c224a..4404a50f52d9 100644 --- a/nixos/tests/turn-rs.nix +++ b/nixos/tests/turn-rs.nix @@ -1,65 +1,63 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "turn-rs"; +{ pkgs, ... }: +{ + name = "turn-rs"; - nodes = { - server = { - virtualisation.vlans = [ 1 ]; + nodes = { + server = { + virtualisation.vlans = [ 1 ]; - networking = { - useNetworkd = true; - useDHCP = false; - firewall.enable = false; - }; + networking = { + useNetworkd = true; + useDHCP = false; + firewall.enable = false; + }; - systemd.network.networks."01-eth1" = { - name = "eth1"; - networkConfig.Address = "10.0.0.1/24"; - }; + systemd.network.networks."01-eth1" = { + name = "eth1"; + networkConfig.Address = "10.0.0.1/24"; + }; - services.turn-rs = { - enable = true; - secretFile = pkgs.writeText "secret" '' - USER_1_CREDS="foobar" - ''; - settings = { - turn = { - realm = "localhost"; - interfaces = [ - { - transport = "udp"; - bind = "127.0.0.1:3478"; - external = "127.0.0.1:3478"; - } - { - transport = "tcp"; - bind = "127.0.0.1:3478"; - external = "127.0.0.1:3478"; - } - ]; - }; - - auth.static_credentials.user1 = "$USER_1_CREDS"; + services.turn-rs = { + enable = true; + secretFile = pkgs.writeText "secret" '' + USER_1_CREDS="foobar" + ''; + settings = { + turn = { + realm = "localhost"; + interfaces = [ + { + transport = "udp"; + bind = "127.0.0.1:3478"; + external = "127.0.0.1:3478"; + } + { + transport = "tcp"; + bind = "127.0.0.1:3478"; + external = "127.0.0.1:3478"; + } + ]; }; + + auth.static_credentials.user1 = "$USER_1_CREDS"; }; }; }; + }; - testScript = # python - '' - import json + testScript = # python + '' + import json - start_all() - server.wait_for_unit('turn-rs.service') - server.wait_for_open_port(3000, "127.0.0.1") + start_all() + server.wait_for_unit('turn-rs.service') + server.wait_for_open_port(3000, "127.0.0.1") - info = server.succeed('curl http://localhost:3000/info') - jsonInfo = json.loads(info) - assert len(jsonInfo['interfaces']) == 2, f'Interfaces doesn\'t contain two entries:\n{json.dumps(jsonInfo, indent=2)}' + 
info = server.succeed('curl http://localhost:3000/info') + jsonInfo = json.loads(info) + assert len(jsonInfo['interfaces']) == 2, f'Interfaces doesn\'t contain two entries:\n{json.dumps(jsonInfo, indent=2)}' - config = server.succeed('cat /run/turn-rs/config.toml') - assert 'foobar' in config, f'Secrets are not properly injected:\n{config}' - ''; - } -) + config = server.succeed('cat /run/turn-rs/config.toml') + assert 'foobar' in config, f'Secrets are not properly injected:\n{config}' + ''; +} diff --git a/nixos/tests/txredisapi.nix b/nixos/tests/txredisapi.nix index ecfb79ea13d7..171d3efee78f 100644 --- a/nixos/tests/txredisapi.nix +++ b/nixos/tests/txredisapi.nix @@ -1,40 +1,38 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "txredisapi"; - meta = with pkgs.lib.maintainers; { - maintainers = [ dandellion ]; - }; +{ pkgs, ... }: +{ + name = "txredisapi"; + meta = with pkgs.lib.maintainers; { + maintainers = [ dandellion ]; + }; - nodes = { - machine = - { pkgs, ... }: + nodes = { + machine = + { pkgs, ... }: - { - services.redis.servers."".enable = true; + { + services.redis.servers."".enable = true; - environment.systemPackages = with pkgs; [ - (python3.withPackages (ps: [ - ps.twisted - ps.txredisapi - ps.mock - ])) - ]; - }; - }; + environment.systemPackages = with pkgs; [ + (python3.withPackages (ps: [ + ps.twisted + ps.txredisapi + ps.mock + ])) + ]; + }; + }; - testScript = - { nodes, ... }: - let - inherit (nodes.machine.config.services) redis; - in - '' - start_all() - machine.wait_for_unit("redis") - machine.wait_for_file("${redis.servers."".unixSocket}") - machine.succeed("ln -s ${redis.servers."".unixSocket} /tmp/redis.sock") + testScript = + { nodes, ... }: + let + inherit (nodes.machine.config.services) redis; + in + '' + start_all() + machine.wait_for_unit("redis") + machine.wait_for_file("${redis.servers."".unixSocket}") + machine.succeed("ln -s ${redis.servers."".unixSocket} /tmp/redis.sock") - tests = machine.succeed("PYTHONPATH=\"${pkgs.python3Packages.txredisapi.src}\" python -m twisted.trial ${pkgs.python3Packages.txredisapi.src}/tests") - ''; - } -) + tests = machine.succeed("PYTHONPATH=\"${pkgs.python3Packages.txredisapi.src}\" python -m twisted.trial ${pkgs.python3Packages.txredisapi.src}/tests") + ''; +} diff --git a/nixos/tests/typesense.nix b/nixos/tests/typesense.nix index dbd9e4e38a9f..2d97150bcec5 100644 --- a/nixos/tests/typesense.nix +++ b/nixos/tests/typesense.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - testPort = 8108; - in - { - name = "typesense"; - meta.maintainers = with pkgs.lib.maintainers; [ oddlama ]; +{ pkgs, ... }: +let + testPort = 8108; +in +{ + name = "typesense"; + meta.maintainers = with pkgs.lib.maintainers; [ oddlama ]; - nodes.machine = - { ... }: - { - services.typesense = { - enable = true; - apiKeyFile = pkgs.writeText "typesense-api-key" "dummy"; - settings.server = { - api-port = testPort; - api-address = "0.0.0.0"; - }; + nodes.machine = + { ... 
}: + { + services.typesense = { + enable = true; + apiKeyFile = pkgs.writeText "typesense-api-key" "dummy"; + settings.server = { + api-port = testPort; + api-address = "0.0.0.0"; }; }; + }; - testScript = '' - machine.wait_for_unit("typesense.service") - machine.wait_for_open_port(${toString testPort}) - # After waiting for the port, typesense still hasn't initialized the database, so wait until we can connect successfully - assert machine.wait_until_succeeds("curl --fail http://localhost:${toString testPort}/health") == '{"ok":true}' - ''; - } -) + testScript = '' + machine.wait_for_unit("typesense.service") + machine.wait_for_open_port(${toString testPort}) + # After waiting for the port, typesense still hasn't initialized the database, so wait until we can connect successfully + assert machine.wait_until_succeeds("curl --fail http://localhost:${toString testPort}/health") == '{"ok":true}' + ''; +} diff --git a/nixos/tests/ucarp.nix b/nixos/tests/ucarp.nix index ec24d49e95a9..385412d60120 100644 --- a/nixos/tests/ucarp.nix +++ b/nixos/tests/ucarp.nix @@ -1,80 +1,78 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: +{ pkgs, lib, ... }: - let - addrShared = "192.168.0.1"; - addrHostA = "192.168.0.10"; - addrHostB = "192.168.0.11"; +let + addrShared = "192.168.0.1"; + addrHostA = "192.168.0.10"; + addrHostB = "192.168.0.11"; - mkUcarpHost = - addr: - { - config, - pkgs, - lib, - ... - }: - { - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ - { - address = addr; - prefixLength = 24; - } - ]; + mkUcarpHost = + addr: + { + config, + pkgs, + lib, + ... + }: + { + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = addr; + prefixLength = 24; + } + ]; - networking.ucarp = { - enable = true; - interface = "eth1"; - srcIp = addr; - vhId = 1; - passwordFile = "${pkgs.writeText "ucarp-pass" "secure"}"; - addr = addrShared; - upscript = pkgs.writeScript "upscript" '' - #!/bin/sh - ${pkgs.iproute2}/bin/ip addr add "$2"/24 dev "$1" - ''; - downscript = pkgs.writeScript "downscript" '' - #!/bin/sh - ${pkgs.iproute2}/bin/ip addr del "$2"/24 dev "$1" - ''; - }; + networking.ucarp = { + enable = true; + interface = "eth1"; + srcIp = addr; + vhId = 1; + passwordFile = "${pkgs.writeText "ucarp-pass" "secure"}"; + addr = addrShared; + upscript = pkgs.writeScript "upscript" '' + #!/bin/sh + ${pkgs.iproute2}/bin/ip addr add "$2"/24 dev "$1" + ''; + downscript = pkgs.writeScript "downscript" '' + #!/bin/sh + ${pkgs.iproute2}/bin/ip addr del "$2"/24 dev "$1" + ''; }; - in - { - name = "ucarp"; - meta.maintainers = with lib.maintainers; [ oxzi ]; - - nodes = { - hostA = mkUcarpHost addrHostA; - hostB = mkUcarpHost addrHostB; }; +in +{ + name = "ucarp"; + meta.maintainers = with lib.maintainers; [ oxzi ]; - testScript = '' - def is_master(host): - ipOutput = host.succeed("ip addr show dev eth1") - return "inet ${addrShared}/24" in ipOutput + nodes = { + hostA = mkUcarpHost addrHostA; + hostB = mkUcarpHost addrHostB; + }; + + testScript = '' + def is_master(host): + ipOutput = host.succeed("ip addr show dev eth1") + return "inet ${addrShared}/24" in ipOutput - start_all() + start_all() - # First, let both hosts start and let a master node be selected - for host, peer in [(hostA, "${addrHostB}"), (hostB, "${addrHostA}")]: - host.wait_for_unit("ucarp.service") - host.succeed(f"ping -c 1 {peer}") + # First, let both hosts start and let a master node be selected + for host, peer in [(hostA, "${addrHostB}"), (hostB, "${addrHostA}")]: + host.wait_for_unit("ucarp.service") + 
host.succeed(f"ping -c 1 {peer}") - hostA.sleep(5) + hostA.sleep(5) - hostA_master, hostB_master = is_master(hostA), is_master(hostB) - assert hostA_master != hostB_master, "only one master node is allowed" + hostA_master, hostB_master = is_master(hostA), is_master(hostB) + assert hostA_master != hostB_master, "only one master node is allowed" - master_host = hostA if hostA_master else hostB - backup_host = hostB if hostA_master else hostA + master_host = hostA if hostA_master else hostB + backup_host = hostB if hostA_master else hostA - # Let's crash the master host and let the backup take over - master_host.crash() + # Let's crash the master host and let the backup take over + master_host.crash() - backup_host.sleep(5) - assert is_master(backup_host), "backup did not take over" - ''; - } -) + backup_host.sleep(5) + assert is_master(backup_host), "backup did not take over" + ''; +} diff --git a/nixos/tests/udisks2.nix b/nixos/tests/udisks2.nix index 3800a53ef196..591ad0741e28 100644 --- a/nixos/tests/udisks2.nix +++ b/nixos/tests/udisks2.nix @@ -1,74 +1,72 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let +let - # FIXME: 404s - stick = pkgs.fetchurl { - url = "https://nixos.org/~eelco/nix/udisks-test.img.xz"; - sha256 = "0was1xgjkjad91nipzclaz5biv3m4b2nk029ga6nk7iklwi19l8b"; + # FIXME: 404s + stick = pkgs.fetchurl { + url = "https://nixos.org/~eelco/nix/udisks-test.img.xz"; + sha256 = "0was1xgjkjad91nipzclaz5biv3m4b2nk029ga6nk7iklwi19l8b"; + }; + +in + +{ + name = "udisks2"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; + + nodes.machine = + { ... }: + { + services.udisks2.enable = true; + imports = [ ./common/user-account.nix ]; + + security.polkit.extraConfig = '' + polkit.addRule(function(action, subject) { + if (subject.user == "alice") return "yes"; + }); + ''; }; - in + testScript = '' + import lzma - { - name = "udisks2"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; + machine.systemctl("start udisks2") + machine.wait_for_unit("udisks2.service") - nodes.machine = - { ... }: - { - services.udisks2.enable = true; - imports = [ ./common/user-account.nix ]; + with lzma.open( + "${stick}" + ) as data, open(machine.state_dir / "usbstick.img", "wb") as stick: + stick.write(data.read()) - security.polkit.extraConfig = '' - polkit.addRule(function(action, subject) { - if (subject.user == "alice") return "yes"; - }); - ''; - }; + machine.succeed("udisksctl info -b /dev/vda >&2") + machine.fail("udisksctl info -b /dev/sda1") - testScript = '' - import lzma + # Attach a USB stick and wait for it to show up. + machine.send_monitor_command( + f"drive_add 0 id=stick,if=none,file={stick.name},format=raw" + ) + machine.send_monitor_command("device_add usb-storage,id=stick,drive=stick") + machine.wait_until_succeeds("udisksctl info -b /dev/sda1") + machine.succeed("udisksctl info -b /dev/sda1 | grep 'IdLabel:.*USBSTICK'") - machine.systemctl("start udisks2") - machine.wait_for_unit("udisks2.service") + # Mount the stick as a non-root user and do some stuff with it. 
+ machine.succeed("su - alice -c 'udisksctl info -b /dev/sda1'") + machine.succeed("su - alice -c 'udisksctl mount -b /dev/sda1'") + machine.succeed( + "su - alice -c 'cat /run/media/alice/USBSTICK/test.txt' | grep -q 'Hello World'" + ) + machine.succeed("su - alice -c 'echo foo > /run/media/alice/USBSTICK/bar.txt'") - with lzma.open( - "${stick}" - ) as data, open(machine.state_dir / "usbstick.img", "wb") as stick: - stick.write(data.read()) + # Unmounting the stick should make the mountpoint disappear. + machine.succeed("su - alice -c 'udisksctl unmount -b /dev/sda1'") + machine.fail("[ -d /run/media/alice/USBSTICK ]") - machine.succeed("udisksctl info -b /dev/vda >&2") - machine.fail("udisksctl info -b /dev/sda1") + # Remove the USB stick. + machine.send_monitor_command("device_del stick") + machine.wait_until_fails("udisksctl info -b /dev/sda1") + machine.fail("[ -e /dev/sda ]") + ''; - # Attach a USB stick and wait for it to show up. - machine.send_monitor_command( - f"drive_add 0 id=stick,if=none,file={stick.name},format=raw" - ) - machine.send_monitor_command("device_add usb-storage,id=stick,drive=stick") - machine.wait_until_succeeds("udisksctl info -b /dev/sda1") - machine.succeed("udisksctl info -b /dev/sda1 | grep 'IdLabel:.*USBSTICK'") - - # Mount the stick as a non-root user and do some stuff with it. - machine.succeed("su - alice -c 'udisksctl info -b /dev/sda1'") - machine.succeed("su - alice -c 'udisksctl mount -b /dev/sda1'") - machine.succeed( - "su - alice -c 'cat /run/media/alice/USBSTICK/test.txt' | grep -q 'Hello World'" - ) - machine.succeed("su - alice -c 'echo foo > /run/media/alice/USBSTICK/bar.txt'") - - # Unmounting the stick should make the mountpoint disappear. - machine.succeed("su - alice -c 'udisksctl unmount -b /dev/sda1'") - machine.fail("[ -d /run/media/alice/USBSTICK ]") - - # Remove the USB stick. - machine.send_monitor_command("device_del stick") - machine.wait_until_fails("udisksctl info -b /dev/sda1") - machine.fail("[ -e /dev/sda ]") - ''; - - } -) +} diff --git a/nixos/tests/ulogd/ulogd.nix b/nixos/tests/ulogd/ulogd.nix index 9146ec44561d..2130a334fea1 100644 --- a/nixos/tests/ulogd/ulogd.nix +++ b/nixos/tests/ulogd/ulogd.nix @@ -1,61 +1,59 @@ -import ../make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "ulogd"; +{ pkgs, lib, ... }: +{ + name = "ulogd"; - meta.maintainers = with lib.maintainers; [ p-h ]; - - nodes.machine = - { ... }: - { - networking.firewall.enable = false; - networking.nftables.enable = true; - networking.nftables.ruleset = '' - table inet filter { - chain input { - type filter hook input priority 0; - icmp type { echo-request, echo-reply } log group 2 accept - } - - chain output { - type filter hook output priority 0; policy accept; - icmp type { echo-request, echo-reply } log group 2 accept - } - - chain forward { - type filter hook forward priority 0; policy drop; - } + meta.maintainers = with lib.maintainers; [ p-h ]; + nodes.machine = + { ... 
}: + { + networking.firewall.enable = false; + networking.nftables.enable = true; + networking.nftables.ruleset = '' + table inet filter { + chain input { + type filter hook input priority 0; + icmp type { echo-request, echo-reply } log group 2 accept } - ''; - services.ulogd = { - enable = true; - settings = { - global = { - logfile = "/var/log/ulogd.log"; - stack = [ - "log1:NFLOG,base1:BASE,ifi1:IFINDEX,ip2str1:IP2STR,print1:PRINTPKT,emu1:LOGEMU" - "log1:NFLOG,base1:BASE,pcap1:PCAP" - ]; - }; - log1.group = 2; + chain output { + type filter hook output priority 0; policy accept; + icmp type { echo-request, echo-reply } log group 2 accept + } - pcap1 = { - sync = 1; - file = "/var/log/ulogd.pcap"; - }; + chain forward { + type filter hook forward priority 0; policy drop; + } - emu1 = { - sync = 1; - file = "/var/log/ulogd_pkts.log"; - }; + } + ''; + services.ulogd = { + enable = true; + settings = { + global = { + logfile = "/var/log/ulogd.log"; + stack = [ + "log1:NFLOG,base1:BASE,ifi1:IFINDEX,ip2str1:IP2STR,print1:PRINTPKT,emu1:LOGEMU" + "log1:NFLOG,base1:BASE,pcap1:PCAP" + ]; + }; + + log1.group = 2; + + pcap1 = { + sync = 1; + file = "/var/log/ulogd.pcap"; + }; + + emu1 = { + sync = 1; + file = "/var/log/ulogd_pkts.log"; }; }; - - environment.systemPackages = with pkgs; [ tcpdump ]; }; - testScript = lib.readFile ./ulogd.py; - } -) + environment.systemPackages = with pkgs; [ tcpdump ]; + }; + + testScript = lib.readFile ./ulogd.py; +} diff --git a/nixos/tests/umurmur.nix b/nixos/tests/umurmur.nix index 0fcdeb1847ff..d5d167b70cf2 100644 --- a/nixos/tests/umurmur.nix +++ b/nixos/tests/umurmur.nix @@ -1,99 +1,97 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - client = - { pkgs, ... }: - { - imports = [ ./common/x11.nix ]; - environment.systemPackages = [ pkgs.mumble ]; - }; - port = 56457; - in - { - name = "mumble"; - meta = with pkgs.lib.maintainers; { - maintainers = [ _3JlOy-PYCCKUi ]; +let + client = + { pkgs, ... }: + { + imports = [ ./common/x11.nix ]; + environment.systemPackages = [ pkgs.mumble ]; }; + port = 56457; +in +{ + name = "mumble"; + meta = with pkgs.lib.maintainers; { + maintainers = [ _3JlOy-PYCCKUi ]; + }; - nodes = { - server = - { ... }: - { - services.umurmur = { - enable = true; - openFirewall = true; - settings = { - password = "testpassword"; - channels = [ - { - name = "root"; - parent = ""; - description = "Root channel. No entry."; - noenter = true; - } - { - name = "lobby"; - parent = "root"; - description = "Lobby channel"; - } - ]; - default_channel = "lobby"; - bindport = port; - }; + nodes = { + server = + { ... }: + { + services.umurmur = { + enable = true; + openFirewall = true; + settings = { + password = "testpassword"; + channels = [ + { + name = "root"; + parent = ""; + description = "Root channel. 
No entry."; + noenter = true; + } + { + name = "lobby"; + parent = "root"; + description = "Lobby channel"; + } + ]; + default_channel = "lobby"; + bindport = port; }; }; + }; - client1 = client; - client2 = client; - }; + client1 = client; + client2 = client; + }; - testScript = '' - start_all() + testScript = '' + start_all() - server.wait_for_unit("umurmur.service") - client1.wait_for_x() - client2.wait_for_x() + server.wait_for_unit("umurmur.service") + client1.wait_for_x() + client2.wait_for_x() - client1.execute("mumble mumble://client1:testpassword\@server:${toString port}/lobby >&2 &") - client2.execute("mumble mumble://client2:testpassword\@server:${toString port}/lobby >&2 &") + client1.execute("mumble mumble://client1:testpassword\@server:${toString port}/lobby >&2 &") + client2.execute("mumble mumble://client2:testpassword\@server:${toString port}/lobby >&2 &") - # cancel client audio configuration - client1.wait_for_window(r"Audio Tuning Wizard") - client2.wait_for_window(r"Audio Tuning Wizard") - server.sleep(5) # wait because mumble is slow to register event handlers - client1.send_key("esc") - client2.send_key("esc") + # cancel client audio configuration + client1.wait_for_window(r"Audio Tuning Wizard") + client2.wait_for_window(r"Audio Tuning Wizard") + server.sleep(5) # wait because mumble is slow to register event handlers + client1.send_key("esc") + client2.send_key("esc") - # cancel client cert configuration - client1.wait_for_window(r"Certificate Management") - client2.wait_for_window(r"Certificate Management") - server.sleep(5) # wait because mumble is slow to register event handlers - client1.send_key("esc") - client2.send_key("esc") + # cancel client cert configuration + client1.wait_for_window(r"Certificate Management") + client2.wait_for_window(r"Certificate Management") + server.sleep(5) # wait because mumble is slow to register event handlers + client1.send_key("esc") + client2.send_key("esc") - # accept server certificate - client1.wait_for_window(r"^Mumble$") - client2.wait_for_window(r"^Mumble$") - server.sleep(5) # wait because mumble is slow to register event handlers - client1.send_chars("y") - client2.send_chars("y") - server.sleep(5) # wait because mumble is slow to register event handlers + # accept server certificate + client1.wait_for_window(r"^Mumble$") + client2.wait_for_window(r"^Mumble$") + server.sleep(5) # wait because mumble is slow to register event handlers + client1.send_chars("y") + client2.send_chars("y") + server.sleep(5) # wait because mumble is slow to register event handlers - # sometimes the wrong of the 2 windows is focused, we switch focus and try pressing "y" again - client1.send_key("alt-tab") - client2.send_key("alt-tab") - server.sleep(5) # wait because mumble is slow to register event handlers - client1.send_chars("y") - client2.send_chars("y") + # sometimes the wrong of the 2 windows is focused, we switch focus and try pressing "y" again + client1.send_key("alt-tab") + client2.send_key("alt-tab") + server.sleep(5) # wait because mumble is slow to register event handlers + client1.send_chars("y") + client2.send_chars("y") - # Find clients in logs - server.wait_until_succeeds( - "journalctl -eu umurmur -o cat | grep -q 'User client1 authenticated'" - ) - server.wait_until_succeeds( - "journalctl -eu umurmur -o cat | grep -q 'User client2 authenticated'" - ) - ''; - } -) + # Find clients in logs + server.wait_until_succeeds( + "journalctl -eu umurmur -o cat | grep -q 'User client1 authenticated'" + ) + 
server.wait_until_succeeds( + "journalctl -eu umurmur -o cat | grep -q 'User client2 authenticated'" + ) + ''; +} diff --git a/nixos/tests/unbound.nix b/nixos/tests/unbound.nix index 0aa3a6a0c16f..1e656fc5fcf2 100644 --- a/nixos/tests/unbound.nix +++ b/nixos/tests/unbound.nix @@ -16,379 +16,377 @@ access to that socket. Also, when there is no socket configured, users shouldn't be able to access the control socket at all. Not even root. */ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - let - # common client configuration that we can just use for the multitude of - # clients we are constructing - common = - { lib, pkgs, ... }: +{ pkgs, lib, ... }: +let + # common client configuration that we can just use for the multitude of + # clients we are constructing + common = + { lib, pkgs, ... }: + { + config = { + environment.systemPackages = [ pkgs.knot-dns ]; + + # disable the root anchor update as we do not have internet access during + # the test execution + services.unbound.enableRootTrustAnchor = false; + + # we want to test the full-variant of the package to also get DoH support + services.unbound.package = pkgs.unbound-full; + }; + }; + + cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } '' + openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=dns.example.local' + mkdir -p $out + cp key.pem cert.pem $out + ''; +in +{ + name = "unbound"; + meta = with pkgs.lib.maintainers; { + maintainers = [ andir ]; + }; + + nodes = { + + # The server that actually serves our zones, this tests unbounds authoriative mode + authoritative = { - config = { - environment.systemPackages = [ pkgs.knot-dns ]; + lib, + pkgs, + config, + ... + }: + { + imports = [ common ]; + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = "192.168.0.1"; + prefixLength = 24; + } + ]; + networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ + { + address = "fd21::1"; + prefixLength = 64; + } + ]; + networking.firewall.allowedTCPPorts = [ 53 ]; + networking.firewall.allowedUDPPorts = [ 53 ]; - # disable the root anchor update as we do not have internet access during - # the test execution - services.unbound.enableRootTrustAnchor = false; - - # we want to test the full-variant of the package to also get DoH support - services.unbound.package = pkgs.unbound-full; + services.unbound = { + enable = true; + settings = { + server = { + interface = [ + "192.168.0.1" + "fd21::1" + "::1" + "127.0.0.1" + ]; + access-control = [ + "192.168.0.0/24 allow" + "fd21::/64 allow" + "::1 allow" + "127.0.0.0/8 allow" + ]; + local-data = [ + ''"example.local. IN A 1.2.3.4"'' + ''"example.local. IN AAAA abcd::eeff"'' + ]; + }; + }; }; }; - cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } '' - openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=dns.example.local' - mkdir -p $out - cp key.pem cert.pem $out + # The resolver that knows that forwards (only) to the authoritative server + # and listens on UDP/53, TCP/53 & TCP/853. + resolver = + { lib, nodes, ... 
}: + { + imports = [ common ]; + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = "192.168.0.2"; + prefixLength = 24; + } + ]; + networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ + { + address = "fd21::2"; + prefixLength = 64; + } + ]; + networking.firewall.allowedTCPPorts = [ + 53 # regular DNS + 853 # DNS over TLS + 443 # DNS over HTTPS + ]; + networking.firewall.allowedUDPPorts = [ 53 ]; + + services.unbound = { + enable = true; + settings = { + server = { + interface = [ + "::1" + "127.0.0.1" + "192.168.0.2" + "fd21::2" + "192.168.0.2@853" + "fd21::2@853" + "::1@853" + "127.0.0.1@853" + "192.168.0.2@443" + "fd21::2@443" + "::1@443" + "127.0.0.1@443" + ]; + access-control = [ + "192.168.0.0/24 allow" + "fd21::/64 allow" + "::1 allow" + "127.0.0.0/8 allow" + ]; + tls-service-pem = "${cert}/cert.pem"; + tls-service-key = "${cert}/key.pem"; + }; + forward-zone = [ + { + name = "."; + forward-addr = [ + (lib.head nodes.authoritative.networking.interfaces.eth1.ipv6.addresses).address + (lib.head nodes.authoritative.networking.interfaces.eth1.ipv4.addresses).address + ]; + } + ]; + }; + }; + }; + + # machine that runs a local unbound that will be reconfigured during test execution + local_resolver = + { + lib, + nodes, + config, + ... + }: + { + imports = [ common ]; + networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ + { + address = "192.168.0.3"; + prefixLength = 24; + } + ]; + networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ + { + address = "fd21::3"; + prefixLength = 64; + } + ]; + networking.firewall.allowedTCPPorts = [ + 53 # regular DNS + ]; + networking.firewall.allowedUDPPorts = [ 53 ]; + + services.unbound = { + enable = true; + settings = { + server = { + interface = [ + "::1" + "127.0.0.1" + ]; + access-control = [ + "::1 allow" + "127.0.0.0/8 allow" + ]; + }; + include = "/etc/unbound/extra*.conf"; + }; + localControlSocketPath = "/run/unbound/unbound.ctl"; + }; + + users.users = { + # user that is permitted to access the unix socket + someuser = { + isSystemUser = true; + group = "someuser"; + extraGroups = [ + config.users.users.unbound.group + ]; + }; + + # user that is not permitted to access the unix socket + unauthorizeduser = { + isSystemUser = true; + group = "unauthorizeduser"; + }; + + }; + users.groups = { + someuser = { }; + unauthorizeduser = { }; + }; + + # Used for testing configuration reloading + environment.etc = { + "unbound-extra1.conf".text = '' + forward-zone: + name: "example.local." + forward-addr: ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address} + forward-addr: ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address} + ''; + "unbound-extra2.conf".text = '' + auth-zone: + name: something.local. + zonefile: ${pkgs.writeText "zone" '' + something.local. IN A 3.4.5.6 + ''} + ''; + }; + }; + + # plain node that only has network access and doesn't run any part of the + # resolver software locally + client = + { lib, nodes, ... }: + { + imports = [ common ]; + networking.nameservers = [ + (lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address + (lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address + ]; + networking.interfaces.eth1.ipv4.addresses = [ + { + address = "192.168.0.10"; + prefixLength = 24; + } + ]; + networking.interfaces.eth1.ipv6.addresses = [ + { + address = "fd21::10"; + prefixLength = 64; + } + ]; + }; + }; + + testScript = + { nodes, ... }: + '' + import typing + + zone = "example.local." 
+ records = [("AAAA", "abcd::eeff"), ("A", "1.2.3.4")] + + + def query( + machine, + host: str, + query_type: str, + query: str, + expected: typing.Optional[str] = None, + args: typing.Optional[typing.List[str]] = None, + ): + """ + Execute a single query and compare the result with expectation + """ + text_args = "" + if args: + text_args = " ".join(args) + + out = machine.succeed( + f"kdig {text_args} {query} {query_type} @{host} +short" + ).strip() + machine.log(f"{host} replied with {out}") + if expected: + assert expected == out, f"Expected `{expected}` but got `{out}`" + + + def test(machine, remotes, /, doh=False, zone=zone, records=records, args=[]): + """ + Run queries for the given remotes on the given machine. + """ + for query_type, expected in records: + for remote in remotes: + query(machine, remote, query_type, zone, expected, args) + query(machine, remote, query_type, zone, expected, ["+tcp"] + args) + if doh: + query( + machine, + remote, + query_type, + zone, + expected, + ["+tcp", "+tls"] + args, + ) + query( + machine, + remote, + query_type, + zone, + expected, + ["+https"] + args, + ) + + + client.start() + authoritative.wait_for_unit("unbound.service") + + # verify that we can resolve locally + with subtest("test the authoritative servers local responses"): + test(authoritative, ["::1", "127.0.0.1"]) + + resolver.wait_for_unit("unbound.service") + + with subtest("root is unable to use unbounc-control when the socket is not configured"): + resolver.succeed("which unbound-control") # the binary must exist + resolver.fail("unbound-control list_forwards") # the invocation must fail + + # verify that the resolver is able to resolve on all the local protocols + with subtest("test that the resolver resolves on all protocols and transports"): + test(resolver, ["::1", "127.0.0.1"], doh=True) + + resolver.wait_for_unit("multi-user.target") + + with subtest("client should be able to query the resolver"): + test(client, ["${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address}", "${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}"], doh=True) + + # discard the client we do not need anymore + client.shutdown() + + local_resolver.wait_for_unit("multi-user.target") + + # link a new config file to /etc/unbound/extra.conf + local_resolver.succeed("ln -s /etc/unbound-extra1.conf /etc/unbound/extra1.conf") + + # reload the server & ensure the forwarding works + with subtest("test that the local resolver resolves on all protocols and transports"): + local_resolver.succeed("systemctl reload unbound") + print(local_resolver.succeed("journalctl -u unbound -n 1000")) + test(local_resolver, ["::1", "127.0.0.1"], args=["+timeout=60"]) + + with subtest("test that we can use the unbound control socket"): + out = local_resolver.succeed( + "sudo -u someuser -- unbound-control list_forwards" + ).strip() + + # Thank you black! Can't really break this line into a readable version. + expected = "example.local. IN forward ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address} ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}" + assert out == expected, f"Expected `{expected}` but got `{out}` instead." 
+ local_resolver.fail("sudo -u unauthorizeduser -- unbound-control list_forwards") + + + # link a new config file to /etc/unbound/extra.conf + local_resolver.succeed("ln -sf /etc/unbound-extra2.conf /etc/unbound/extra2.conf") + + # reload the server & ensure the new local zone works + with subtest("test that we can query the new local zone"): + local_resolver.succeed("unbound-control reload") + r = [("A", "3.4.5.6")] + test(local_resolver, ["::1", "127.0.0.1"], zone="something.local.", records=r) ''; - in - { - name = "unbound"; - meta = with pkgs.lib.maintainers; { - maintainers = [ andir ]; - }; - - nodes = { - - # The server that actually serves our zones, this tests unbounds authoriative mode - authoritative = - { - lib, - pkgs, - config, - ... - }: - { - imports = [ common ]; - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ - { - address = "192.168.0.1"; - prefixLength = 24; - } - ]; - networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ - { - address = "fd21::1"; - prefixLength = 64; - } - ]; - networking.firewall.allowedTCPPorts = [ 53 ]; - networking.firewall.allowedUDPPorts = [ 53 ]; - - services.unbound = { - enable = true; - settings = { - server = { - interface = [ - "192.168.0.1" - "fd21::1" - "::1" - "127.0.0.1" - ]; - access-control = [ - "192.168.0.0/24 allow" - "fd21::/64 allow" - "::1 allow" - "127.0.0.0/8 allow" - ]; - local-data = [ - ''"example.local. IN A 1.2.3.4"'' - ''"example.local. IN AAAA abcd::eeff"'' - ]; - }; - }; - }; - }; - - # The resolver that knows that forwards (only) to the authoritative server - # and listens on UDP/53, TCP/53 & TCP/853. - resolver = - { lib, nodes, ... }: - { - imports = [ common ]; - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ - { - address = "192.168.0.2"; - prefixLength = 24; - } - ]; - networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ - { - address = "fd21::2"; - prefixLength = 64; - } - ]; - networking.firewall.allowedTCPPorts = [ - 53 # regular DNS - 853 # DNS over TLS - 443 # DNS over HTTPS - ]; - networking.firewall.allowedUDPPorts = [ 53 ]; - - services.unbound = { - enable = true; - settings = { - server = { - interface = [ - "::1" - "127.0.0.1" - "192.168.0.2" - "fd21::2" - "192.168.0.2@853" - "fd21::2@853" - "::1@853" - "127.0.0.1@853" - "192.168.0.2@443" - "fd21::2@443" - "::1@443" - "127.0.0.1@443" - ]; - access-control = [ - "192.168.0.0/24 allow" - "fd21::/64 allow" - "::1 allow" - "127.0.0.0/8 allow" - ]; - tls-service-pem = "${cert}/cert.pem"; - tls-service-key = "${cert}/key.pem"; - }; - forward-zone = [ - { - name = "."; - forward-addr = [ - (lib.head nodes.authoritative.networking.interfaces.eth1.ipv6.addresses).address - (lib.head nodes.authoritative.networking.interfaces.eth1.ipv4.addresses).address - ]; - } - ]; - }; - }; - }; - - # machine that runs a local unbound that will be reconfigured during test execution - local_resolver = - { - lib, - nodes, - config, - ... 
- }: - { - imports = [ common ]; - networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ - { - address = "192.168.0.3"; - prefixLength = 24; - } - ]; - networking.interfaces.eth1.ipv6.addresses = lib.mkForce [ - { - address = "fd21::3"; - prefixLength = 64; - } - ]; - networking.firewall.allowedTCPPorts = [ - 53 # regular DNS - ]; - networking.firewall.allowedUDPPorts = [ 53 ]; - - services.unbound = { - enable = true; - settings = { - server = { - interface = [ - "::1" - "127.0.0.1" - ]; - access-control = [ - "::1 allow" - "127.0.0.0/8 allow" - ]; - }; - include = "/etc/unbound/extra*.conf"; - }; - localControlSocketPath = "/run/unbound/unbound.ctl"; - }; - - users.users = { - # user that is permitted to access the unix socket - someuser = { - isSystemUser = true; - group = "someuser"; - extraGroups = [ - config.users.users.unbound.group - ]; - }; - - # user that is not permitted to access the unix socket - unauthorizeduser = { - isSystemUser = true; - group = "unauthorizeduser"; - }; - - }; - users.groups = { - someuser = { }; - unauthorizeduser = { }; - }; - - # Used for testing configuration reloading - environment.etc = { - "unbound-extra1.conf".text = '' - forward-zone: - name: "example.local." - forward-addr: ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address} - forward-addr: ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address} - ''; - "unbound-extra2.conf".text = '' - auth-zone: - name: something.local. - zonefile: ${pkgs.writeText "zone" '' - something.local. IN A 3.4.5.6 - ''} - ''; - }; - }; - - # plain node that only has network access and doesn't run any part of the - # resolver software locally - client = - { lib, nodes, ... }: - { - imports = [ common ]; - networking.nameservers = [ - (lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address - (lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address - ]; - networking.interfaces.eth1.ipv4.addresses = [ - { - address = "192.168.0.10"; - prefixLength = 24; - } - ]; - networking.interfaces.eth1.ipv6.addresses = [ - { - address = "fd21::10"; - prefixLength = 64; - } - ]; - }; - }; - - testScript = - { nodes, ... }: - '' - import typing - - zone = "example.local." - records = [("AAAA", "abcd::eeff"), ("A", "1.2.3.4")] - - - def query( - machine, - host: str, - query_type: str, - query: str, - expected: typing.Optional[str] = None, - args: typing.Optional[typing.List[str]] = None, - ): - """ - Execute a single query and compare the result with expectation - """ - text_args = "" - if args: - text_args = " ".join(args) - - out = machine.succeed( - f"kdig {text_args} {query} {query_type} @{host} +short" - ).strip() - machine.log(f"{host} replied with {out}") - if expected: - assert expected == out, f"Expected `{expected}` but got `{out}`" - - - def test(machine, remotes, /, doh=False, zone=zone, records=records, args=[]): - """ - Run queries for the given remotes on the given machine. 
- """ - for query_type, expected in records: - for remote in remotes: - query(machine, remote, query_type, zone, expected, args) - query(machine, remote, query_type, zone, expected, ["+tcp"] + args) - if doh: - query( - machine, - remote, - query_type, - zone, - expected, - ["+tcp", "+tls"] + args, - ) - query( - machine, - remote, - query_type, - zone, - expected, - ["+https"] + args, - ) - - - client.start() - authoritative.wait_for_unit("unbound.service") - - # verify that we can resolve locally - with subtest("test the authoritative servers local responses"): - test(authoritative, ["::1", "127.0.0.1"]) - - resolver.wait_for_unit("unbound.service") - - with subtest("root is unable to use unbounc-control when the socket is not configured"): - resolver.succeed("which unbound-control") # the binary must exist - resolver.fail("unbound-control list_forwards") # the invocation must fail - - # verify that the resolver is able to resolve on all the local protocols - with subtest("test that the resolver resolves on all protocols and transports"): - test(resolver, ["::1", "127.0.0.1"], doh=True) - - resolver.wait_for_unit("multi-user.target") - - with subtest("client should be able to query the resolver"): - test(client, ["${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address}", "${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}"], doh=True) - - # discard the client we do not need anymore - client.shutdown() - - local_resolver.wait_for_unit("multi-user.target") - - # link a new config file to /etc/unbound/extra.conf - local_resolver.succeed("ln -s /etc/unbound-extra1.conf /etc/unbound/extra1.conf") - - # reload the server & ensure the forwarding works - with subtest("test that the local resolver resolves on all protocols and transports"): - local_resolver.succeed("systemctl reload unbound") - print(local_resolver.succeed("journalctl -u unbound -n 1000")) - test(local_resolver, ["::1", "127.0.0.1"], args=["+timeout=60"]) - - with subtest("test that we can use the unbound control socket"): - out = local_resolver.succeed( - "sudo -u someuser -- unbound-control list_forwards" - ).strip() - - # Thank you black! Can't really break this line into a readable version. - expected = "example.local. IN forward ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address} ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}" - assert out == expected, f"Expected `{expected}` but got `{out}` instead." - local_resolver.fail("sudo -u unauthorizeduser -- unbound-control list_forwards") - - - # link a new config file to /etc/unbound/extra.conf - local_resolver.succeed("ln -sf /etc/unbound-extra2.conf /etc/unbound/extra2.conf") - - # reload the server & ensure the new local zone works - with subtest("test that we can query the new local zone"): - local_resolver.succeed("unbound-control reload") - r = [("A", "3.4.5.6")] - test(local_resolver, ["::1", "127.0.0.1"], zone="something.local.", records=r) - ''; - } -) +} diff --git a/nixos/tests/uptermd.nix b/nixos/tests/uptermd.nix index a7f18fa3cba7..c5020d5ef683 100644 --- a/nixos/tests/uptermd.nix +++ b/nixos/tests/uptermd.nix @@ -1,71 +1,69 @@ -import ./make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - client = - { pkgs, ... }: +let + client = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.upterm ]; + }; +in +{ + name = "uptermd"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fleaz ]; + }; + + nodes = { + server = + { config, ... 
}: { - environment.systemPackages = [ pkgs.upterm ]; - }; - in - { - name = "uptermd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fleaz ]; - }; - - nodes = { - server = - { config, ... }: - { - services.uptermd = { - enable = true; - openFirewall = true; - port = 1337; - }; + services.uptermd = { + enable = true; + openFirewall = true; + port = 1337; }; - client1 = client; - client2 = client; - }; + }; + client1 = client; + client2 = client; + }; - testScript = '' - start_all() + testScript = '' + start_all() - server.wait_for_unit("uptermd.service") - server.systemctl("start network-online.target") - server.wait_for_unit("network-online.target") + server.wait_for_unit("uptermd.service") + server.systemctl("start network-online.target") + server.wait_for_unit("network-online.target") - # wait for upterm port to be reachable - client1.wait_until_succeeds("nc -z -v server 1337") + # wait for upterm port to be reachable + client1.wait_until_succeeds("nc -z -v server 1337") - # Add SSH hostkeys from the server to both clients - # uptermd needs an '@cert-authority entry so we need to modify the known_hosts file - client1.execute("mkdir -p ~/.ssh && ssh -o StrictHostKeyChecking=no -p 1337 server ls") - client1.execute("echo @cert-authority $(cat ~/.ssh/known_hosts) > ~/.ssh/known_hosts") - client2.execute("mkdir -p ~/.ssh && ssh -o StrictHostKeyChecking=no -p 1337 server ls") - client2.execute("echo @cert-authority $(cat ~/.ssh/known_hosts) > ~/.ssh/known_hosts") + # Add SSH hostkeys from the server to both clients + # uptermd needs an '@cert-authority entry so we need to modify the known_hosts file + client1.execute("mkdir -p ~/.ssh && ssh -o StrictHostKeyChecking=no -p 1337 server ls") + client1.execute("echo @cert-authority $(cat ~/.ssh/known_hosts) > ~/.ssh/known_hosts") + client2.execute("mkdir -p ~/.ssh && ssh -o StrictHostKeyChecking=no -p 1337 server ls") + client2.execute("echo @cert-authority $(cat ~/.ssh/known_hosts) > ~/.ssh/known_hosts") - client1.wait_for_unit("multi-user.target") - client1.wait_until_succeeds("pgrep -f 'agetty.*tty1'") - client1.wait_until_tty_matches("1", "login: ") - client1.send_chars("root\n") - client1.wait_until_succeeds("pgrep -u root bash") + client1.wait_for_unit("multi-user.target") + client1.wait_until_succeeds("pgrep -f 'agetty.*tty1'") + client1.wait_until_tty_matches("1", "login: ") + client1.send_chars("root\n") + client1.wait_until_succeeds("pgrep -u root bash") - client1.execute("ssh-keygen -t ed25519 -N \"\" -f /root/.ssh/id_ed25519") - client1.send_chars("TERM=xterm upterm host --server ssh://server:1337 --force-command hostname -- bash > /tmp/session-details\n") - client1.wait_for_file("/tmp/session-details") - client1.send_key("q") + client1.execute("ssh-keygen -t ed25519 -N \"\" -f /root/.ssh/id_ed25519") + client1.send_chars("TERM=xterm upterm host --server ssh://server:1337 --force-command hostname -- bash > /tmp/session-details\n") + client1.wait_for_file("/tmp/session-details") + client1.send_key("q") - # uptermd can't connect if we don't have a keypair - client2.execute("ssh-keygen -t ed25519 -N \"\" -f /root/.ssh/id_ed25519") + # uptermd can't connect if we don't have a keypair + client2.execute("ssh-keygen -t ed25519 -N \"\" -f /root/.ssh/id_ed25519") - # Grep the ssh connect command from the output of 'upterm host' - ssh_command = client1.succeed("grep 'SSH Session' /tmp/session-details | cut -d':' -f2-").strip() + # Grep the ssh connect command from the output of 'upterm host' + ssh_command = client1.succeed("grep 'SSH 
Session' /tmp/session-details | cut -d':' -f2-").strip() - # Connect with client2. Because we used '--force-command hostname' we should get "client1" as the output - output = client2.succeed(ssh_command) + # Connect with client2. Because we used '--force-command hostname' we should get "client1" as the output + output = client2.succeed(ssh_command) - assert output.strip() == "client1" - ''; - } -) + assert output.strip() == "client1" + ''; +} diff --git a/nixos/tests/uptime-kuma.nix b/nixos/tests/uptime-kuma.nix index e79546246f62..1d40b4766a8c 100644 --- a/nixos/tests/uptime-kuma.nix +++ b/nixos/tests/uptime-kuma.nix @@ -1,21 +1,19 @@ -import ./make-test-python.nix ( - { lib, ... }: +{ lib, ... }: - { - name = "uptime-kuma"; - meta.maintainers = with lib.maintainers; [ julienmalka ]; +{ + name = "uptime-kuma"; + meta.maintainers = with lib.maintainers; [ julienmalka ]; - nodes.machine = - { pkgs, ... }: - { - services.uptime-kuma.enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.uptime-kuma.enable = true; + }; - testScript = '' - machine.start() - machine.wait_for_unit("uptime-kuma.service") - machine.wait_for_open_port(3001) - machine.succeed("curl --fail http://localhost:3001/") - ''; - } -) + testScript = '' + machine.start() + machine.wait_for_unit("uptime-kuma.service") + machine.wait_for_open_port(3001) + machine.succeed("curl --fail http://localhost:3001/") + ''; +} diff --git a/nixos/tests/urn-timer.nix b/nixos/tests/urn-timer.nix index 157e26b30b5e..5748cf096f3e 100644 --- a/nixos/tests/urn-timer.nix +++ b/nixos/tests/urn-timer.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "urn-timer"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "urn-timer"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = [ pkgs.urn-timer ]; }; - nodes.machine = - { config, pkgs, ... }: - { - imports = [ - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - environment.systemPackages = [ pkgs.urn-timer ]; - }; - - enableOCR = true; - - testScript = '' - machine.wait_for_x() - machine.execute("urn-gtk ${pkgs.urn-timer.src}/splits_examples/sotn.json >&2 &") - machine.wait_for_window("urn") - machine.wait_for_text(r"(Mist|Bat|Reverse|Dracula)") - machine.screenshot("screen") - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.execute("urn-gtk ${pkgs.urn-timer.src}/splits_examples/sotn.json >&2 &") + machine.wait_for_window("urn") + machine.wait_for_text(r"(Mist|Bat|Reverse|Dracula)") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/usbguard.nix b/nixos/tests/usbguard.nix index 210b8b2adac5..959d062b8366 100644 --- a/nixos/tests/usbguard.nix +++ b/nixos/tests/usbguard.nix @@ -1,68 +1,66 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "usbguard"; - meta = with pkgs.lib.maintainers; { - maintainers = [ tnias ]; +{ pkgs, ... }: +{ + name = "usbguard"; + meta = with pkgs.lib.maintainers; { + maintainers = [ tnias ]; + }; + + nodes.machine = + { ... }: + { + services.usbguard = { + enable = true; + IPCAllowedUsers = [ + "alice" + "root" + ]; + + # As virtual USB devices get attached to the "QEMU USB Hub" we need to + # allow Hubs. Otherwise we would have to explicitly allow them too. 
+ rules = '' + allow with-interface equals { 09:00:00 } + ''; + }; + imports = [ ./common/user-account.nix ]; }; - nodes.machine = - { ... }: - { - services.usbguard = { - enable = true; - IPCAllowedUsers = [ - "alice" - "root" - ]; + testScript = '' + # create a blank disk image for our fake USB stick + with open(machine.state_dir / "usbstick.img", "wb") as stick: + stick.write(b"\x00" * (1024 * 1024)) - # As virtual USB devices get attached to the "QEMU USB Hub" we need to - # allow Hubs. Otherwise we would have to explicitly allow them too. - rules = '' - allow with-interface equals { 09:00:00 } - ''; - }; - imports = [ ./common/user-account.nix ]; - }; + # wait for machine to have started and the usbguard service to be up + machine.wait_for_unit("usbguard.service") - testScript = '' - # create a blank disk image for our fake USB stick - with open(machine.state_dir / "usbstick.img", "wb") as stick: - stick.write(b"\x00" * (1024 * 1024)) + with subtest("IPC access control"): + # User "alice" is allowed to access the IPC interface + machine.succeed("su alice -c 'usbguard list-devices'") - # wait for machine to have started and the usbguard service to be up - machine.wait_for_unit("usbguard.service") + # User "bob" is not allowed to access the IPC interface + machine.fail("su bob -c 'usbguard list-devices'") - with subtest("IPC access control"): - # User "alice" is allowed to access the IPC interface - machine.succeed("su alice -c 'usbguard list-devices'") + with subtest("check basic functionality"): + # at this point we expect that no USB HDD is connected + machine.fail("usbguard list-devices | grep -E 'QEMU USB HARDDRIVE'") - # User "bob" is not allowed to access the IPC interface - machine.fail("su bob -c 'usbguard list-devices'") + # insert usb device + machine.send_monitor_command( + f"drive_add 0 id=stick,if=none,file={stick.name},format=raw" + ) + machine.send_monitor_command("device_add usb-storage,id=stick,drive=stick") - with subtest("check basic functionality"): - # at this point we expect that no USB HDD is connected - machine.fail("usbguard list-devices | grep -E 'QEMU USB HARDDRIVE'") + # the attached USB HDD should show up after a short while + machine.wait_until_succeeds("usbguard list-devices | grep -E 'QEMU USB HARDDRIVE'") - # insert usb device - machine.send_monitor_command( - f"drive_add 0 id=stick,if=none,file={stick.name},format=raw" - ) - machine.send_monitor_command("device_add usb-storage,id=stick,drive=stick") + # at this point there should be a **blocked** USB HDD + machine.succeed("usbguard list-devices | grep -E 'block.*QEMU USB HARDDRIVE'") + machine.fail("usbguard list-devices | grep -E ' allow .*QEMU USB HARDDRIVE'") - # the attached USB HDD should show up after a short while - machine.wait_until_succeeds("usbguard list-devices | grep -E 'QEMU USB HARDDRIVE'") + # allow storage devices + machine.succeed("usbguard allow-device 'with-interface { 08:*:* }'") - # at this point there should be a **blocked** USB HDD - machine.succeed("usbguard list-devices | grep -E 'block.*QEMU USB HARDDRIVE'") - machine.fail("usbguard list-devices | grep -E ' allow .*QEMU USB HARDDRIVE'") - - # allow storage devices - machine.succeed("usbguard allow-device 'with-interface { 08:*:* }'") - - # at this point there should be an **allowed** USB HDD - machine.succeed("usbguard list-devices | grep -E ' allow .*QEMU USB HARDDRIVE'") - machine.fail("usbguard list-devices | grep -E ' block .*QEMU USB HARDDRIVE'") - ''; - } -) + # at this point there should be an **allowed** USB HDD + 
machine.succeed("usbguard list-devices | grep -E ' allow .*QEMU USB HARDDRIVE'") + machine.fail("usbguard list-devices | grep -E ' block .*QEMU USB HARDDRIVE'") + ''; +} diff --git a/nixos/tests/user-activation-scripts.nix b/nixos/tests/user-activation-scripts.nix index 58d1b97a09d6..e8ea2d05c465 100644 --- a/nixos/tests/user-activation-scripts.nix +++ b/nixos/tests/user-activation-scripts.nix @@ -1,42 +1,40 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "user-activation-scripts"; - meta = with lib.maintainers; { - maintainers = [ chkno ]; +{ lib, ... }: +{ + name = "user-activation-scripts"; + meta = with lib.maintainers; { + maintainers = [ chkno ]; + }; + + nodes.machine = { + system.switch.enable = true; + system.userActivationScripts.foo = "mktemp ~/user-activation-ran.XXXXXX"; + users.users.alice = { + initialPassword = "pass1"; + isNormalUser = true; }; + systemd.user.tmpfiles.users.alice.rules = [ "r %h/file-to-remove" ]; + }; - nodes.machine = { - system.switch.enable = true; - system.userActivationScripts.foo = "mktemp ~/user-activation-ran.XXXXXX"; - users.users.alice = { - initialPassword = "pass1"; - isNormalUser = true; - }; - systemd.user.tmpfiles.users.alice.rules = [ "r %h/file-to-remove" ]; - }; - - testScript = '' - def verify_user_activation_run_count(n): - machine.succeed( - '[[ "$(find /home/alice/ -name user-activation-ran.\\* | wc -l)" == %s ]]' % n - ) + testScript = '' + def verify_user_activation_run_count(n): + machine.succeed( + '[[ "$(find /home/alice/ -name user-activation-ran.\\* | wc -l)" == %s ]]' % n + ) - machine.wait_for_unit("multi-user.target") - machine.wait_for_unit("getty@tty1.service") - machine.wait_until_tty_matches("1", "login: ") - machine.send_chars("alice\n") - machine.wait_until_tty_matches("1", "Password: ") - machine.send_chars("pass1\n") - machine.send_chars("touch login-ok\n") - machine.wait_for_file("/home/alice/login-ok") - verify_user_activation_run_count(1) + machine.wait_for_unit("multi-user.target") + machine.wait_for_unit("getty@tty1.service") + machine.wait_until_tty_matches("1", "login: ") + machine.send_chars("alice\n") + machine.wait_until_tty_matches("1", "Password: ") + machine.send_chars("pass1\n") + machine.send_chars("touch login-ok\n") + machine.wait_for_file("/home/alice/login-ok") + verify_user_activation_run_count(1) - machine.succeed("touch /home/alice/file-to-remove") - machine.succeed("/run/current-system/bin/switch-to-configuration test") - verify_user_activation_run_count(2) - machine.succeed("[[ ! -f /home/alice/file-to-remove ]] || false") - ''; - } -) + machine.succeed("touch /home/alice/file-to-remove") + machine.succeed("/run/current-system/bin/switch-to-configuration test") + verify_user_activation_run_count(2) + machine.succeed("[[ ! -f /home/alice/file-to-remove ]] || false") + ''; +} diff --git a/nixos/tests/user-home-mode.nix b/nixos/tests/user-home-mode.nix index f5bcbcbcef8a..dca1e67ef943 100644 --- a/nixos/tests/user-home-mode.nix +++ b/nixos/tests/user-home-mode.nix @@ -1,40 +1,38 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "user-home-mode"; - meta = with lib.maintainers; { - maintainers = [ fbeffa ]; - }; +{ lib, ... 
}: +{ + name = "user-home-mode"; + meta = with lib.maintainers; { + maintainers = [ fbeffa ]; + }; - nodes.machine = { - users.users.alice = { - initialPassword = "pass1"; - isNormalUser = true; - }; - users.users.bob = { - initialPassword = "pass2"; - isNormalUser = true; - homeMode = "750"; - }; - users.users.carol = { - initialPassword = "pass3"; - isNormalUser = true; - createHome = true; - home = "/users/carol"; - }; + nodes.machine = { + users.users.alice = { + initialPassword = "pass1"; + isNormalUser = true; }; + users.users.bob = { + initialPassword = "pass2"; + isNormalUser = true; + homeMode = "750"; + }; + users.users.carol = { + initialPassword = "pass3"; + isNormalUser = true; + createHome = true; + home = "/users/carol"; + }; + }; - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.wait_for_unit("getty@tty1.service") - machine.wait_until_tty_matches("1", "login: ") - machine.send_chars("alice\n") - machine.wait_until_tty_matches("1", "Password: ") - machine.send_chars("pass1\n") - machine.succeed('[ "$(stat -c %a /home/alice)" == "700" ]') - machine.succeed('[ "$(stat -c %a /home/bob)" == "750" ]') - machine.succeed('[ "$(stat -c %a /users)" == "755" ]') - machine.succeed('[ "$(stat -c %a /users/carol)" == "700" ]') - ''; - } -) + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.wait_for_unit("getty@tty1.service") + machine.wait_until_tty_matches("1", "login: ") + machine.send_chars("alice\n") + machine.wait_until_tty_matches("1", "Password: ") + machine.send_chars("pass1\n") + machine.succeed('[ "$(stat -c %a /home/alice)" == "700" ]') + machine.succeed('[ "$(stat -c %a /home/bob)" == "750" ]') + machine.succeed('[ "$(stat -c %a /users)" == "755" ]') + machine.succeed('[ "$(stat -c %a /users/carol)" == "700" ]') + ''; +} diff --git a/nixos/tests/ustreamer.nix b/nixos/tests/ustreamer.nix index a47dc42c0c66..a9cf7150915e 100644 --- a/nixos/tests/ustreamer.nix +++ b/nixos/tests/ustreamer.nix @@ -1,74 +1,72 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "ustreamer-vmtest"; - nodes = { - client = - { ... }: - { - environment.systemPackages = [ pkgs.curl ]; +{ pkgs, ... }: +{ + name = "ustreamer-vmtest"; + nodes = { + client = + { ... }: + { + environment.systemPackages = [ pkgs.curl ]; + }; + camera = + { config, ... }: + let + configFile = pkgs.writeText "akvcam-configFile" '' + [Cameras] + cameras/size = 2 + + cameras/1/type = output + cameras/1/mode = mmap, userptr, rw + cameras/1/description = Virtual Camera (output device) + cameras/1/formats = 2 + cameras/1/videonr = 7 + + cameras/2/type = capture + cameras/2/mode = mmap, rw + cameras/2/description = Virtual Camera + cameras/2/formats = 1, 2 + cameras/2/videonr = 9 + + [Connections] + connections/size = 1 + connections/1/connection = 1:2 + + [Formats] + formats/size = 2 + + formats/1/format = YUY2 + formats/1/width = 640 + formats/1/height = 480 + formats/1/fps = 30 + + formats/2/format = RGB24, YUY2 + formats/2/width = 640 + formats/2/height = 480 + formats/2/fps = 20/1, 15/2 + ''; + in + { + services.ustreamer = { + enable = true; + device = "/dev/video9"; + extraArgs = [ "--device-timeout=8" ]; }; - camera = - { config, ... 
}: - let - configFile = pkgs.writeText "akvcam-configFile" '' - [Cameras] - cameras/size = 2 + networking.firewall.allowedTCPPorts = [ 8080 ]; - cameras/1/type = output - cameras/1/mode = mmap, userptr, rw - cameras/1/description = Virtual Camera (output device) - cameras/1/formats = 2 - cameras/1/videonr = 7 + boot.extraModulePackages = [ config.boot.kernelPackages.akvcam ]; + boot.kernelModules = [ "akvcam" ]; + boot.extraModprobeConfig = '' + options akvcam config_file=${configFile} + ''; + }; + }; - cameras/2/type = capture - cameras/2/mode = mmap, rw - cameras/2/description = Virtual Camera - cameras/2/formats = 1, 2 - cameras/2/videonr = 9 + testScript = '' + start_all() - [Connections] - connections/size = 1 - connections/1/connection = 1:2 + camera.wait_for_unit("ustreamer.service") + camera.wait_for_open_port(8080) - [Formats] - formats/size = 2 - - formats/1/format = YUY2 - formats/1/width = 640 - formats/1/height = 480 - formats/1/fps = 30 - - formats/2/format = RGB24, YUY2 - formats/2/width = 640 - formats/2/height = 480 - formats/2/fps = 20/1, 15/2 - ''; - in - { - services.ustreamer = { - enable = true; - device = "/dev/video9"; - extraArgs = [ "--device-timeout=8" ]; - }; - networking.firewall.allowedTCPPorts = [ 8080 ]; - - boot.extraModulePackages = [ config.boot.kernelPackages.akvcam ]; - boot.kernelModules = [ "akvcam" ]; - boot.extraModprobeConfig = '' - options akvcam config_file=${configFile} - ''; - }; - }; - - testScript = '' - start_all() - - camera.wait_for_unit("ustreamer.service") - camera.wait_for_open_port(8080) - - client.wait_for_unit("multi-user.target") - client.succeed("curl http://camera:8080") - ''; - } -) + client.wait_for_unit("multi-user.target") + client.succeed("curl http://camera:8080") + ''; +} diff --git a/nixos/tests/uwsgi.nix b/nixos/tests/uwsgi.nix index ea8e73b0b638..52b9d6361077 100644 --- a/nixos/tests/uwsgi.nix +++ b/nixos/tests/uwsgi.nix @@ -1,87 +1,85 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "uwsgi"; - meta = with pkgs.lib.maintainers; { - maintainers = [ lnl7 ]; - }; +{ pkgs, ... }: +{ + name = "uwsgi"; + meta = with pkgs.lib.maintainers; { + maintainers = [ lnl7 ]; + }; - nodes.machine = - { pkgs, ... }: - { - users.users.hello = { - isSystemUser = true; - group = "hello"; + nodes.machine = + { pkgs, ... }: + { + users.users.hello = { + isSystemUser = true; + group = "hello"; + }; + users.groups.hello = { }; + + services.uwsgi = { + enable = true; + plugins = [ + "python3" + "php" + ]; + capabilities = [ "CAP_NET_BIND_SERVICE" ]; + instance.type = "emperor"; + + instance.vassals.hello = { + type = "normal"; + immediate-uid = "hello"; + immediate-gid = "hello"; + module = "wsgi:application"; + http = ":80"; + cap = "net_bind_service"; + pythonPackages = self: [ self.flask ]; + chdir = pkgs.writeTextDir "wsgi.py" '' + from flask import Flask + import subprocess + application = Flask(__name__) + + @application.route("/") + def hello(): + return "Hello, World!" 
+ + @application.route("/whoami") + def whoami(): + whoami = "${pkgs.coreutils}/bin/whoami" + proc = subprocess.run(whoami, capture_output=True) + return proc.stdout.decode().strip() + ''; }; - users.groups.hello = { }; - services.uwsgi = { - enable = true; - plugins = [ - "python3" - "php" - ]; - capabilities = [ "CAP_NET_BIND_SERVICE" ]; - instance.type = "emperor"; - - instance.vassals.hello = { - type = "normal"; - immediate-uid = "hello"; - immediate-gid = "hello"; - module = "wsgi:application"; - http = ":80"; - cap = "net_bind_service"; - pythonPackages = self: [ self.flask ]; - chdir = pkgs.writeTextDir "wsgi.py" '' - from flask import Flask - import subprocess - application = Flask(__name__) - - @application.route("/") - def hello(): - return "Hello, World!" - - @application.route("/whoami") - def whoami(): - whoami = "${pkgs.coreutils}/bin/whoami" - proc = subprocess.run(whoami, capture_output=True) - return proc.stdout.decode().strip() - ''; - }; - - instance.vassals.php = { - type = "normal"; - master = true; - workers = 2; - http-socket = ":8000"; - http-socket-modifier1 = 14; - php-index = "index.php"; - php-docroot = pkgs.writeTextDir "index.php" '' - - ''; - }; + instance.vassals.php = { + type = "normal"; + master = true; + workers = 2; + http-socket = ":8000"; + http-socket-modifier1 = 14; + php-index = "index.php"; + php-docroot = pkgs.writeTextDir "index.php" '' + + ''; }; }; + }; - testScript = '' - machine.wait_for_unit("multi-user.target") - machine.wait_for_unit("uwsgi.service") + testScript = '' + machine.wait_for_unit("multi-user.target") + machine.wait_for_unit("uwsgi.service") - with subtest("uWSGI has started"): - machine.wait_for_unit("uwsgi.service") + with subtest("uWSGI has started"): + machine.wait_for_unit("uwsgi.service") - with subtest("Vassal can bind on port <1024"): - machine.wait_for_open_port(80) - hello = machine.succeed("curl -f http://machine").strip() - assert "Hello, World!" in hello, f"Excepted 'Hello, World!', got '{hello}'" + with subtest("Vassal can bind on port <1024"): + machine.wait_for_open_port(80) + hello = machine.succeed("curl -f http://machine").strip() + assert "Hello, World!" in hello, f"Excepted 'Hello, World!', got '{hello}'" - with subtest("Vassal is running as dedicated user"): - username = machine.succeed("curl -f http://machine/whoami").strip() - assert username == "hello", f"Excepted 'hello', got '{username}'" + with subtest("Vassal is running as dedicated user"): + username = machine.succeed("curl -f http://machine/whoami").strip() + assert username == "hello", f"Excepted 'hello', got '{username}'" - with subtest("PHP plugin is working"): - machine.wait_for_open_port(8000) - assert "Hello World" in machine.succeed("curl -fv http://machine:8000") - ''; - } -) + with subtest("PHP plugin is working"): + machine.wait_for_open_port(8000) + assert "Hello World" in machine.succeed("curl -fv http://machine:8000") + ''; +} diff --git a/nixos/tests/v2ray.nix b/nixos/tests/v2ray.nix index c20720d057d0..29b7c8635444 100644 --- a/nixos/tests/v2ray.nix +++ b/nixos/tests/v2ray.nix @@ -1,99 +1,97 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let +{ lib, pkgs, ... }: +let - v2rayUser = { - # A random UUID. - id = "a6a46834-2150-45f8-8364-0f6f6ab32384"; - alterId = 0; # Non-zero support will be disabled in the future. - }; + v2rayUser = { + # A random UUID. + id = "a6a46834-2150-45f8-8364-0f6f6ab32384"; + alterId = 0; # Non-zero support will be disabled in the future. 
+ }; - # 1080 [http proxy] -> 1081 [vmess] -> direct - v2rayConfig = { - inbounds = [ - { - tag = "http_in"; - port = 1080; - listen = "127.0.0.1"; - protocol = "http"; - } - { - tag = "vmess_in"; - port = 1081; - listen = "127.0.0.1"; - protocol = "vmess"; - settings.clients = [ v2rayUser ]; - } - ]; - outbounds = [ - { - tag = "vmess_out"; - protocol = "vmess"; - settings.vnext = [ - { - address = "127.0.0.1"; - port = 1081; - users = [ v2rayUser ]; - } - ]; - } - { - tag = "direct"; - protocol = "freedom"; - } - ]; - routing.rules = [ - { - type = "field"; - inboundTag = "http_in"; - outboundTag = "vmess_out"; - } - { - type = "field"; - inboundTag = "vmess_in"; - outboundTag = "direct"; - } - - # Assert assets "geoip" and "geosite" are accessible. - { - type = "field"; - ip = [ "geoip:private" ]; - domain = [ "geosite:category-ads" ]; - outboundTag = "direct"; - } - ]; - }; - - in - { - name = "v2ray"; - meta = with lib.maintainers; { - maintainers = [ servalcatty ]; - }; - nodes.machine = - { pkgs, ... }: + # 1080 [http proxy] -> 1081 [vmess] -> direct + v2rayConfig = { + inbounds = [ { - environment.systemPackages = [ pkgs.curl ]; - services.v2ray = { - enable = true; - config = v2rayConfig; - }; - services.httpd = { - enable = true; - adminAddr = "foo@example.org"; - }; + tag = "http_in"; + port = 1080; + listen = "127.0.0.1"; + protocol = "http"; + } + { + tag = "vmess_in"; + port = 1081; + listen = "127.0.0.1"; + protocol = "vmess"; + settings.clients = [ v2rayUser ]; + } + ]; + outbounds = [ + { + tag = "vmess_out"; + protocol = "vmess"; + settings.vnext = [ + { + address = "127.0.0.1"; + port = 1081; + users = [ v2rayUser ]; + } + ]; + } + { + tag = "direct"; + protocol = "freedom"; + } + ]; + routing.rules = [ + { + type = "field"; + inboundTag = "http_in"; + outboundTag = "vmess_out"; + } + { + type = "field"; + inboundTag = "vmess_in"; + outboundTag = "direct"; + } + + # Assert assets "geoip" and "geosite" are accessible. + { + type = "field"; + ip = [ "geoip:private" ]; + domain = [ "geosite:category-ads" ]; + outboundTag = "direct"; + } + ]; + }; + +in +{ + name = "v2ray"; + meta = with lib.maintainers; { + maintainers = [ servalcatty ]; + }; + nodes.machine = + { pkgs, ... }: + { + environment.systemPackages = [ pkgs.curl ]; + services.v2ray = { + enable = true; + config = v2rayConfig; }; + services.httpd = { + enable = true; + adminAddr = "foo@example.org"; + }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - machine.wait_for_unit("httpd.service") - machine.wait_for_unit("v2ray.service") - machine.wait_for_open_port(80) - machine.wait_for_open_port(1080) - machine.succeed( - "curl --fail --max-time 10 --proxy http://localhost:1080 http://localhost" - ) - ''; - } -) + machine.wait_for_unit("httpd.service") + machine.wait_for_unit("v2ray.service") + machine.wait_for_open_port(80) + machine.wait_for_open_port(1080) + machine.succeed( + "curl --fail --max-time 10 --proxy http://localhost:1080 http://localhost" + ) + ''; +} diff --git a/nixos/tests/vault-agent.nix b/nixos/tests/vault-agent.nix index dfebc6b21828..4c9f98a3df7e 100644 --- a/nixos/tests/vault-agent.nix +++ b/nixos/tests/vault-agent.nix @@ -1,63 +1,61 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "vault-agent"; +{ pkgs, ... }: +{ + name = "vault-agent"; - nodes.machine = - { config, pkgs, ... }: - { - services.vault-agent.instances.example.settings = { - vault.address = config.environment.variables.VAULT_ADDR; + nodes.machine = + { config, pkgs, ... 
}: + { + services.vault-agent.instances.example.settings = { + vault.address = config.environment.variables.VAULT_ADDR; - auto_auth = [ - { - method = [ - { - type = "token_file"; - config.token_file_path = pkgs.writeText "vault-token" config.environment.variables.VAULT_TOKEN; - } - ]; - } - ]; + auto_auth = [ + { + method = [ + { + type = "token_file"; + config.token_file_path = pkgs.writeText "vault-token" config.environment.variables.VAULT_TOKEN; + } + ]; + } + ]; - template = [ - { - contents = '' - {{- with secret "secret/example" }} - {{ .Data.data.key }}" - {{- end }} - ''; - perms = "0600"; - destination = "/example"; - } - ]; - }; - - services.vault = { - enable = true; - dev = true; - devRootTokenID = config.environment.variables.VAULT_TOKEN; - }; - - environment = { - systemPackages = [ pkgs.vault ]; - variables = { - VAULT_ADDR = "http://localhost:8200"; - VAULT_TOKEN = "root"; - }; - }; + template = [ + { + contents = '' + {{- with secret "secret/example" }} + {{ .Data.data.key }}" + {{- end }} + ''; + perms = "0600"; + destination = "/example"; + } + ]; }; - testScript = '' - machine.wait_for_unit("vault.service") - machine.wait_for_open_port(8200) + services.vault = { + enable = true; + dev = true; + devRootTokenID = config.environment.variables.VAULT_TOKEN; + }; - machine.wait_until_succeeds('vault kv put secret/example key=example') + environment = { + systemPackages = [ pkgs.vault ]; + variables = { + VAULT_ADDR = "http://localhost:8200"; + VAULT_TOKEN = "root"; + }; + }; + }; - machine.wait_for_unit("vault-agent-example.service") + testScript = '' + machine.wait_for_unit("vault.service") + machine.wait_for_open_port(8200) - machine.wait_for_file("/example") - machine.succeed('grep "example" /example') - ''; - } -) + machine.wait_until_succeeds('vault kv put secret/example key=example') + + machine.wait_for_unit("vault-agent-example.service") + + machine.wait_for_file("/example") + machine.succeed('grep "example" /example') + ''; +} diff --git a/nixos/tests/vault-dev.nix b/nixos/tests/vault-dev.nix index b66b72ac74a9..a70a35b6d736 100644 --- a/nixos/tests/vault-dev.nix +++ b/nixos/tests/vault-dev.nix @@ -1,42 +1,40 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "vault-dev"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - lnl7 - mic92 - ]; - }; - nodes.machine = - { pkgs, config, ... }: - { - environment.systemPackages = [ pkgs.vault ]; - environment.variables.VAULT_ADDR = "http://127.0.0.1:8200"; - environment.variables.VAULT_TOKEN = "phony-secret"; +{ pkgs, ... }: +{ + name = "vault-dev"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + lnl7 + mic92 + ]; + }; + nodes.machine = + { pkgs, config, ... 
}: + { + environment.systemPackages = [ pkgs.vault ]; + environment.variables.VAULT_ADDR = "http://127.0.0.1:8200"; + environment.variables.VAULT_TOKEN = "phony-secret"; - services.vault = { - enable = true; - dev = true; - devRootTokenID = config.environment.variables.VAULT_TOKEN; - }; + services.vault = { + enable = true; + dev = true; + devRootTokenID = config.environment.variables.VAULT_TOKEN; }; + }; - testScript = '' - import json - start_all() - machine.wait_for_unit("multi-user.target") - machine.wait_for_unit("vault.service") - machine.wait_for_open_port(8200) - out = machine.succeed("vault status -format=json") - print(out) - status = json.loads(out) - assert status.get("initialized") == True - machine.succeed("vault kv put secret/foo bar=baz") - out = machine.succeed("vault kv get -format=json secret/foo") - print(out) - status = json.loads(out) - assert status.get("data", {}).get("data", {}).get("bar") == "baz" - ''; - } -) + testScript = '' + import json + start_all() + machine.wait_for_unit("multi-user.target") + machine.wait_for_unit("vault.service") + machine.wait_for_open_port(8200) + out = machine.succeed("vault status -format=json") + print(out) + status = json.loads(out) + assert status.get("initialized") == True + machine.succeed("vault kv put secret/foo bar=baz") + out = machine.succeed("vault kv get -format=json secret/foo") + print(out) + status = json.loads(out) + assert status.get("data", {}).get("data", {}).get("bar") == "baz" + ''; +} diff --git a/nixos/tests/vault-postgresql.nix b/nixos/tests/vault-postgresql.nix index 33d45306a36c..b0ccb6ae0456 100644 --- a/nixos/tests/vault-postgresql.nix +++ b/nixos/tests/vault-postgresql.nix @@ -6,71 +6,69 @@ always covered, availability isn't) - the postgres integration works */ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "vault-postgresql"; - meta = with pkgs.lib.maintainers; { - maintainers = [ - lnl7 - roberth - ]; - }; - nodes.machine = - { lib, pkgs, ... }: - { - environment.systemPackages = [ pkgs.vault ]; - environment.variables.VAULT_ADDR = "http://127.0.0.1:8200"; - services.vault.enable = true; - services.vault.extraSettingsPaths = [ "/run/vault.hcl" ]; +{ pkgs, ... }: +{ + name = "vault-postgresql"; + meta = with pkgs.lib.maintainers; { + maintainers = [ + lnl7 + roberth + ]; + }; + nodes.machine = + { lib, pkgs, ... }: + { + environment.systemPackages = [ pkgs.vault ]; + environment.variables.VAULT_ADDR = "http://127.0.0.1:8200"; + services.vault.enable = true; + services.vault.extraSettingsPaths = [ "/run/vault.hcl" ]; - systemd.services.vault = { - after = [ - "postgresql.service" - ]; - # Try for about 10 minutes rather than the default of 5 attempts. 
- serviceConfig.RestartSec = 1; - serviceConfig.StartLimitBurst = 600; - }; - # systemd.services.vault.unitConfig.RequiresMountsFor = "/run/keys/"; - - services.postgresql.enable = true; - services.postgresql.initialScript = pkgs.writeText "init.psql" '' - CREATE USER vaultuser WITH ENCRYPTED PASSWORD 'thisisthepass'; - GRANT CONNECT ON DATABASE postgres TO vaultuser; - - -- https://www.vaultproject.io/docs/configuration/storage/postgresql - CREATE TABLE vault_kv_store ( - parent_path TEXT COLLATE "C" NOT NULL, - path TEXT COLLATE "C", - key TEXT COLLATE "C", - value BYTEA, - CONSTRAINT pkey PRIMARY KEY (path, key) - ); - CREATE INDEX parent_path_idx ON vault_kv_store (parent_path); - - GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO vaultuser; - ''; + systemd.services.vault = { + after = [ + "postgresql.service" + ]; + # Try for about 10 minutes rather than the default of 5 attempts. + serviceConfig.RestartSec = 1; + serviceConfig.StartLimitBurst = 600; }; + # systemd.services.vault.unitConfig.RequiresMountsFor = "/run/keys/"; - testScript = '' - secretConfig = """ - storage "postgresql" { - connection_url = "postgres://vaultuser:thisisthepass@localhost/postgres?sslmode=disable" - } - """ + services.postgresql.enable = true; + services.postgresql.initialScript = pkgs.writeText "init.psql" '' + CREATE USER vaultuser WITH ENCRYPTED PASSWORD 'thisisthepass'; + GRANT CONNECT ON DATABASE postgres TO vaultuser; - start_all() + -- https://www.vaultproject.io/docs/configuration/storage/postgresql + CREATE TABLE vault_kv_store ( + parent_path TEXT COLLATE "C" NOT NULL, + path TEXT COLLATE "C", + key TEXT COLLATE "C", + value BYTEA, + CONSTRAINT pkey PRIMARY KEY (path, key) + ); + CREATE INDEX parent_path_idx ON vault_kv_store (parent_path); - machine.wait_for_unit("multi-user.target") - machine.succeed("cat >/root/vault.hcl </root/vault.hcl < No current-user-principal returned, re-using URL http://localhost:8080/user/calendars/ - # but we do not need username/password. - }; + xandikos_calendars = { + type = "caldav"; + url = "http://localhost:8080/user/calendars"; + # Xandikos warns + # > No current-user-principal returned, re-using URL http://localhost:8080/user/calendars/ + # but we do not need username/password. 
+ }; - xandikos_contacts = { - type = "carddav"; - url = "http://localhost:8080/user/contacts"; - }; + xandikos_contacts = { + type = "carddav"; + url = "http://localhost:8080/user/contacts"; + }; - local_calendars = { - type = "filesystem"; - path = "~/calendars"; - fileext = ".ics"; - }; + local_calendars = { + type = "filesystem"; + path = "~/calendars"; + fileext = ".ics"; + }; - local_contacts = { - type = "filesystem"; - path = "~/contacts"; - fileext = ".vcf"; - }; + local_contacts = { + type = "filesystem"; + path = "~/contacts"; + fileext = ".vcf"; + }; - mkPairs = a: b: { - calendars = { - a = "${a}_calendars"; - b = "${b}_calendars"; - collections = [ - "from a" - "from b" - ]; - }; - contacts = { - a = "${a}_contacts"; - b = "${b}_contacts"; - collections = [ - "from a" - "from b" - ]; - }; - }; - - mkRadicaleProps = - tag: - pkgs.writeText "Radicale.props" ( - builtins.toJSON { - inherit tag; - } - ); - - writeLines = - name: eol: lines: - pkgs.writeText name (lib.concatMapStrings (l: "${l}${eol}") lines); - - prodid = "-//NixOS test//EN"; - dtstamp = "20231129T194743Z"; - - writeICS = - { - uid, - summary, - dtstart, - dtend, - }: - writeLines "${uid}.ics" "\r\n" [ - "BEGIN:VCALENDAR" - "VERSION:2.0" - "PRODID:${prodid}" - "BEGIN:VEVENT" - "UID:${uid}" - "SUMMARY:${summary}" - "DTSTART:${dtstart}" - "DTEND:${dtend}" - "DTSTAMP:${dtstamp}" - "END:VEVENT" - "END:VCALENDAR" + mkPairs = a: b: { + calendars = { + a = "${a}_calendars"; + b = "${b}_calendars"; + collections = [ + "from a" + "from b" ]; - - foo_ics = writeICS { - uid = "foo"; - summary = "Epochalypse"; - dtstart = "19700101T000000Z"; - dtend = "20380119T031407Z"; }; - - bar_ics = writeICS { - uid = "bar"; - summary = "One Billion Seconds"; - dtstart = "19700101T000000Z"; - dtend = "20010909T014640Z"; - }; - - writeVCF = - { - uid, - name, - displayName, - email, - }: - writeLines "${uid}.vcf" "\r\n" [ - # One of the tools enforces this order of fields. 
- "BEGIN:VCARD" - "VERSION:4.0" - "UID:${uid}" - "EMAIL;TYPE=INTERNET:${email}" - "FN:${displayName}" - "N:${name}" - "END:VCARD" + contacts = { + a = "${a}_contacts"; + b = "${b}_contacts"; + collections = [ + "from a" + "from b" ]; - - foo_vcf = writeVCF { - uid = "foo"; - name = "Doe;John;;;"; - displayName = "John Doe"; - email = "john.doe@example.org"; }; + }; - bar_vcf = writeVCF { - uid = "bar"; - name = "Doe;Jane;;;"; - displayName = "Jane Doe"; - email = "jane.doe@example.org"; - }; + mkRadicaleProps = + tag: + pkgs.writeText "Radicale.props" ( + builtins.toJSON { + inherit tag; + } + ); - in - { - name = "vdirsyncer"; + writeLines = + name: eol: lines: + pkgs.writeText name (lib.concatMapStrings (l: "${l}${eol}") lines); - meta.maintainers = with lib.maintainers; [ schnusch ]; + prodid = "-//NixOS test//EN"; + dtstamp = "20231129T194743Z"; - nodes = { - machine = { - services.radicale = { - enable = true; - settings.auth.type = "none"; - }; + writeICS = + { + uid, + summary, + dtstart, + dtend, + }: + writeLines "${uid}.ics" "\r\n" [ + "BEGIN:VCALENDAR" + "VERSION:2.0" + "PRODID:${prodid}" + "BEGIN:VEVENT" + "UID:${uid}" + "SUMMARY:${summary}" + "DTSTART:${dtstart}" + "DTEND:${dtend}" + "DTSTAMP:${dtstamp}" + "END:VEVENT" + "END:VCALENDAR" + ]; - services.xandikos = { - enable = true; - extraOptions = [ "--autocreate" ]; - }; + foo_ics = writeICS { + uid = "foo"; + summary = "Epochalypse"; + dtstart = "19700101T000000Z"; + dtend = "20380119T031407Z"; + }; - services.vdirsyncer = { - enable = true; - jobs = { + bar_ics = writeICS { + uid = "bar"; + summary = "One Billion Seconds"; + dtstart = "19700101T000000Z"; + dtend = "20010909T014640Z"; + }; - alice = { - user = "alice"; - group = "users"; - config = { - statusPath = "/home/alice/.vdirsyncer"; - storages = { - inherit - local_calendars - local_contacts - radicale_calendars - radicale_contacts - ; - }; - pairs = mkPairs "local" "radicale"; + writeVCF = + { + uid, + name, + displayName, + email, + }: + writeLines "${uid}.vcf" "\r\n" [ + # One of the tools enforces this order of fields. 
+ "BEGIN:VCARD" + "VERSION:4.0" + "UID:${uid}" + "EMAIL;TYPE=INTERNET:${email}" + "FN:${displayName}" + "N:${name}" + "END:VCARD" + ]; + + foo_vcf = writeVCF { + uid = "foo"; + name = "Doe;John;;;"; + displayName = "John Doe"; + email = "john.doe@example.org"; + }; + + bar_vcf = writeVCF { + uid = "bar"; + name = "Doe;Jane;;;"; + displayName = "Jane Doe"; + email = "jane.doe@example.org"; + }; + +in +{ + name = "vdirsyncer"; + + meta.maintainers = with lib.maintainers; [ schnusch ]; + + nodes = { + machine = { + services.radicale = { + enable = true; + settings.auth.type = "none"; + }; + + services.xandikos = { + enable = true; + extraOptions = [ "--autocreate" ]; + }; + + services.vdirsyncer = { + enable = true; + jobs = { + + alice = { + user = "alice"; + group = "users"; + config = { + statusPath = "/home/alice/.vdirsyncer"; + storages = { + inherit + local_calendars + local_contacts + radicale_calendars + radicale_contacts + ; }; - forceDiscover = true; + pairs = mkPairs "local" "radicale"; }; - - bob = { - user = "bob"; - group = "users"; - config = { - statusPath = "/home/bob/.vdirsyncer"; - storages = { - inherit - local_calendars - local_contacts - xandikos_calendars - xandikos_contacts - ; - }; - pairs = mkPairs "local" "xandikos"; - }; - forceDiscover = true; - }; - - remote = { - config = { - storages = { - inherit - radicale_calendars - radicale_contacts - xandikos_calendars - xandikos_contacts - ; - }; - pairs = mkPairs "radicale" "xandikos"; - }; - forceDiscover = true; - }; - + forceDiscover = true; + }; + + bob = { + user = "bob"; + group = "users"; + config = { + statusPath = "/home/bob/.vdirsyncer"; + storages = { + inherit + local_calendars + local_contacts + xandikos_calendars + xandikos_contacts + ; + }; + pairs = mkPairs "local" "xandikos"; + }; + forceDiscover = true; + }; + + remote = { + config = { + storages = { + inherit + radicale_calendars + radicale_contacts + xandikos_calendars + xandikos_contacts + ; + }; + pairs = mkPairs "radicale" "xandikos"; + }; + forceDiscover = true; }; - }; - users.users = { - alice.isNormalUser = true; - bob.isNormalUser = true; }; }; + + users.users = { + alice.isNormalUser = true; + bob.isNormalUser = true; + }; }; + }; - testScript = '' - def run_unit(name): - machine.systemctl(f"start {name}") - # The service is Type=oneshot without RemainAfterExit=yes. Once it - # is finished it is no longer active and wait_for_unit will fail. - # When that happens we check if it actually failed. - try: - machine.wait_for_unit(name) - except: - machine.fail(f"systemctl is-failed {name}") + testScript = '' + def run_unit(name): + machine.systemctl(f"start {name}") + # The service is Type=oneshot without RemainAfterExit=yes. Once it + # is finished it is no longer active and wait_for_unit will fail. + # When that happens we check if it actually failed. 
+ try: + machine.wait_for_unit(name) + except: + machine.fail(f"systemctl is-failed {name}") - start_all() + start_all() - machine.wait_for_open_port(5232) - machine.wait_for_open_port(8080) - machine.wait_for_unit("multi-user.target") + machine.wait_for_open_port(5232) + machine.wait_for_open_port(8080) + machine.wait_for_unit("multi-user.target") - with subtest("alice -> radicale"): - # vdirsyncer cannot create create collections on Radicale, - # see https://vdirsyncer.pimutils.org/en/stable/tutorials/radicale.html - machine.succeed("runuser -u radicale -- install -Dm 644 ${mkRadicaleProps "VCALENDAR"} /var/lib/radicale/collections/collection-root/alice/foocal/.Radicale.props") - machine.succeed("runuser -u radicale -- install -Dm 644 ${mkRadicaleProps "VADDRESSBOOK"} /var/lib/radicale/collections/collection-root/alice/foocard/.Radicale.props") + with subtest("alice -> radicale"): + # vdirsyncer cannot create create collections on Radicale, + # see https://vdirsyncer.pimutils.org/en/stable/tutorials/radicale.html + machine.succeed("runuser -u radicale -- install -Dm 644 ${mkRadicaleProps "VCALENDAR"} /var/lib/radicale/collections/collection-root/alice/foocal/.Radicale.props") + machine.succeed("runuser -u radicale -- install -Dm 644 ${mkRadicaleProps "VADDRESSBOOK"} /var/lib/radicale/collections/collection-root/alice/foocard/.Radicale.props") - machine.succeed("runuser -u alice -- install -Dm 644 ${foo_ics} /home/alice/calendars/foocal/foo.ics") - machine.succeed("runuser -u alice -- install -Dm 644 ${foo_vcf} /home/alice/contacts/foocard/foo.vcf") - run_unit("vdirsyncer@alice.service") + machine.succeed("runuser -u alice -- install -Dm 644 ${foo_ics} /home/alice/calendars/foocal/foo.ics") + machine.succeed("runuser -u alice -- install -Dm 644 ${foo_vcf} /home/alice/contacts/foocard/foo.vcf") + run_unit("vdirsyncer@alice.service") - # test statusPath - machine.succeed("test -d /home/alice/.vdirsyncer") - machine.fail("test -e /var/lib/private/vdirsyncer/alice") + # test statusPath + machine.succeed("test -d /home/alice/.vdirsyncer") + machine.fail("test -e /var/lib/private/vdirsyncer/alice") - with subtest("bob -> xandikos"): - # I suspect Radicale shares the namespace for calendars and - # contacts, but Xandikos separates them. We just use `barcal` and - # `barcard` with Xandikos as well to avoid conflicts. - machine.succeed("runuser -u bob -- install -Dm 644 ${bar_ics} /home/bob/calendars/barcal/bar.ics") - machine.succeed("runuser -u bob -- install -Dm 644 ${bar_vcf} /home/bob/contacts/barcard/bar.vcf") - run_unit("vdirsyncer@bob.service") + with subtest("bob -> xandikos"): + # I suspect Radicale shares the namespace for calendars and + # contacts, but Xandikos separates them. We just use `barcal` and + # `barcard` with Xandikos as well to avoid conflicts. 
+ machine.succeed("runuser -u bob -- install -Dm 644 ${bar_ics} /home/bob/calendars/barcal/bar.ics") + machine.succeed("runuser -u bob -- install -Dm 644 ${bar_vcf} /home/bob/contacts/barcard/bar.vcf") + run_unit("vdirsyncer@bob.service") - # test statusPath - machine.succeed("test -d /home/bob/.vdirsyncer") - machine.fail("test -e /var/lib/private/vdirsyncer/bob") + # test statusPath + machine.succeed("test -d /home/bob/.vdirsyncer") + machine.fail("test -e /var/lib/private/vdirsyncer/bob") - with subtest("radicale <-> xandikos"): - # vdirsyncer cannot create create collections on Radicale, - # see https://vdirsyncer.pimutils.org/en/stable/tutorials/radicale.html - machine.succeed("runuser -u radicale -- install -Dm 644 ${mkRadicaleProps "VCALENDAR"} /var/lib/radicale/collections/collection-root/alice/barcal/.Radicale.props") - machine.succeed("runuser -u radicale -- install -Dm 644 ${mkRadicaleProps "VADDRESSBOOK"} /var/lib/radicale/collections/collection-root/alice/barcard/.Radicale.props") + with subtest("radicale <-> xandikos"): + # vdirsyncer cannot create create collections on Radicale, + # see https://vdirsyncer.pimutils.org/en/stable/tutorials/radicale.html + machine.succeed("runuser -u radicale -- install -Dm 644 ${mkRadicaleProps "VCALENDAR"} /var/lib/radicale/collections/collection-root/alice/barcal/.Radicale.props") + machine.succeed("runuser -u radicale -- install -Dm 644 ${mkRadicaleProps "VADDRESSBOOK"} /var/lib/radicale/collections/collection-root/alice/barcard/.Radicale.props") - run_unit("vdirsyncer@remote.service") + run_unit("vdirsyncer@remote.service") - # test statusPath - machine.succeed("test -d /var/lib/private/vdirsyncer/remote") + # test statusPath + machine.succeed("test -d /var/lib/private/vdirsyncer/remote") - with subtest("radicale -> alice"): - run_unit("vdirsyncer@alice.service") + with subtest("radicale -> alice"): + run_unit("vdirsyncer@alice.service") - with subtest("xandikos -> bob"): - run_unit("vdirsyncer@bob.service") + with subtest("xandikos -> bob"): + run_unit("vdirsyncer@bob.service") - with subtest("compare synced files"): - # iCalendar files get reordered - machine.succeed("diff -u --strip-trailing-cr <(sort /home/alice/calendars/foocal/foo.ics) <(sort /home/bob/calendars/foocal/foo.ics) >&2") - machine.succeed("diff -u --strip-trailing-cr <(sort /home/bob/calendars/barcal/bar.ics) <(sort /home/alice/calendars/barcal/bar.ics) >&2") + with subtest("compare synced files"): + # iCalendar files get reordered + machine.succeed("diff -u --strip-trailing-cr <(sort /home/alice/calendars/foocal/foo.ics) <(sort /home/bob/calendars/foocal/foo.ics) >&2") + machine.succeed("diff -u --strip-trailing-cr <(sort /home/bob/calendars/barcal/bar.ics) <(sort /home/alice/calendars/barcal/bar.ics) >&2") - machine.succeed("diff -u --strip-trailing-cr /home/alice/contacts/foocard/foo.vcf /home/bob/contacts/foocard/foo.vcf >&2") - machine.succeed("diff -u --strip-trailing-cr /home/bob/contacts/barcard/bar.vcf /home/alice/contacts/barcard/bar.vcf >&2") - ''; - } -) + machine.succeed("diff -u --strip-trailing-cr /home/alice/contacts/foocard/foo.vcf /home/bob/contacts/foocard/foo.vcf >&2") + machine.succeed("diff -u --strip-trailing-cr /home/bob/contacts/barcard/bar.vcf /home/alice/contacts/barcard/bar.vcf >&2") + ''; +} diff --git a/nixos/tests/vengi-tools.nix b/nixos/tests/vengi-tools.nix index fcf42c09a7a8..3260d5a62a28 100644 --- a/nixos/tests/vengi-tools.nix +++ b/nixos/tests/vengi-tools.nix @@ -1,31 +1,29 @@ -import ./make-test-python.nix ( - { pkgs, ... 
}: - { - name = "vengi-tools"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "vengi-tools"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = [ pkgs.vengi-tools ]; }; - nodes.machine = - { config, pkgs, ... }: - { - imports = [ - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - environment.systemPackages = [ pkgs.vengi-tools ]; - }; - - enableOCR = true; - - testScript = '' - machine.wait_for_x() - machine.execute("vengi-voxedit >&2 &") - machine.wait_for_window("voxedit") - # Let the window load fully - machine.sleep(15) - machine.screenshot("screen") - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.execute("vengi-voxedit >&2 &") + machine.wait_for_window("voxedit") + # Let the window load fully + machine.sleep(15) + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/vikunja.nix b/nixos/tests/vikunja.nix index f1108dc69330..7da5e2f23b30 100644 --- a/nixos/tests/vikunja.nix +++ b/nixos/tests/vikunja.nix @@ -1,81 +1,79 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "vikunja"; +{ pkgs, lib, ... }: +{ + name = "vikunja"; - meta.maintainers = with lib.maintainers; [ leona ]; + meta.maintainers = with lib.maintainers; [ leona ]; - nodes = { - vikunjaSqlite = - { ... }: - { - services.vikunja = { - enable = true; - database = { - type = "sqlite"; - }; - frontendScheme = "http"; - frontendHostname = "localhost"; + nodes = { + vikunjaSqlite = + { ... }: + { + services.vikunja = { + enable = true; + database = { + type = "sqlite"; }; - services.nginx = { - enable = true; - virtualHosts."http://localhost" = { - locations."/".proxyPass = "http://localhost:3456"; - }; + frontendScheme = "http"; + frontendHostname = "localhost"; + }; + services.nginx = { + enable = true; + virtualHosts."http://localhost" = { + locations."/".proxyPass = "http://localhost:3456"; }; }; - vikunjaPostgresql = - { pkgs, ... }: - { - services.vikunja = { - enable = true; - database = { - type = "postgres"; - user = "vikunja"; - database = "vikunja"; - host = "/run/postgresql"; - }; - frontendScheme = "http"; - frontendHostname = "localhost"; - port = 9090; + }; + vikunjaPostgresql = + { pkgs, ... 
}: + { + services.vikunja = { + enable = true; + database = { + type = "postgres"; + user = "vikunja"; + database = "vikunja"; + host = "/run/postgresql"; }; - services.postgresql = { - enable = true; - ensureDatabases = [ "vikunja" ]; - ensureUsers = [ - { - name = "vikunja"; - ensureDBOwnership = true; - } - ]; - }; - services.nginx = { - enable = true; - virtualHosts."http://localhost" = { - locations."/".proxyPass = "http://localhost:9090"; - }; + frontendScheme = "http"; + frontendHostname = "localhost"; + port = 9090; + }; + services.postgresql = { + enable = true; + ensureDatabases = [ "vikunja" ]; + ensureUsers = [ + { + name = "vikunja"; + ensureDBOwnership = true; + } + ]; + }; + services.nginx = { + enable = true; + virtualHosts."http://localhost" = { + locations."/".proxyPass = "http://localhost:9090"; }; }; - }; + }; + }; - testScript = '' - vikunjaSqlite.wait_for_unit("vikunja.service") - vikunjaSqlite.wait_for_open_port(3456) - vikunjaSqlite.succeed("curl --fail http://localhost:3456/api/v1/info") + testScript = '' + vikunjaSqlite.wait_for_unit("vikunja.service") + vikunjaSqlite.wait_for_open_port(3456) + vikunjaSqlite.succeed("curl --fail http://localhost:3456/api/v1/info") - vikunjaSqlite.wait_for_unit("nginx.service") - vikunjaSqlite.wait_for_open_port(80) - vikunjaSqlite.succeed("curl --fail http://localhost/api/v1/info") - vikunjaSqlite.succeed("curl --fail http://localhost") + vikunjaSqlite.wait_for_unit("nginx.service") + vikunjaSqlite.wait_for_open_port(80) + vikunjaSqlite.succeed("curl --fail http://localhost/api/v1/info") + vikunjaSqlite.succeed("curl --fail http://localhost") - vikunjaPostgresql.wait_for_unit("vikunja.service") - vikunjaPostgresql.wait_for_open_port(9090) - vikunjaPostgresql.succeed("curl --fail http://localhost:9090/api/v1/info") + vikunjaPostgresql.wait_for_unit("vikunja.service") + vikunjaPostgresql.wait_for_open_port(9090) + vikunjaPostgresql.succeed("curl --fail http://localhost:9090/api/v1/info") - vikunjaPostgresql.wait_for_unit("nginx.service") - vikunjaPostgresql.wait_for_open_port(80) - vikunjaPostgresql.succeed("curl --fail http://localhost/api/v1/info") - vikunjaPostgresql.succeed("curl --fail http://localhost") - ''; - } -) + vikunjaPostgresql.wait_for_unit("nginx.service") + vikunjaPostgresql.wait_for_open_port(80) + vikunjaPostgresql.succeed("curl --fail http://localhost/api/v1/info") + vikunjaPostgresql.succeed("curl --fail http://localhost") + ''; +} diff --git a/nixos/tests/vsftpd.nix b/nixos/tests/vsftpd.nix index 816014a10b54..f3bd182a3748 100644 --- a/nixos/tests/vsftpd.nix +++ b/nixos/tests/vsftpd.nix @@ -1,45 +1,43 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "vsftpd"; +{ pkgs, ... 
}: +{ + name = "vsftpd"; - nodes = { - server = { - services.vsftpd = { - enable = true; - userlistDeny = false; - localUsers = true; - userlist = [ "ftp-test-user" ]; - writeEnable = true; - localRoot = "/tmp"; - }; - networking.firewall.enable = false; - - users = { - users.ftp-test-user = { - isSystemUser = true; - password = "ftp-test-password"; - group = "ftp-test-group"; - }; - groups.ftp-test-group = { }; - }; + nodes = { + server = { + services.vsftpd = { + enable = true; + userlistDeny = false; + localUsers = true; + userlist = [ "ftp-test-user" ]; + writeEnable = true; + localRoot = "/tmp"; }; + networking.firewall.enable = false; - client = { }; + users = { + users.ftp-test-user = { + isSystemUser = true; + password = "ftp-test-password"; + group = "ftp-test-group"; + }; + groups.ftp-test-group = { }; + }; }; - testScript = '' - client.start() - server.wait_for_unit("vsftpd") - server.wait_for_open_port(21) + client = { }; + }; - client.succeed("curl -u ftp-test-user:ftp-test-password ftp://server") - client.succeed('echo "this is a test" > /tmp/test.file.up') - client.succeed("curl -v -T /tmp/test.file.up -u ftp-test-user:ftp-test-password ftp://server") - client.succeed("curl -u ftp-test-user:ftp-test-password ftp://server/test.file.up > /tmp/test.file.down") - client.succeed("diff /tmp/test.file.up /tmp/test.file.down") - assert client.succeed("cat /tmp/test.file.up") == server.succeed("cat /tmp/test.file.up") - assert client.succeed("cat /tmp/test.file.down") == server.succeed("cat /tmp/test.file.up") - ''; - } -) + testScript = '' + client.start() + server.wait_for_unit("vsftpd") + server.wait_for_open_port(21) + + client.succeed("curl -u ftp-test-user:ftp-test-password ftp://server") + client.succeed('echo "this is a test" > /tmp/test.file.up') + client.succeed("curl -v -T /tmp/test.file.up -u ftp-test-user:ftp-test-password ftp://server") + client.succeed("curl -u ftp-test-user:ftp-test-password ftp://server/test.file.up > /tmp/test.file.down") + client.succeed("diff /tmp/test.file.up /tmp/test.file.down") + assert client.succeed("cat /tmp/test.file.up") == server.succeed("cat /tmp/test.file.up") + assert client.succeed("cat /tmp/test.file.down") == server.succeed("cat /tmp/test.file.up") + ''; +} diff --git a/nixos/tests/waagent.nix b/nixos/tests/waagent.nix index 54fa645e8304..f12f393f2597 100644 --- a/nixos/tests/waagent.nix +++ b/nixos/tests/waagent.nix @@ -1,72 +1,70 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: - let - confPath = "/etc/waagent.conf"; - in - { - name = "waagent"; +{ lib, pkgs, ... 
}: +let + confPath = "/etc/waagent.conf"; +in +{ + name = "waagent"; - meta = { - maintainers = with lib.maintainers; [ codgician ]; - }; + meta = { + maintainers = with lib.maintainers; [ codgician ]; + }; - nodes.machine = { - services.waagent = { - enable = true; - settings = { - Provisioning = { - Enable = false; - Agent = "waagent"; - DeleteRootPassword = false; - RegenerateSshHostKeyPair = false; - SshHostKeyPairType = "ed25519"; - MonitorHostName = false; - }; - ResourceDisk = { - Format = false; - MountOptions = [ - "compress=lzo" - "mode=0600" - ]; - }; - OS.RootDeviceScsiTimeout = 300; - HttpProxy = { - Host = null; - Port = null; - }; - CGroups = { - EnforceLimits = false; - Excluded = [ ]; - }; + nodes.machine = { + services.waagent = { + enable = true; + settings = { + Provisioning = { + Enable = false; + Agent = "waagent"; + DeleteRootPassword = false; + RegenerateSshHostKeyPair = false; + SshHostKeyPairType = "ed25519"; + MonitorHostName = false; + }; + ResourceDisk = { + Format = false; + MountOptions = [ + "compress=lzo" + "mode=0600" + ]; + }; + OS.RootDeviceScsiTimeout = 300; + HttpProxy = { + Host = null; + Port = null; + }; + CGroups = { + EnforceLimits = false; + Excluded = [ ]; }; }; }; + }; - testScript = '' - # Defined values should be reflected in waagent.conf - machine.succeed("grep -q '^Provisioning.Enable=n$' '${confPath}'") - machine.succeed("grep -q '^Provisioning.Agent=waagent$' '${confPath}'") - machine.succeed("grep -q '^Provisioning.DeleteRootPassword=n$' '${confPath}'") - machine.succeed("grep -q '^Provisioning.RegenerateSshHostKeyPair=n$' '${confPath}'") - machine.succeed("grep -q '^Provisioning.SshHostKeyPairType=ed25519$' '${confPath}'") - machine.succeed("grep -q '^Provisioning.MonitorHostName=n$' '${confPath}'") - machine.succeed("grep -q '^ResourceDisk.Format=n$' '${confPath}'") - machine.succeed("grep -q '^ResourceDisk.MountOptions=compress=lzo,mode=0600$' '${confPath}'") - machine.succeed("grep -q '^OS.RootDeviceScsiTimeout=300$' '${confPath}'") + testScript = '' + # Defined values should be reflected in waagent.conf + machine.succeed("grep -q '^Provisioning.Enable=n$' '${confPath}'") + machine.succeed("grep -q '^Provisioning.Agent=waagent$' '${confPath}'") + machine.succeed("grep -q '^Provisioning.DeleteRootPassword=n$' '${confPath}'") + machine.succeed("grep -q '^Provisioning.RegenerateSshHostKeyPair=n$' '${confPath}'") + machine.succeed("grep -q '^Provisioning.SshHostKeyPairType=ed25519$' '${confPath}'") + machine.succeed("grep -q '^Provisioning.MonitorHostName=n$' '${confPath}'") + machine.succeed("grep -q '^ResourceDisk.Format=n$' '${confPath}'") + machine.succeed("grep -q '^ResourceDisk.MountOptions=compress=lzo,mode=0600$' '${confPath}'") + machine.succeed("grep -q '^OS.RootDeviceScsiTimeout=300$' '${confPath}'") - # Undocumented options should also be supported - machine.succeed("grep -q '^CGroups.EnforceLimits=n$' '${confPath}'") + # Undocumented options should also be supported + machine.succeed("grep -q '^CGroups.EnforceLimits=n$' '${confPath}'") - # Null values should be skipped and not exist in waagent.conf - machine.fail("grep -q '^HttpProxy.Host=' '${confPath}'") - machine.fail("grep -q '^HttpProxy.Port=' '${confPath}'") + # Null values should be skipped and not exist in waagent.conf + machine.fail("grep -q '^HttpProxy.Host=' '${confPath}'") + machine.fail("grep -q '^HttpProxy.Port=' '${confPath}'") - # Empty lists should be skipped and not exist in waagent.conf - machine.fail("grep -q '^CGroups.Excluded=' '${confPath}'") + # Empty 
lists should be skipped and not exist in waagent.conf + machine.fail("grep -q '^CGroups.Excluded=' '${confPath}'") - # Test service start - # Skip testing actual functionality due to lacking Azure infrasturcture - machine.wait_for_unit("waagent.service") - ''; - } -) + # Test service start + # Skip testing actual functionality due to lacking Azure infrasturcture + machine.wait_for_unit("waagent.service") + ''; +} diff --git a/nixos/tests/warzone2100.nix b/nixos/tests/warzone2100.nix index 4e9572a12c73..c7ce531e8dda 100644 --- a/nixos/tests/warzone2100.nix +++ b/nixos/tests/warzone2100.nix @@ -1,30 +1,28 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "warzone2100"; - meta = with pkgs.lib.maintainers; { - maintainers = [ fgaz ]; +{ pkgs, ... }: +{ + name = "warzone2100"; + meta = with pkgs.lib.maintainers; { + maintainers = [ fgaz ]; + }; + + nodes.machine = + { config, pkgs, ... }: + { + imports = [ + ./common/x11.nix + ]; + + services.xserver.enable = true; + environment.systemPackages = [ pkgs.warzone2100 ]; }; - nodes.machine = - { config, pkgs, ... }: - { - imports = [ - ./common/x11.nix - ]; + enableOCR = true; - services.xserver.enable = true; - environment.systemPackages = [ pkgs.warzone2100 ]; - }; - - enableOCR = true; - - testScript = '' - machine.wait_for_x() - machine.execute("warzone2100 >&2 &") - machine.wait_for_window("Warzone 2100") - machine.wait_for_text(r"(Single Player|Multi Player|Tutorial|Options|Quit Game)") - machine.screenshot("screen") - ''; - } -) + testScript = '' + machine.wait_for_x() + machine.execute("warzone2100 >&2 &") + machine.wait_for_window("Warzone 2100") + machine.wait_for_text(r"(Single Player|Multi Player|Tutorial|Options|Quit Game)") + machine.screenshot("screen") + ''; +} diff --git a/nixos/tests/wasabibackend.nix b/nixos/tests/wasabibackend.nix index 05f9ecd47bd6..f237a8d62075 100644 --- a/nixos/tests/wasabibackend.nix +++ b/nixos/tests/wasabibackend.nix @@ -1,43 +1,41 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "wasabibackend"; - meta = with pkgs.lib.maintainers; { - maintainers = [ mmahut ]; - }; +{ pkgs, ... }: +{ + name = "wasabibackend"; + meta = with pkgs.lib.maintainers; { + maintainers = [ mmahut ]; + }; - nodes = { - machine = - { ... }: - { - services.wasabibackend = { - enable = true; - network = "testnet"; - rpc = { - user = "alice"; - port = 18332; - }; - }; - services.bitcoind."testnet" = { - enable = true; - testnet = true; - rpc.users = { - alice.passwordHMAC = "e7096bc21da60b29ecdbfcdb2c3acc62$f948e61cb587c399358ed99c6ed245a41460b4bf75125d8330c9f6fcc13d7ae7"; - }; + nodes = { + machine = + { ... 
}: + { + services.wasabibackend = { + enable = true; + network = "testnet"; + rpc = { + user = "alice"; + port = 18332; }; }; - }; + services.bitcoind."testnet" = { + enable = true; + testnet = true; + rpc.users = { + alice.passwordHMAC = "e7096bc21da60b29ecdbfcdb2c3acc62$f948e61cb587c399358ed99c6ed245a41460b4bf75125d8330c9f6fcc13d7ae7"; + }; + }; + }; + }; - testScript = '' - start_all() - machine.wait_for_unit("wasabibackend.service") - machine.wait_until_succeeds( - "grep 'Wasabi Backend started' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt" - ) - machine.sleep(5) - machine.succeed( - "grep 'Config is successfully initialized' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt" - ) - ''; - } -) + testScript = '' + start_all() + machine.wait_for_unit("wasabibackend.service") + machine.wait_until_succeeds( + "grep 'Wasabi Backend started' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt" + ) + machine.sleep(5) + machine.succeed( + "grep 'Config is successfully initialized' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt" + ) + ''; +} diff --git a/nixos/tests/watchdogd.nix b/nixos/tests/watchdogd.nix index 711cdee65f8e..5519ca6fb695 100644 --- a/nixos/tests/watchdogd.nix +++ b/nixos/tests/watchdogd.nix @@ -1,27 +1,25 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "watchdogd"; - meta.maintainers = with lib.maintainers; [ vifino ]; +{ lib, ... }: +{ + name = "watchdogd"; + meta.maintainers = with lib.maintainers; [ vifino ]; - nodes.machine = - { pkgs, ... }: - { - virtualisation.qemu.options = [ - "-device i6300esb" # virtual watchdog timer - ]; - boot.kernelModules = [ "i6300esb" ]; - services.watchdogd.enable = true; - services.watchdogd.settings = { - supervisor.enabled = true; - }; + nodes.machine = + { pkgs, ... }: + { + virtualisation.qemu.options = [ + "-device i6300esb" # virtual watchdog timer + ]; + boot.kernelModules = [ "i6300esb" ]; + services.watchdogd.enable = true; + services.watchdogd.settings = { + supervisor.enabled = true; }; + }; - testScript = '' - machine.wait_for_unit("watchdogd.service") + testScript = '' + machine.wait_for_unit("watchdogd.service") - assert "i6300ESB" in machine.succeed("watchdogctl status") - machine.succeed("watchdogctl test") - ''; - } -) + assert "i6300ESB" in machine.succeed("watchdogctl status") + machine.succeed("watchdogctl test") + ''; +} diff --git a/nixos/tests/web-apps/healthchecks.nix b/nixos/tests/web-apps/healthchecks.nix index 1c6755ec922c..6915a37975cb 100644 --- a/nixos/tests/web-apps/healthchecks.nix +++ b/nixos/tests/web-apps/healthchecks.nix @@ -1,46 +1,44 @@ -import ../make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "healthchecks"; +{ lib, pkgs, ... }: +{ + name = "healthchecks"; - meta = with lib.maintainers; { - maintainers = [ phaer ]; - }; + meta = with lib.maintainers; { + maintainers = [ phaer ]; + }; - nodes.machine = - { ... }: - { - services.healthchecks = { - enable = true; - settings = { - SITE_NAME = "MyUniqueInstance"; - COMPRESS_ENABLED = "True"; - SECRET_KEY_FILE = pkgs.writeText "secret" "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; - }; + nodes.machine = + { ... 
}: + { + services.healthchecks = { + enable = true; + settings = { + SITE_NAME = "MyUniqueInstance"; + COMPRESS_ENABLED = "True"; + SECRET_KEY_FILE = pkgs.writeText "secret" "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; }; }; + }; - testScript = '' - machine.start() - machine.wait_for_unit("healthchecks.target") - machine.wait_until_succeeds("journalctl --since -1m --unit healthchecks --grep Listening") + testScript = '' + machine.start() + machine.wait_for_unit("healthchecks.target") + machine.wait_until_succeeds("journalctl --since -1m --unit healthchecks --grep Listening") - with subtest("Home screen loads"): - machine.succeed( - "curl -sSfL http://localhost:8000 | grep 'Log In'" - ) + with subtest("Home screen loads"): + machine.succeed( + "curl -sSfL http://localhost:8000 | grep '<title>Log In'" + ) - with subtest("Setting SITE_NAME via freeform option works"): - machine.succeed( - "curl -sSfL http://localhost:8000 | grep 'MyUniqueInstance'" - ) + with subtest("Setting SITE_NAME via freeform option works"): + machine.succeed( + "curl -sSfL http://localhost:8000 | grep 'MyUniqueInstance'" + ) - with subtest("Manage script works"): - # "shell" sucommand should succeed, needs python in PATH. - assert "foo\n" == machine.succeed("echo 'print(\"foo\")' | sudo -u healthchecks healthchecks-manage shell") + with subtest("Manage script works"): + # "shell" sucommand should succeed, needs python in PATH. + assert "foo\n" == machine.succeed("echo 'print(\"foo\")' | sudo -u healthchecks healthchecks-manage shell") - # Shouldn't fail if not called by healthchecks user - assert "foo\n" == machine.succeed("echo 'print(\"foo\")' | healthchecks-manage shell") - ''; - } -) + # Shouldn't fail if not called by healthchecks user + assert "foo\n" == machine.succeed("echo 'print(\"foo\")' | healthchecks-manage shell") + ''; +} diff --git a/nixos/tests/web-apps/immich-public-proxy.nix b/nixos/tests/web-apps/immich-public-proxy.nix index 5f2034b29442..f711e56abb48 100644 --- a/nixos/tests/web-apps/immich-public-proxy.nix +++ b/nixos/tests/web-apps/immich-public-proxy.nix @@ -1,105 +1,103 @@ -import ../make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "immich-public-proxy"; +{ pkgs, lib, ... }: +{ + name = "immich-public-proxy"; - nodes.machine = - { pkgs, ... }@args: - { - environment.systemPackages = [ - pkgs.imagemagick - pkgs.immich-cli - ]; - services.immich = { - enable = true; - port = 2283; - # disable a lot of features that aren't needed for this test - machine-learning.enable = false; - settings = { - backup.database.enabled = false; - machineLearning.enabled = false; - map.enabled = false; - reverseGeocoding.enabled = false; - metadata.faces.import = false; - newVersionCheck.enabled = false; - notifications.smtp.enabled = false; - }; - }; - services.immich-public-proxy = { - enable = true; - immichUrl = "http://localhost:2283"; - port = 8002; - settings.ipp.responseHeaders."X-NixOS" = "Rules"; + nodes.machine = + { pkgs, ... 
}@args: + { + environment.systemPackages = [ + pkgs.imagemagick + pkgs.immich-cli + ]; + services.immich = { + enable = true; + port = 2283; + # disable a lot of features that aren't needed for this test + machine-learning.enable = false; + settings = { + backup.database.enabled = false; + machineLearning.enabled = false; + map.enabled = false; + reverseGeocoding.enabled = false; + metadata.faces.import = false; + newVersionCheck.enabled = false; + notifications.smtp.enabled = false; }; }; + services.immich-public-proxy = { + enable = true; + immichUrl = "http://localhost:2283"; + port = 8002; + settings.ipp.responseHeaders."X-NixOS" = "Rules"; + }; + }; - testScript = '' - import json + testScript = '' + import json - machine.wait_for_unit("immich-server.service") - machine.wait_for_unit("immich-public-proxy.service") - machine.wait_for_open_port(2283) - machine.wait_for_open_port(8002) + machine.wait_for_unit("immich-server.service") + machine.wait_for_unit("immich-public-proxy.service") + machine.wait_for_open_port(2283) + machine.wait_for_open_port(8002) - # The proxy should be up - machine.succeed("curl -sf http://localhost:8002") + # The proxy should be up + machine.succeed("curl -sf http://localhost:8002") - # Verify the static assets are served - machine.succeed("curl -sf http://localhost:8002/robots.txt") - machine.succeed("curl -sf http://localhost:8002/share/static/style.css") + # Verify the static assets are served + machine.succeed("curl -sf http://localhost:8002/robots.txt") + machine.succeed("curl -sf http://localhost:8002/share/static/style.css") - # Check that the response header in the settings is sent - res = machine.succeed(""" - curl -sD - http://localhost:8002 -o /dev/null - """) - assert "x-nixos: rules" in res.lower(), res + # Check that the response header in the settings is sent + res = machine.succeed(""" + curl -sD - http://localhost:8002 -o /dev/null + """) + assert "x-nixos: rules" in res.lower(), res - # Log in to Immich and create an access key - machine.succeed(""" - curl -sf --json '{ "email": "test@example.com", "name": "Admin", "password": "admin" }' http://localhost:2283/api/auth/admin-sign-up - """) - res = machine.succeed(""" - curl -sf --json '{ "email": "test@example.com", "password": "admin" }' http://localhost:2283/api/auth/login - """) - token = json.loads(res)['accessToken'] - res = machine.succeed(""" - curl -sf -H 'Cookie: immich_access_token=%s' --json '{ "name": "API Key", "permissions": ["all"] }' http://localhost:2283/api/api-keys - """ % token) - key = json.loads(res)['secret'] - machine.succeed(f"immich login http://localhost:2283/api {key}") - res = machine.succeed("immich server-info") - print(res) + # Log in to Immich and create an access key + machine.succeed(""" + curl -sf --json '{ "email": "test@example.com", "name": "Admin", "password": "admin" }' http://localhost:2283/api/auth/admin-sign-up + """) + res = machine.succeed(""" + curl -sf --json '{ "email": "test@example.com", "password": "admin" }' http://localhost:2283/api/auth/login + """) + token = json.loads(res)['accessToken'] + res = machine.succeed(""" + curl -sf -H 'Cookie: immich_access_token=%s' --json '{ "name": "API Key", "permissions": ["all"] }' http://localhost:2283/api/api-keys + """ % token) + key = json.loads(res)['secret'] + machine.succeed(f"immich login http://localhost:2283/api {key}") + res = machine.succeed("immich server-info") + print(res) - # Upload some blank images to a new album - # If there's only one image, the proxy serves the image directly - 
machine.succeed("magick -size 800x600 canvas:white /tmp/white.png") - machine.succeed("immich upload -A '✨ Reproducible Moments ✨' /tmp/white.png") - machine.succeed("magick -size 800x600 canvas:black /tmp/black.png") - machine.succeed("immich upload -A '✨ Reproducible Moments ✨' /tmp/black.png") - res = machine.succeed("immich server-info") - print(res) + # Upload some blank images to a new album + # If there's only one image, the proxy serves the image directly + machine.succeed("magick -size 800x600 canvas:white /tmp/white.png") + machine.succeed("immich upload -A '✨ Reproducible Moments ✨' /tmp/white.png") + machine.succeed("magick -size 800x600 canvas:black /tmp/black.png") + machine.succeed("immich upload -A '✨ Reproducible Moments ✨' /tmp/black.png") + res = machine.succeed("immich server-info") + print(res) - # Get the new album id - res = machine.succeed(""" - curl -sf -H 'Cookie: immich_access_token=%s' http://localhost:2283/api/albums - """ % token) - album_id = json.loads(res)[0]['id'] + # Get the new album id + res = machine.succeed(""" + curl -sf -H 'Cookie: immich_access_token=%s' http://localhost:2283/api/albums + """ % token) + album_id = json.loads(res)[0]['id'] - # Create a shared link - res = machine.succeed(""" - curl -sf -H 'Cookie: immich_access_token=%s' --json '{ "albumId": "%s", "type": "ALBUM" }' http://localhost:2283/api/shared-links - """ % (token, album_id)) - share_key = json.loads(res)['key'] + # Create a shared link + res = machine.succeed(""" + curl -sf -H 'Cookie: immich_access_token=%s' --json '{ "albumId": "%s", "type": "ALBUM" }' http://localhost:2283/api/shared-links + """ % (token, album_id)) + share_key = json.loads(res)['key'] - # Access the share - machine.succeed(""" - curl -sf http://localhost:2283/share/%s - """ % share_key) + # Access the share + machine.succeed(""" + curl -sf http://localhost:2283/share/%s + """ % share_key) - # Access the share through the proxy - machine.succeed(""" - curl -sf http://localhost:8002/share/%s - """ % share_key) - ''; - } -) + # Access the share through the proxy + machine.succeed(""" + curl -sf http://localhost:8002/share/%s + """ % share_key) + ''; +} diff --git a/nixos/tests/web-apps/immich.nix b/nixos/tests/web-apps/immich.nix index 089037ad837c..550a1630bda8 100644 --- a/nixos/tests/web-apps/immich.nix +++ b/nixos/tests/web-apps/immich.nix @@ -1,60 +1,58 @@ -import ../make-test-python.nix ( - { ... }: - { - name = "immich-nixos"; +{ ... }: +{ + name = "immich-nixos"; - nodes.machine = - { pkgs, ... }: - { - # These tests need a little more juice - virtualisation = { - cores = 2; - memorySize = 2048; - diskSize = 4096; - }; - - environment.systemPackages = with pkgs; [ immich-cli ]; - - services.immich = { - enable = true; - environment.IMMICH_LOG_LEVEL = "verbose"; - }; + nodes.machine = + { pkgs, ... 
}: + { + # These tests need a little more juice + virtualisation = { + cores = 2; + memorySize = 2048; + diskSize = 4096; }; - testScript = '' - import json + environment.systemPackages = with pkgs; [ immich-cli ]; - machine.wait_for_unit("immich-server.service") + services.immich = { + enable = true; + environment.IMMICH_LOG_LEVEL = "verbose"; + }; + }; - machine.wait_for_open_port(2283) # Server - machine.wait_for_open_port(3003) # Machine learning - machine.succeed("curl --fail http://localhost:2283/") + testScript = '' + import json - machine.succeed(""" - curl -f --json '{ "email": "test@example.com", "name": "Admin", "password": "admin" }' http://localhost:2283/api/auth/admin-sign-up - """) - res = machine.succeed(""" - curl -f --json '{ "email": "test@example.com", "password": "admin" }' http://localhost:2283/api/auth/login - """) - token = json.loads(res)['accessToken'] + machine.wait_for_unit("immich-server.service") - res = machine.succeed(""" - curl -f -H 'Cookie: immich_access_token=%s' --json '{ "name": "API Key", "permissions": ["all"] }' http://localhost:2283/api/api-keys - """ % token) - key = json.loads(res)['secret'] + machine.wait_for_open_port(2283) # Server + machine.wait_for_open_port(3003) # Machine learning + machine.succeed("curl --fail http://localhost:2283/") - machine.succeed(f"immich login http://localhost:2283/api {key}") - res = machine.succeed("immich server-info") - print(res) + machine.succeed(""" + curl -f --json '{ "email": "test@example.com", "name": "Admin", "password": "admin" }' http://localhost:2283/api/auth/admin-sign-up + """) + res = machine.succeed(""" + curl -f --json '{ "email": "test@example.com", "password": "admin" }' http://localhost:2283/api/auth/login + """) + token = json.loads(res)['accessToken'] - machine.succeed(""" - curl -f -X PUT -H 'Cookie: immich_access_token=%s' --json '{ "command": "start" }' http://localhost:2283/api/jobs/backupDatabase - """ % token) - res = machine.succeed(""" - curl -f -H 'Cookie: immich_access_token=%s' http://localhost:2283/api/jobs - """ % token) - assert sum(json.loads(res)["backupDatabase"]["jobCounts"].values()) >= 1 - machine.wait_until_succeeds("ls /var/lib/immich/backups/*.sql.gz") - ''; - } -) + res = machine.succeed(""" + curl -f -H 'Cookie: immich_access_token=%s' --json '{ "name": "API Key", "permissions": ["all"] }' http://localhost:2283/api/api-keys + """ % token) + key = json.loads(res)['secret'] + + machine.succeed(f"immich login http://localhost:2283/api {key}") + res = machine.succeed("immich server-info") + print(res) + + machine.succeed(""" + curl -f -X PUT -H 'Cookie: immich_access_token=%s' --json '{ "command": "start" }' http://localhost:2283/api/jobs/backupDatabase + """ % token) + res = machine.succeed(""" + curl -f -H 'Cookie: immich_access_token=%s' http://localhost:2283/api/jobs + """ % token) + assert sum(json.loads(res)["backupDatabase"]["jobCounts"].values()) >= 1 + machine.wait_until_succeeds("ls /var/lib/immich/backups/*.sql.gz") + ''; +} diff --git a/nixos/tests/web-apps/netbox-upgrade.nix b/nixos/tests/web-apps/netbox-upgrade.nix index 417df2a32af0..49fa53daabd9 100644 --- a/nixos/tests/web-apps/netbox-upgrade.nix +++ b/nixos/tests/web-apps/netbox-upgrade.nix @@ -1,104 +1,102 @@ -import ../make-test-python.nix ( - { lib, pkgs, ... }: - let - oldNetbox = "netbox_4_1"; - newNetbox = "netbox_4_2"; +{ lib, pkgs, ... 
}: +let + oldNetbox = "netbox_4_1"; + newNetbox = "netbox_4_2"; - apiVersion = - version: - lib.pipe version [ - (lib.splitString ".") - (lib.take 2) - (lib.concatStringsSep ".") - ]; - oldApiVersion = apiVersion pkgs."${oldNetbox}".version; - newApiVersion = apiVersion pkgs."${newNetbox}".version; - in - { - name = "netbox-upgrade"; + apiVersion = + version: + lib.pipe version [ + (lib.splitString ".") + (lib.take 2) + (lib.concatStringsSep ".") + ]; + oldApiVersion = apiVersion pkgs."${oldNetbox}".version; + newApiVersion = apiVersion pkgs."${newNetbox}".version; +in +{ + name = "netbox-upgrade"; - meta = with lib.maintainers; { - maintainers = [ - minijackson - raitobezarius - ]; - }; + meta = with lib.maintainers; { + maintainers = [ + minijackson + raitobezarius + ]; + }; - nodes.machine = - { config, pkgs, ... }: - { - virtualisation.memorySize = 2048; - services.netbox = { - enable = true; - # Pick the NetBox package from this config's "pkgs" argument, - # so that `nixpkgs.config.permittedInsecurePackages` works - package = pkgs."${oldNetbox}"; - secretKeyFile = pkgs.writeText "secret" '' - abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 - ''; - }; - - services.nginx = { - enable = true; - - recommendedProxySettings = true; - - virtualHosts.netbox = { - default = true; - locations."/".proxyPass = "http://localhost:${toString config.services.netbox.port}"; - locations."/static/".alias = "/var/lib/netbox/static/"; - }; - }; - - users.users.nginx.extraGroups = [ "netbox" ]; - - networking.firewall.allowedTCPPorts = [ 80 ]; - - nixpkgs.config.permittedInsecurePackages = [ pkgs."${oldNetbox}".name ]; - - specialisation.upgrade.configuration.services.netbox.package = lib.mkForce pkgs."${newNetbox}"; + nodes.machine = + { config, pkgs, ... }: + { + virtualisation.memorySize = 2048; + services.netbox = { + enable = true; + # Pick the NetBox package from this config's "pkgs" argument, + # so that `nixpkgs.config.permittedInsecurePackages` works + package = pkgs."${oldNetbox}"; + secretKeyFile = pkgs.writeText "secret" '' + abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 + ''; }; - testScript = - { nodes, ... }: - '' - start_all() - machine.wait_for_unit("netbox.target") - machine.wait_for_unit("nginx.service") - machine.wait_until_succeeds("journalctl --since -1m --unit netbox --grep Listening") + services.nginx = { + enable = true; - def api_version(headers): - header = [header for header in headers.splitlines() if header.startswith("API-Version:")][0] - return header.split()[1] + recommendedProxySettings = true; - def check_api_version(version): - # Returns 403 with NetBox >= 4.0, - # but we still get the API version in the headers - headers = machine.succeed( - "curl -sSL http://localhost/api/ --head -H 'Content-Type: application/json'" - ) - assert api_version(headers) == version + virtualHosts.netbox = { + default = true; + locations."/".proxyPass = "http://localhost:${toString config.services.netbox.port}"; + locations."/static/".alias = "/var/lib/netbox/static/"; + }; + }; - with subtest("NetBox version is the old one"): - check_api_version("${oldApiVersion}") + users.users.nginx.extraGroups = [ "netbox" ]; - # Somehow, even though netbox-housekeeping.service has After=netbox.service, - # netbox-housekeeping.service and netbox.service still get started at the - # same time, making netbox-housekeeping fail (can't really do some house - # keeping job if the database is not correctly formed). 
- # - # So we don't check that the upgrade went well, we just check that - # netbox.service is active, and that netbox-housekeeping can be run - # successfully afterwards. - # - # This is not good UX, but the system should be working nonetheless. - machine.execute("${nodes.machine.system.build.toplevel}/specialisation/upgrade/bin/switch-to-configuration test >&2") + networking.firewall.allowedTCPPorts = [ 80 ]; - machine.wait_for_unit("netbox.service") - machine.succeed("systemctl start netbox-housekeeping.service") + nixpkgs.config.permittedInsecurePackages = [ pkgs."${oldNetbox}".name ]; - with subtest("NetBox version is the new one"): - check_api_version("${newApiVersion}") - ''; - } -) + specialisation.upgrade.configuration.services.netbox.package = lib.mkForce pkgs."${newNetbox}"; + }; + + testScript = + { nodes, ... }: + '' + start_all() + machine.wait_for_unit("netbox.target") + machine.wait_for_unit("nginx.service") + machine.wait_until_succeeds("journalctl --since -1m --unit netbox --grep Listening") + + def api_version(headers): + header = [header for header in headers.splitlines() if header.startswith("API-Version:")][0] + return header.split()[1] + + def check_api_version(version): + # Returns 403 with NetBox >= 4.0, + # but we still get the API version in the headers + headers = machine.succeed( + "curl -sSL http://localhost/api/ --head -H 'Content-Type: application/json'" + ) + assert api_version(headers) == version + + with subtest("NetBox version is the old one"): + check_api_version("${oldApiVersion}") + + # Somehow, even though netbox-housekeeping.service has After=netbox.service, + # netbox-housekeeping.service and netbox.service still get started at the + # same time, making netbox-housekeeping fail (can't really do some house + # keeping job if the database is not correctly formed). + # + # So we don't check that the upgrade went well, we just check that + # netbox.service is active, and that netbox-housekeeping can be run + # successfully afterwards. + # + # This is not good UX, but the system should be working nonetheless. + machine.execute("${nodes.machine.system.build.toplevel}/specialisation/upgrade/bin/switch-to-configuration test >&2") + + machine.wait_for_unit("netbox.service") + machine.succeed("systemctl start netbox-housekeeping.service") + + with subtest("NetBox version is the new one"): + check_api_version("${newApiVersion}") + ''; +} diff --git a/nixos/tests/web-apps/open-web-calendar.nix b/nixos/tests/web-apps/open-web-calendar.nix index b5cd59c56abd..9778031dabc0 100644 --- a/nixos/tests/web-apps/open-web-calendar.nix +++ b/nixos/tests/web-apps/open-web-calendar.nix @@ -1,51 +1,49 @@ -import ../make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - certs = import ../common/acme/server/snakeoil-certs.nix; +let + certs = import ../common/acme/server/snakeoil-certs.nix; - serverDomain = certs.domain; - in - { - name = "open-web-calendar"; - meta.maintainers = with pkgs.lib.maintainers; [ erictapen ]; + serverDomain = certs.domain; +in +{ + name = "open-web-calendar"; + meta.maintainers = with pkgs.lib.maintainers; [ erictapen ]; - nodes.server = - { pkgs, lib, ... 
}: - { - services.open-web-calendar = { - enable = true; - domain = serverDomain; - calendarSettings.title = "My custom title"; - }; - - services.nginx.virtualHosts."${serverDomain}" = { - enableACME = lib.mkForce false; - sslCertificate = certs."${serverDomain}".cert; - sslCertificateKey = certs."${serverDomain}".key; - }; - - security.pki.certificateFiles = [ certs.ca.cert ]; - - networking.hosts."::1" = [ "${serverDomain}" ]; - networking.firewall.allowedTCPPorts = [ - 80 - 443 - ]; + nodes.server = + { pkgs, lib, ... }: + { + services.open-web-calendar = { + enable = true; + domain = serverDomain; + calendarSettings.title = "My custom title"; }; - nodes.client = - { pkgs, nodes, ... }: - { - networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ "${serverDomain}" ]; - - security.pki.certificateFiles = [ certs.ca.cert ]; + services.nginx.virtualHosts."${serverDomain}" = { + enableACME = lib.mkForce false; + sslCertificate = certs."${serverDomain}".cert; + sslCertificateKey = certs."${serverDomain}".key; }; - testScript = '' - start_all() - server.wait_for_unit("open-web-calendar.socket") - server.wait_until_succeeds("curl -f https://${serverDomain}/ | grep 'My custom title'") - ''; - } -) + security.pki.certificateFiles = [ certs.ca.cert ]; + + networking.hosts."::1" = [ "${serverDomain}" ]; + networking.firewall.allowedTCPPorts = [ + 80 + 443 + ]; + }; + + nodes.client = + { pkgs, nodes, ... }: + { + networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ "${serverDomain}" ]; + + security.pki.certificateFiles = [ certs.ca.cert ]; + }; + + testScript = '' + start_all() + server.wait_for_unit("open-web-calendar.socket") + server.wait_until_succeeds("curl -f https://${serverDomain}/ | grep 'My custom title'") + ''; +} diff --git a/nixos/tests/web-apps/peering-manager.nix b/nixos/tests/web-apps/peering-manager.nix index 6a04d5f4e5e6..8f4e1d731ae8 100644 --- a/nixos/tests/web-apps/peering-manager.nix +++ b/nixos/tests/web-apps/peering-manager.nix @@ -1,47 +1,45 @@ -import ../make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "peering-manager"; +{ lib, pkgs, ... }: +{ + name = "peering-manager"; - meta = with lib.maintainers; { - maintainers = [ yuka ]; + meta = with lib.maintainers; { + maintainers = [ yuka ]; + }; + + nodes.machine = + { ... }: + { + services.peering-manager = { + enable = true; + secretKeyFile = pkgs.writeText "secret" '' + abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 + ''; + }; }; - nodes.machine = - { ... 
}: - { - services.peering-manager = { - enable = true; - secretKeyFile = pkgs.writeText "secret" '' - abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 - ''; - }; - }; + testScript = + { nodes }: + '' + machine.start() + machine.wait_for_unit("peering-manager.target") + machine.wait_until_succeeds("journalctl --since -1m --unit peering-manager --grep Listening") - testScript = - { nodes }: - '' - machine.start() - machine.wait_for_unit("peering-manager.target") - machine.wait_until_succeeds("journalctl --since -1m --unit peering-manager --grep Listening") - - print(machine.succeed( - "curl -sSfL http://[::1]:8001" - )) - with subtest("Home screen loads"): - machine.succeed( - "curl -sSfL http://[::1]:8001 | grep 'Home - Peering Manager'" - ) - with subtest("checks succeed"): - machine.succeed( - "systemctl stop peering-manager peering-manager-rq" - ) - machine.succeed( - "sudo -u postgres psql -c 'ALTER USER \"peering-manager\" WITH SUPERUSER;'" - ) - machine.succeed( - "cd ${nodes.machine.system.build.peeringManagerPkg}/opt/peering-manager ; peering-manager-manage test --no-input" - ) - ''; - } -) + print(machine.succeed( + "curl -sSfL http://[::1]:8001" + )) + with subtest("Home screen loads"): + machine.succeed( + "curl -sSfL http://[::1]:8001 | grep 'Home - Peering Manager'" + ) + with subtest("checks succeed"): + machine.succeed( + "systemctl stop peering-manager peering-manager-rq" + ) + machine.succeed( + "sudo -u postgres psql -c 'ALTER USER \"peering-manager\" WITH SUPERUSER;'" + ) + machine.succeed( + "cd ${nodes.machine.system.build.peeringManagerPkg}/opt/peering-manager ; peering-manager-manage test --no-input" + ) + ''; +} diff --git a/nixos/tests/web-apps/phylactery.nix b/nixos/tests/web-apps/phylactery.nix index 492d50d3d2d1..1643231ff8f0 100644 --- a/nixos/tests/web-apps/phylactery.nix +++ b/nixos/tests/web-apps/phylactery.nix @@ -1,25 +1,23 @@ -import ../make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "phylactery"; +{ pkgs, lib, ... }: +{ + name = "phylactery"; - nodes.machine = - { ... }: - { - services.phylactery = rec { - enable = true; - port = 8080; - library = "/tmp"; - }; + nodes.machine = + { ... }: + { + services.phylactery = rec { + enable = true; + port = 8080; + library = "/tmp"; }; + }; - testScript = '' - start_all() - machine.wait_for_unit('phylactery') - machine.wait_for_open_port(8080) - machine.wait_until_succeeds('curl localhost:8080') - ''; + testScript = '' + start_all() + machine.wait_for_unit('phylactery') + machine.wait_for_open_port(8080) + machine.wait_until_succeeds('curl localhost:8080') + ''; - meta.maintainers = with lib.maintainers; [ McSinyx ]; - } -) + meta.maintainers = with lib.maintainers; [ McSinyx ]; +} diff --git a/nixos/tests/web-apps/tt-rss.nix b/nixos/tests/web-apps/tt-rss.nix index 1dc2daf535d7..933a6419923c 100644 --- a/nixos/tests/web-apps/tt-rss.nix +++ b/nixos/tests/web-apps/tt-rss.nix @@ -1,47 +1,45 @@ -import ../make-test-python.nix ( - { ... }: - { - name = "tt-rss-nixos"; +{ ... }: +{ + name = "tt-rss-nixos"; - nodes.machine = - { pkgs, ... }: - { - services.tt-rss = { - enable = true; - virtualHost = "localhost"; - selfUrlPath = "http://localhost/"; - pluginPackages = with pkgs; [ - tt-rss-plugin-auth-ldap - tt-rss-plugin-feediron - ]; - plugins = [ - "auth_internal" - "feediron" - "note" - ]; - singleUserMode = true; - themePackages = with pkgs; [ tt-rss-theme-feedly ]; - }; + nodes.machine = + { pkgs, ... 
}: + { + services.tt-rss = { + enable = true; + virtualHost = "localhost"; + selfUrlPath = "http://localhost/"; + pluginPackages = with pkgs; [ + tt-rss-plugin-auth-ldap + tt-rss-plugin-feediron + ]; + plugins = [ + "auth_internal" + "feediron" + "note" + ]; + singleUserMode = true; + themePackages = with pkgs; [ tt-rss-theme-feedly ]; }; + }; - testScript = '' - import json - import re - machine.wait_for_unit("tt-rss.service") + testScript = '' + import json + import re + machine.wait_for_unit("tt-rss.service") - matches = re.search('__csrf_token = "([^"]*)"', machine.succeed("curl -sSfL --cookie cjar --cookie-jar cjar -sSfL http://localhost/")) - if matches is None: - assert False, "CSRF token not found" - csrf_token = matches.group(1) + matches = re.search('__csrf_token = "([^"]*)"', machine.succeed("curl -sSfL --cookie cjar --cookie-jar cjar -sSfL http://localhost/")) + if matches is None: + assert False, "CSRF token not found" + csrf_token = matches.group(1) - # Ensure themes are loaded. No API found for these, so it's a crude check. - preference_page = machine.succeed("curl -sSfL --cookie cjar --cookie-jar cjar http://localhost/backend.php?op=Pref_Prefs") - assert "feedly" in preference_page + # Ensure themes are loaded. No API found for these, so it's a crude check. + preference_page = machine.succeed("curl -sSfL --cookie cjar --cookie-jar cjar http://localhost/backend.php?op=Pref_Prefs") + assert "feedly" in preference_page - plugins = json.loads(machine.succeed(f"curl -sSfL --cookie cjar --cookie-jar cjar 'http://localhost/backend.php' -X POST --data-raw 'op=Pref_Prefs&method=getPluginsList&csrf_token={csrf_token}'"))["plugins"] - expected_plugins = ["auth_internal", "auth_ldap", "feediron", "note"]; - found_plugins = [p["name"] for p in plugins if p["name"] in expected_plugins] - assert len(found_plugins) == len(expected_plugins), f"Expected plugins {expected_plugins}, found {found_plugins}" - ''; - } -) + plugins = json.loads(machine.succeed(f"curl -sSfL --cookie cjar --cookie-jar cjar 'http://localhost/backend.php' -X POST --data-raw 'op=Pref_Prefs&method=getPluginsList&csrf_token={csrf_token}'"))["plugins"] + expected_plugins = ["auth_internal", "auth_ldap", "feediron", "note"]; + found_plugins = [p["name"] for p in plugins if p["name"] in expected_plugins] + assert len(found_plugins) == len(expected_plugins), f"Expected plugins {expected_plugins}, found {found_plugins}" + ''; +} diff --git a/nixos/tests/web-apps/weblate.nix b/nixos/tests/web-apps/weblate.nix index b294dd01cfe5..27df88621973 100644 --- a/nixos/tests/web-apps/weblate.nix +++ b/nixos/tests/web-apps/weblate.nix @@ -1,105 +1,103 @@ -import ../make-test-python.nix ( - { pkgs, ... }: +{ pkgs, ... }: - let - certs = import ../common/acme/server/snakeoil-certs.nix; +let + certs = import ../common/acme/server/snakeoil-certs.nix; - serverDomain = certs.domain; + serverDomain = certs.domain; - admin = { - username = "admin"; - password = "snakeoilpass"; - }; - # An API token that we manually insert into the db as a valid one. - apiToken = "OVJh65sXaAfQMZ4NTcIGbFZIyBZbEZqWTi7azdDf"; - in - { - name = "weblate"; - meta.maintainers = with pkgs.lib.maintainers; [ erictapen ]; + admin = { + username = "admin"; + password = "snakeoilpass"; + }; + # An API token that we manually insert into the db as a valid one. + apiToken = "OVJh65sXaAfQMZ4NTcIGbFZIyBZbEZqWTi7azdDf"; +in +{ + name = "weblate"; + meta.maintainers = with pkgs.lib.maintainers; [ erictapen ]; - nodes.server = - { pkgs, lib, ... 
}: - { - virtualisation.memorySize = 2048; + nodes.server = + { pkgs, lib, ... }: + { + virtualisation.memorySize = 2048; - services.weblate = { - enable = true; - localDomain = "${serverDomain}"; - djangoSecretKeyFile = pkgs.writeText "weblate-django-secret" "thisissnakeoilsecretwithmorethan50characterscorrecthorsebatterystaple"; - extraConfig = '' - # Weblate tries to fetch Avatars from the network - ENABLE_AVATARS = False - ''; - }; - - services.nginx.virtualHosts."${serverDomain}" = { - enableACME = lib.mkForce false; - sslCertificate = certs."${serverDomain}".cert; - sslCertificateKey = certs."${serverDomain}".key; - }; - - security.pki.certificateFiles = [ certs.ca.cert ]; - - networking.hosts."::1" = [ "${serverDomain}" ]; - networking.firewall.allowedTCPPorts = [ - 80 - 443 - ]; - - users.users.weblate.shell = pkgs.bashInteractive; - }; - - nodes.client = - { pkgs, nodes, ... }: - { - environment.systemPackages = [ pkgs.wlc ]; - - environment.etc."xdg/weblate".text = '' - [weblate] - url = https://${serverDomain}/api/ - key = ${apiToken} + services.weblate = { + enable = true; + localDomain = "${serverDomain}"; + djangoSecretKeyFile = pkgs.writeText "weblate-django-secret" "thisissnakeoilsecretwithmorethan50characterscorrecthorsebatterystaple"; + extraConfig = '' + # Weblate tries to fetch Avatars from the network + ENABLE_AVATARS = False ''; - - networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ "${serverDomain}" ]; - - security.pki.certificateFiles = [ certs.ca.cert ]; }; - testScript = '' - import json + services.nginx.virtualHosts."${serverDomain}" = { + enableACME = lib.mkForce false; + sslCertificate = certs."${serverDomain}".cert; + sslCertificateKey = certs."${serverDomain}".key; + }; - start_all() - server.wait_for_unit("weblate.socket") - server.wait_until_succeeds("curl -f https://${serverDomain}/") - server.succeed("sudo -iu weblate -- weblate createadmin --username ${admin.username} --password ${admin.password} --email weblate@example.org") + security.pki.certificateFiles = [ certs.ca.cert ]; - # It's easier to replace the generated API token with a predefined one than - # to extract it at runtime. - server.succeed("sudo -iu weblate -- psql -d weblate -c \"UPDATE authtoken_token SET key = '${apiToken}' WHERE user_id = (SELECT id FROM weblate_auth_user WHERE username = 'admin');\"") + networking.hosts."::1" = [ "${serverDomain}" ]; + networking.firewall.allowedTCPPorts = [ + 80 + 443 + ]; - client.wait_for_unit("multi-user.target") + users.users.weblate.shell = pkgs.bashInteractive; + }; - # Test the official Weblate client wlc. - client.wait_until_succeeds("REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt wlc --debug list-projects") + nodes.client = + { pkgs, nodes, ... }: + { + environment.systemPackages = [ pkgs.wlc ]; - def call_wl_api(arg): - (rv, result) = client.execute("curl -H \"Content-Type: application/json\" -H \"Authorization: Token ${apiToken}\" https://${serverDomain}/api/{}".format(arg)) - assert rv == 0 - print(result) + environment.etc."xdg/weblate".text = '' + [weblate] + url = https://${serverDomain}/api/ + key = ${apiToken} + ''; - call_wl_api("users/ --data '{}'".format( - json.dumps( - {"username": "test1", - "full_name": "test1", - "email": "test1@example.org" - }))) + networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ "${serverDomain}" ]; - # TODO: Check sending and receiving email. 
- # server.wait_for_unit("postfix.service") + security.pki.certificateFiles = [ certs.ca.cert ]; + }; - server.succeed("sudo -iu weblate -- weblate check") - # TODO: The goal is for this to succeed, but there are still some checks failing. - # server.succeed("sudo -iu weblate -- weblate check --deploy") - ''; - } -) + testScript = '' + import json + + start_all() + server.wait_for_unit("weblate.socket") + server.wait_until_succeeds("curl -f https://${serverDomain}/") + server.succeed("sudo -iu weblate -- weblate createadmin --username ${admin.username} --password ${admin.password} --email weblate@example.org") + + # It's easier to replace the generated API token with a predefined one than + # to extract it at runtime. + server.succeed("sudo -iu weblate -- psql -d weblate -c \"UPDATE authtoken_token SET key = '${apiToken}' WHERE user_id = (SELECT id FROM weblate_auth_user WHERE username = 'admin');\"") + + client.wait_for_unit("multi-user.target") + + # Test the official Weblate client wlc. + client.wait_until_succeeds("REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt wlc --debug list-projects") + + def call_wl_api(arg): + (rv, result) = client.execute("curl -H \"Content-Type: application/json\" -H \"Authorization: Token ${apiToken}\" https://${serverDomain}/api/{}".format(arg)) + assert rv == 0 + print(result) + + call_wl_api("users/ --data '{}'".format( + json.dumps( + {"username": "test1", + "full_name": "test1", + "email": "test1@example.org" + }))) + + # TODO: Check sending and receiving email. + # server.wait_for_unit("postfix.service") + + server.succeed("sudo -iu weblate -- weblate check") + # TODO: The goal is for this to succeed, but there are still some checks failing. + # server.succeed("sudo -iu weblate -- weblate check --deploy") + ''; +} diff --git a/nixos/tests/web-servers/ttyd.nix b/nixos/tests/web-servers/ttyd.nix index 8735269afcff..948224e6fcb8 100644 --- a/nixos/tests/web-servers/ttyd.nix +++ b/nixos/tests/web-servers/ttyd.nix @@ -1,36 +1,34 @@ -import ../make-test-python.nix ( - { lib, pkgs, ... }: - { - name = "ttyd"; - meta.maintainers = with lib.maintainers; [ stunkymonkey ]; +{ lib, pkgs, ... }: +{ + name = "ttyd"; + meta.maintainers = with lib.maintainers; [ stunkymonkey ]; - nodes.readonly = - { pkgs, ... }: - { - services.ttyd = { - enable = true; - entrypoint = [ (lib.getExe pkgs.htop) ]; - writeable = false; - }; + nodes.readonly = + { pkgs, ... }: + { + services.ttyd = { + enable = true; + entrypoint = [ (lib.getExe pkgs.htop) ]; + writeable = false; }; + }; - nodes.writeable = - { pkgs, ... }: - { - services.ttyd = { - enable = true; - username = "foo"; - passwordFile = pkgs.writeText "password" "bar"; - writeable = true; - }; + nodes.writeable = + { pkgs, ... 
}: + { + services.ttyd = { + enable = true; + username = "foo"; + passwordFile = pkgs.writeText "password" "bar"; + writeable = true; }; + }; - testScript = '' - for machine in [readonly, writeable]: - machine.wait_for_unit("ttyd.service") - machine.wait_for_open_port(7681) - response = machine.succeed("curl -vvv -u foo:bar -s -H 'Host: ttyd' http://127.0.0.1:7681/") - assert 'ttyd - Terminal' in response, "Page didn't load successfully" - ''; - } -) + testScript = '' + for machine in [readonly, writeable]: + machine.wait_for_unit("ttyd.service") + machine.wait_for_open_port(7681) + response = machine.succeed("curl -vvv -u foo:bar -s -H 'Host: ttyd' http://127.0.0.1:7681/") + assert 'ttyd - Terminal' in response, "Page didn't load successfully" + ''; +} diff --git a/nixos/tests/web-servers/unit-perl.nix b/nixos/tests/web-servers/unit-perl.nix index e632221747cf..157e989c8bd1 100644 --- a/nixos/tests/web-servers/unit-perl.nix +++ b/nixos/tests/web-servers/unit-perl.nix @@ -1,46 +1,44 @@ -import ../make-test-python.nix ( - { pkgs, ... }: - let - testdir = pkgs.writeTextDir "www/app.psgi" '' - my $app = sub { - return [ - "200", - [ "Content-Type" => "text/plain" ], - [ "Hello, Perl on Unit!" ], - ]; - }; - ''; +{ pkgs, ... }: +let + testdir = pkgs.writeTextDir "www/app.psgi" '' + my $app = sub { + return [ + "200", + [ "Content-Type" => "text/plain" ], + [ "Hello, Perl on Unit!" ], + ]; + }; + ''; - in - { - name = "unit-perl-test"; - meta.maintainers = with pkgs.lib.maintainers; [ sgo ]; +in +{ + name = "unit-perl-test"; + meta.maintainers = with pkgs.lib.maintainers; [ sgo ]; - nodes.machine = - { - config, - lib, - pkgs, - ... - }: - { - services.unit = { - enable = true; - config = pkgs.lib.strings.toJSON { - listeners."*:8080".application = "perl"; - applications.perl = { - type = "perl"; - script = "${testdir}/www/app.psgi"; - }; + nodes.machine = + { + config, + lib, + pkgs, + ... + }: + { + services.unit = { + enable = true; + config = pkgs.lib.strings.toJSON { + listeners."*:8080".application = "perl"; + applications.perl = { + type = "perl"; + script = "${testdir}/www/app.psgi"; }; }; }; - testScript = '' - machine.wait_for_unit("unit.service") - machine.wait_for_open_port(8080) + }; + testScript = '' + machine.wait_for_unit("unit.service") + machine.wait_for_open_port(8080) - response = machine.succeed("curl -f -vvv -s http://127.0.0.1:8080/") - assert "Hello, Perl on Unit!" in response, "Hello world" - ''; - } -) + response = machine.succeed("curl -f -vvv -s http://127.0.0.1:8080/") + assert "Hello, Perl on Unit!" in response, "Hello world" + ''; +} diff --git a/nixos/tests/wg-access-server.nix b/nixos/tests/wg-access-server.nix index 63ec2a3ddbb5..cfd2c748ec1b 100644 --- a/nixos/tests/wg-access-server.nix +++ b/nixos/tests/wg-access-server.nix @@ -1,36 +1,34 @@ -import ./make-test-python.nix ( - { - pkgs, - lib, - kernelPackages ? null, - ... - }: - { - name = "wg-access-server"; - meta = with pkgs.lib.maintainers; { - maintainers = [ xanderio ]; - }; +{ + pkgs, + lib, + kernelPackages ? null, + ... 
+}: +{ + name = "wg-access-server"; + meta = with pkgs.lib.maintainers; { + maintainers = [ xanderio ]; + }; - nodes = { - server = { - services.wg-access-server = { - enable = true; - settings = { - adminUsername = "admin"; - }; - secretsFile = ( - pkgs.writers.writeYAML "secrets.yaml" { - adminPassword = "hunter2"; - } - ); + nodes = { + server = { + services.wg-access-server = { + enable = true; + settings = { + adminUsername = "admin"; }; + secretsFile = ( + pkgs.writers.writeYAML "secrets.yaml" { + adminPassword = "hunter2"; + } + ); }; }; + }; - testScript = '' - start_all() + testScript = '' + start_all() - server.wait_for_unit("wg-access-server.service") - ''; - } -) + server.wait_for_unit("wg-access-server.service") + ''; +} diff --git a/nixos/tests/whisparr.nix b/nixos/tests/whisparr.nix index 3d528a21c6e2..ee34aca18ce5 100644 --- a/nixos/tests/whisparr.nix +++ b/nixos/tests/whisparr.nix @@ -1,19 +1,17 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "whisparr"; - meta.maintainers = [ lib.maintainers.paveloom ]; +{ lib, ... }: +{ + name = "whisparr"; + meta.maintainers = [ lib.maintainers.paveloom ]; - nodes.machine = - { pkgs, ... }: - { - services.whisparr.enable = true; - }; + nodes.machine = + { pkgs, ... }: + { + services.whisparr.enable = true; + }; - testScript = '' - machine.wait_for_unit("whisparr.service") - machine.wait_for_open_port(6969) - machine.succeed("curl --fail http://localhost:6969/") - ''; - } -) + testScript = '' + machine.wait_for_unit("whisparr.service") + machine.wait_for_open_port(6969) + machine.succeed("curl --fail http://localhost:6969/") + ''; +} diff --git a/nixos/tests/whoogle-search.nix b/nixos/tests/whoogle-search.nix index 0f25609601ee..c85ad02bcb2f 100644 --- a/nixos/tests/whoogle-search.nix +++ b/nixos/tests/whoogle-search.nix @@ -1,24 +1,22 @@ -import ./make-test-python.nix ( - { pkgs, lib, ... }: - { - name = "whoogle-search"; - meta.maintainers = with lib.maintainers; [ malte-v ]; +{ pkgs, lib, ... }: +{ + name = "whoogle-search"; + meta.maintainers = with lib.maintainers; [ malte-v ]; - nodes.machine = - { pkgs, ... }: - { - services.whoogle-search = { - enable = true; - port = 5000; - listenAddress = "127.0.0.1"; - }; + nodes.machine = + { pkgs, ... }: + { + services.whoogle-search = { + enable = true; + port = 5000; + listenAddress = "127.0.0.1"; }; + }; - testScript = '' - machine.start() - machine.wait_for_unit("whoogle-search.service") - machine.wait_for_open_port(5000) - machine.wait_until_succeeds("curl --fail --show-error --silent --location localhost:5000/") - ''; - } -) + testScript = '' + machine.start() + machine.wait_for_unit("whoogle-search.service") + machine.wait_for_open_port(5000) + machine.wait_until_succeeds("curl --fail --show-error --silent --location localhost:5000/") + ''; +} diff --git a/nixos/tests/without-nix.nix b/nixos/tests/without-nix.nix index 2469a80e99b3..211b326f9353 100644 --- a/nixos/tests/without-nix.nix +++ b/nixos/tests/without-nix.nix @@ -1,37 +1,35 @@ -import ./make-test-python.nix ( - { lib, ... }: - { - name = "without-nix"; - meta = with lib.maintainers; { - maintainers = [ ericson2314 ]; +{ lib, ... }: +{ + name = "without-nix"; + meta = with lib.maintainers; { + maintainers = [ ericson2314 ]; + }; + + nodes.machine = + { ... 
}: + { + nix.enable = false; + nixpkgs.overlays = [ + (self: super: { + nix = throw "don't want to use pkgs.nix"; + nixVersions = lib.mapAttrs (k: throw "don't want to use pkgs.nixVersions.${k}") super.nixVersions; + # aliases, some deprecated + nix_2_3 = throw "don't want to use pkgs.nix_2_3"; + nix_2_4 = throw "don't want to use pkgs.nix_2_4"; + nix_2_5 = throw "don't want to use pkgs.nix_2_5"; + nix_2_6 = throw "don't want to use pkgs.nix_2_6"; + nixFlakes = throw "don't want to use pkgs.nixFlakes"; + nixStable = throw "don't want to use pkgs.nixStable"; + nixUnstable = throw "don't want to use pkgs.nixUnstable"; + nixStatic = throw "don't want to use pkgs.nixStatic"; + }) + ]; }; - nodes.machine = - { ... }: - { - nix.enable = false; - nixpkgs.overlays = [ - (self: super: { - nix = throw "don't want to use pkgs.nix"; - nixVersions = lib.mapAttrs (k: throw "don't want to use pkgs.nixVersions.${k}") super.nixVersions; - # aliases, some deprecated - nix_2_3 = throw "don't want to use pkgs.nix_2_3"; - nix_2_4 = throw "don't want to use pkgs.nix_2_4"; - nix_2_5 = throw "don't want to use pkgs.nix_2_5"; - nix_2_6 = throw "don't want to use pkgs.nix_2_6"; - nixFlakes = throw "don't want to use pkgs.nixFlakes"; - nixStable = throw "don't want to use pkgs.nixStable"; - nixUnstable = throw "don't want to use pkgs.nixUnstable"; - nixStatic = throw "don't want to use pkgs.nixStatic"; - }) - ]; - }; + testScript = '' + start_all() - testScript = '' - start_all() - - machine.succeed("which which") - machine.fail("which nix") - ''; - } -) + machine.succeed("which which") + machine.fail("which nix") + ''; +} diff --git a/nixos/tests/wmderland.nix b/nixos/tests/wmderland.nix index 44622f340ca9..4ce99b889521 100644 --- a/nixos/tests/wmderland.nix +++ b/nixos/tests/wmderland.nix @@ -1,66 +1,64 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - { - name = "wmderland"; - meta = with pkgs.lib.maintainers; { - maintainers = [ takagiy ]; +{ pkgs, ... }: +{ + name = "wmderland"; + meta = with pkgs.lib.maintainers; { + maintainers = [ takagiy ]; + }; + + nodes.machine = + { lib, ... }: + { + imports = [ + ./common/x11.nix + ./common/user-account.nix + ]; + test-support.displayManager.auto.user = "alice"; + services.displayManager.defaultSession = lib.mkForce "none+wmderland"; + services.xserver.windowManager.wmderland.enable = true; + + systemd.services.setupWmderlandConfig = { + wantedBy = [ "multi-user.target" ]; + before = [ "multi-user.target" ]; + environment = { + HOME = "/home/alice"; + }; + unitConfig = { + type = "oneshot"; + RemainAfterExit = true; + user = "alice"; + }; + script = + let + config = pkgs.writeText "config" '' + set $Mod = Mod1 + bindsym $Mod+Return exec ${pkgs.xterm}/bin/xterm -cm -pc + ''; + in + '' + mkdir -p $HOME/.config/wmderland + cp ${config} $HOME/.config/wmderland/config + ''; + }; }; - nodes.machine = - { lib, ... }: - { - imports = [ - ./common/x11.nix - ./common/user-account.nix - ]; - test-support.displayManager.auto.user = "alice"; - services.displayManager.defaultSession = lib.mkForce "none+wmderland"; - services.xserver.windowManager.wmderland.enable = true; + testScript = + { ... 
}: + '' + with subtest("ensure x starts"): + machine.wait_for_x() + machine.wait_for_file("/home/alice/.Xauthority") + machine.succeed("xauth merge ~alice/.Xauthority") - systemd.services.setupWmderlandConfig = { - wantedBy = [ "multi-user.target" ]; - before = [ "multi-user.target" ]; - environment = { - HOME = "/home/alice"; - }; - unitConfig = { - type = "oneshot"; - RemainAfterExit = true; - user = "alice"; - }; - script = - let - config = pkgs.writeText "config" '' - set $Mod = Mod1 - bindsym $Mod+Return exec ${pkgs.xterm}/bin/xterm -cm -pc - ''; - in - '' - mkdir -p $HOME/.config/wmderland - cp ${config} $HOME/.config/wmderland/config - ''; - }; - }; + with subtest("ensure we can open a new terminal"): + machine.send_key("alt-ret") + machine.wait_until_succeeds("pgrep xterm") + machine.wait_for_window(r"alice.*?machine") + machine.screenshot("terminal") - testScript = - { ... }: - '' - with subtest("ensure x starts"): - machine.wait_for_x() - machine.wait_for_file("/home/alice/.Xauthority") - machine.succeed("xauth merge ~alice/.Xauthority") - - with subtest("ensure we can open a new terminal"): - machine.send_key("alt-ret") - machine.wait_until_succeeds("pgrep xterm") - machine.wait_for_window(r"alice.*?machine") - machine.screenshot("terminal") - - with subtest("ensure we can communicate through ipc with wmderlandc"): - # Kills the previously open xterm - machine.succeed("pgrep xterm") - machine.execute("DISPLAY=:0 wmderlandc kill") - machine.fail("pgrep xterm") - ''; - } -) + with subtest("ensure we can communicate through ipc with wmderlandc"): + # Kills the previously open xterm + machine.succeed("pgrep xterm") + machine.execute("DISPLAY=:0 wmderlandc kill") + machine.fail("pgrep xterm") + ''; +} diff --git a/nixos/tests/workout-tracker.nix b/nixos/tests/workout-tracker.nix index 1ad509edf2d4..b937e3e6e09e 100644 --- a/nixos/tests/workout-tracker.nix +++ b/nixos/tests/workout-tracker.nix @@ -1,29 +1,27 @@ -import ./make-test-python.nix ( - { lib, pkgs, ... }: +{ lib, pkgs, ... }: - { - name = "workout-tracker"; +{ + name = "workout-tracker"; - meta.maintainers = with lib.maintainers; [ bhankas ]; + meta.maintainers = with lib.maintainers; [ bhankas ]; - nodes.machine = - { config, ... }: - { - virtualisation.memorySize = 2048; + nodes.machine = + { config, ... }: + { + virtualisation.memorySize = 2048; - services.workout-tracker.enable = true; - }; + services.workout-tracker.enable = true; + }; - testScript = '' - start_all() - machine.wait_for_unit("workout-tracker.service") - # wait for workout-tracker to fully come up + testScript = '' + start_all() + machine.wait_for_unit("workout-tracker.service") + # wait for workout-tracker to fully come up - with subtest("workout-tracker service starts"): - machine.wait_until_succeeds( - "curl -sSfL http://localhost:8080/ > /dev/null", - timeout=30 - ) - ''; - } -) + with subtest("workout-tracker service starts"): + machine.wait_until_succeeds( + "curl -sSfL http://localhost:8080/ > /dev/null", + timeout=30 + ) + ''; +} diff --git a/nixos/tests/wrappers.nix b/nixos/tests/wrappers.nix index 4766051805c7..e27e02bc4d10 100644 --- a/nixos/tests/wrappers.nix +++ b/nixos/tests/wrappers.nix @@ -1,128 +1,126 @@ -import ./make-test-python.nix ( - { pkgs, ... }: - let - userUid = 1000; - usersGid = 100; - busybox = - pkgs: - pkgs.busybox.override { - # Without this, the busybox binary drops euid to ruid for most applets, including id. 
- # See https://bugs.busybox.net/show_bug.cgi?id=15101 - extraConfig = "CONFIG_FEATURE_SUID n"; - }; - in - { - name = "wrappers"; +{ pkgs, ... }: +let + userUid = 1000; + usersGid = 100; + busybox = + pkgs: + pkgs.busybox.override { + # Without this, the busybox binary drops euid to ruid for most applets, including id. + # See https://bugs.busybox.net/show_bug.cgi?id=15101 + extraConfig = "CONFIG_FEATURE_SUID n"; + }; +in +{ + name = "wrappers"; - nodes.machine = - { config, pkgs, ... }: - { - ids.gids.users = usersGid; + nodes.machine = + { config, pkgs, ... }: + { + ids.gids.users = usersGid; - users.users = { - regular = { - uid = userUid; - isNormalUser = true; - }; - }; - - security.apparmor.enable = true; - - security.wrappers = { - disabled = { - enable = false; - owner = "root"; - group = "root"; - setuid = true; - source = "${busybox pkgs}/bin/busybox"; - program = "disabled_busybox"; - }; - suidRoot = { - owner = "root"; - group = "root"; - setuid = true; - source = "${busybox pkgs}/bin/busybox"; - program = "suid_root_busybox"; - }; - sgidRoot = { - owner = "root"; - group = "root"; - setgid = true; - source = "${busybox pkgs}/bin/busybox"; - program = "sgid_root_busybox"; - }; - withChown = { - owner = "root"; - group = "root"; - source = "${pkgs.libcap}/bin/capsh"; - program = "capsh_with_chown"; - capabilities = "cap_chown+ep"; - }; + users.users = { + regular = { + uid = userUid; + isNormalUser = true; }; }; - testScript = '' - def cmd_as_regular(cmd): - return "su -l regular -c '{0}'".format(cmd) + security.apparmor.enable = true; - def test_as_regular(cmd, expected): - out = machine.succeed(cmd_as_regular(cmd)).strip() - assert out == expected, "Expected {0} to output {1}, but got {2}".format(cmd, expected, out) + security.wrappers = { + disabled = { + enable = false; + owner = "root"; + group = "root"; + setuid = true; + source = "${busybox pkgs}/bin/busybox"; + program = "disabled_busybox"; + }; + suidRoot = { + owner = "root"; + group = "root"; + setuid = true; + source = "${busybox pkgs}/bin/busybox"; + program = "suid_root_busybox"; + }; + sgidRoot = { + owner = "root"; + group = "root"; + setgid = true; + source = "${busybox pkgs}/bin/busybox"; + program = "sgid_root_busybox"; + }; + withChown = { + owner = "root"; + group = "root"; + source = "${pkgs.libcap}/bin/capsh"; + program = "capsh_with_chown"; + capabilities = "cap_chown+ep"; + }; + }; + }; - def test_as_regular_in_userns_mapped_as_root(cmd, expected): - out = machine.succeed(f"su -l regular -c '${pkgs.util-linux}/bin/unshare -rm {cmd}'").strip() - assert out == expected, "Expected {0} to output {1}, but got {2}".format(cmd, expected, out) + testScript = '' + def cmd_as_regular(cmd): + return "su -l regular -c '{0}'".format(cmd) - test_as_regular('${busybox pkgs}/bin/busybox id -u', '${toString userUid}') - test_as_regular('${busybox pkgs}/bin/busybox id -ru', '${toString userUid}') - test_as_regular('${busybox pkgs}/bin/busybox id -g', '${toString usersGid}') - test_as_regular('${busybox pkgs}/bin/busybox id -rg', '${toString usersGid}') + def test_as_regular(cmd, expected): + out = machine.succeed(cmd_as_regular(cmd)).strip() + assert out == expected, "Expected {0} to output {1}, but got {2}".format(cmd, expected, out) - test_as_regular('/run/wrappers/bin/suid_root_busybox id -u', '0') - test_as_regular('/run/wrappers/bin/suid_root_busybox id -ru', '${toString userUid}') - test_as_regular('/run/wrappers/bin/suid_root_busybox id -g', '${toString usersGid}') - 
test_as_regular('/run/wrappers/bin/suid_root_busybox id -rg', '${toString usersGid}') + def test_as_regular_in_userns_mapped_as_root(cmd, expected): + out = machine.succeed(f"su -l regular -c '${pkgs.util-linux}/bin/unshare -rm {cmd}'").strip() + assert out == expected, "Expected {0} to output {1}, but got {2}".format(cmd, expected, out) - test_as_regular('/run/wrappers/bin/sgid_root_busybox id -u', '${toString userUid}') - test_as_regular('/run/wrappers/bin/sgid_root_busybox id -ru', '${toString userUid}') - test_as_regular('/run/wrappers/bin/sgid_root_busybox id -g', '0') - test_as_regular('/run/wrappers/bin/sgid_root_busybox id -rg', '${toString usersGid}') + test_as_regular('${busybox pkgs}/bin/busybox id -u', '${toString userUid}') + test_as_regular('${busybox pkgs}/bin/busybox id -ru', '${toString userUid}') + test_as_regular('${busybox pkgs}/bin/busybox id -g', '${toString usersGid}') + test_as_regular('${busybox pkgs}/bin/busybox id -rg', '${toString usersGid}') - test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -u', '0') - test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -ru', '0') - test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -g', '0') - test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -rg', '0') + test_as_regular('/run/wrappers/bin/suid_root_busybox id -u', '0') + test_as_regular('/run/wrappers/bin/suid_root_busybox id -ru', '${toString userUid}') + test_as_regular('/run/wrappers/bin/suid_root_busybox id -g', '${toString usersGid}') + test_as_regular('/run/wrappers/bin/suid_root_busybox id -rg', '${toString usersGid}') - test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -u', '0') - test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -ru', '0') - test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -g', '0') - test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -rg', '0') + test_as_regular('/run/wrappers/bin/sgid_root_busybox id -u', '${toString userUid}') + test_as_regular('/run/wrappers/bin/sgid_root_busybox id -ru', '${toString userUid}') + test_as_regular('/run/wrappers/bin/sgid_root_busybox id -g', '0') + test_as_regular('/run/wrappers/bin/sgid_root_busybox id -rg', '${toString usersGid}') - # Test that in nonewprivs environment the wrappers simply exec their target. 
- test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -u', '${toString userUid}') - test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -ru', '${toString userUid}') - test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -g', '${toString usersGid}') - test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -rg', '${toString usersGid}') + test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -u', '0') + test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -ru', '0') + test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -g', '0') + test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -rg', '0') - test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -u', '${toString userUid}') - test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -ru', '${toString userUid}') - test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -g', '${toString usersGid}') - test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -rg', '${toString usersGid}') + test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -u', '0') + test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -ru', '0') + test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -g', '0') + test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -rg', '0') - # We are only testing the permitted set, because it's easiest to look at with capsh. - machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_CHOWN')) - machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_SYS_ADMIN')) - machine.succeed(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_CHOWN')) - machine.fail(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_SYS_ADMIN')) + # Test that in nonewprivs environment the wrappers simply exec their target. + test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -u', '${toString userUid}') + test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -ru', '${toString userUid}') + test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -g', '${toString usersGid}') + test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -rg', '${toString usersGid}') - # Test that the only user of apparmor policy includes generated by - # wrappers works. Ideally this'd be located in a test for the module that - # actually makes the apparmor policy for ping, but there's no convenient - # test for that one. 
- machine.succeed("ping -c 1 127.0.0.1") + test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -u', '${toString userUid}') + test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -ru', '${toString userUid}') + test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -g', '${toString usersGid}') + test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -rg', '${toString usersGid}') - # Test that the disabled wrapper is not present. - machine.fail("test -e /run/wrappers/bin/disabled_busybox") - ''; - } -) + # We are only testing the permitted set, because it's easiest to look at with capsh. + machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_CHOWN')) + machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_SYS_ADMIN')) + machine.succeed(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_CHOWN')) + machine.fail(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_SYS_ADMIN')) + + # Test that the only user of apparmor policy includes generated by + # wrappers works. Ideally this'd be located in a test for the module that + # actually makes the apparmor policy for ping, but there's no convenient + # test for that one. + machine.succeed("ping -c 1 127.0.0.1") + + # Test that the disabled wrapper is not present. + machine.fail("test -e /run/wrappers/bin/disabled_busybox") + ''; +} diff --git a/nixos/tests/xmpp/ejabberd.nix b/nixos/tests/xmpp/ejabberd.nix index a9dcdab80518..2172ae54bc91 100644 --- a/nixos/tests/xmpp/ejabberd.nix +++ b/nixos/tests/xmpp/ejabberd.nix @@ -7,315 +7,313 @@ let cp key.pem cert.pem $out ''; in -import ../make-test-python.nix ( - { pkgs, ... }: - { - name = "ejabberd"; - meta = with pkgs.lib.maintainers; { - maintainers = [ ]; - }; - nodes = { - client = - { nodes, pkgs, ... }: - { - security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ]; - networking.extraHosts = '' - ${nodes.server.networking.primaryIPAddress} example.com - ''; +{ pkgs, ... }: +{ + name = "ejabberd"; + meta = with pkgs.lib.maintainers; { + maintainers = [ ]; + }; + nodes = { + client = + { nodes, pkgs, ... }: + { + security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ]; + networking.extraHosts = '' + ${nodes.server.networking.primaryIPAddress} example.com + ''; - environment.systemPackages = [ - (pkgs.callPackage ./xmpp-sendmessage.nix { - connectTo = nodes.server.networking.primaryIPAddress; - }) - ]; + environment.systemPackages = [ + (pkgs.callPackage ./xmpp-sendmessage.nix { + connectTo = nodes.server.networking.primaryIPAddress; + }) + ]; + }; + server = + { config, pkgs, ... }: + { + security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ]; + networking.extraHosts = '' + ${config.networking.primaryIPAddress} example.com + ${config.networking.primaryIPAddress} matrix.example.com + ''; + + services.ejabberd = { + enable = true; + configFile = "/etc/ejabberd.yml"; }; - server = - { config, pkgs, ... 
}: - { - security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ]; - networking.extraHosts = '' - ${config.networking.primaryIPAddress} example.com - ${config.networking.primaryIPAddress} matrix.example.com - ''; - services.ejabberd = { - enable = true; - configFile = "/etc/ejabberd.yml"; - }; + systemd.services.ejabberd.serviceConfig.TimeoutStartSec = "15min"; + environment.etc."ejabberd.yml" = { + user = "ejabberd"; + mode = "0600"; + text = '' + loglevel: 3 - systemd.services.ejabberd.serviceConfig.TimeoutStartSec = "15min"; - environment.etc."ejabberd.yml" = { - user = "ejabberd"; - mode = "0600"; - text = '' - loglevel: 3 + hosts: + - "example.com" - hosts: - - "example.com" + listen: + - + port: 5222 + module: ejabberd_c2s + zlib: false + max_stanza_size: 65536 + shaper: c2s_shaper + access: c2s + starttls: true + - + port: 5269 + ip: "::" + module: ejabberd_s2s_in + - + port: 8448 + module: ejabberd_http + tls: true + request_handlers: + "/_matrix": mod_matrix_gw + - + port: 5347 + ip: "127.0.0.1" + module: ejabberd_service + access: local + shaper: fast + - + port: 5444 + module: ejabberd_http + request_handlers: + "/upload": mod_http_upload - listen: - - - port: 5222 - module: ejabberd_c2s - zlib: false - max_stanza_size: 65536 - shaper: c2s_shaper - access: c2s - starttls: true - - - port: 5269 - ip: "::" - module: ejabberd_s2s_in - - - port: 8448 - module: ejabberd_http - tls: true - request_handlers: - "/_matrix": mod_matrix_gw - - - port: 5347 - ip: "127.0.0.1" - module: ejabberd_service - access: local - shaper: fast - - - port: 5444 - module: ejabberd_http - request_handlers: - "/upload": mod_http_upload + certfiles: + - ${cert pkgs}/key.pem + - ${cert pkgs}/cert.pem - certfiles: - - ${cert pkgs}/key.pem - - ${cert pkgs}/cert.pem + ## Disabling digest-md5 SASL authentication. digest-md5 requires plain-text + ## password storage (see auth_password_format option). + disable_sasl_mechanisms: "digest-md5" - ## Disabling digest-md5 SASL authentication. digest-md5 requires plain-text - ## password storage (see auth_password_format option). - disable_sasl_mechanisms: "digest-md5" + ## Outgoing S2S options + ## Preferred address families (which to try first) and connect timeout + ## in seconds. + outgoing_s2s_families: + - ipv4 + - ipv6 - ## Outgoing S2S options - ## Preferred address families (which to try first) and connect timeout - ## in seconds. - outgoing_s2s_families: - - ipv4 - - ipv6 + ## auth_method: Method used to authenticate the users. + ## The default method is the internal. + ## If you want to use a different method, + ## comment this line and enable the correct ones. + auth_method: internal - ## auth_method: Method used to authenticate the users. - ## The default method is the internal. - ## If you want to use a different method, - ## comment this line and enable the correct ones. - auth_method: internal + ## Store the plain passwords or hashed for SCRAM: + ## auth_password_format: plain + auth_password_format: scram - ## Store the plain passwords or hashed for SCRAM: - ## auth_password_format: plain - auth_password_format: scram + ###' TRAFFIC SHAPERS + shaper: + # in B/s + normal: 1000000 + fast: 50000000 - ###' TRAFFIC SHAPERS - shaper: - # in B/s - normal: 1000000 - fast: 50000000 + ## This option specifies the maximum number of elements in the queue + ## of the FSM. Refer to the documentation for details. + max_fsm_queue: 1000 - ## This option specifies the maximum number of elements in the queue - ## of the FSM. Refer to the documentation for details. 
- max_fsm_queue: 1000 + ###' ACCESS CONTROL LISTS + acl: + ## The 'admin' ACL grants administrative privileges to XMPP accounts. + ## You can put here as many accounts as you want. + admin: + user: + - "root": "example.com" - ###' ACCESS CONTROL LISTS - acl: - ## The 'admin' ACL grants administrative privileges to XMPP accounts. - ## You can put here as many accounts as you want. - admin: - user: - - "root": "example.com" + ## Local users: don't modify this. + local: + user_regexp: "" - ## Local users: don't modify this. - local: - user_regexp: "" + ## Loopback network + loopback: + ip: + - "127.0.0.0/8" + - "::1/128" + - "::FFFF:127.0.0.1/128" - ## Loopback network - loopback: - ip: - - "127.0.0.0/8" - - "::1/128" - - "::FFFF:127.0.0.1/128" + ###' SHAPER RULES + shaper_rules: + ## Maximum number of simultaneous sessions allowed for a single user: + max_user_sessions: 10 + ## Maximum number of offline messages that users can have: + max_user_offline_messages: + - 5000: admin + - 1024 + ## For C2S connections, all users except admins use the "normal" shaper + c2s_shaper: + - none: admin + - normal + ## All S2S connections use the "fast" shaper + s2s_shaper: fast - ###' SHAPER RULES - shaper_rules: - ## Maximum number of simultaneous sessions allowed for a single user: - max_user_sessions: 10 - ## Maximum number of offline messages that users can have: - max_user_offline_messages: - - 5000: admin - - 1024 - ## For C2S connections, all users except admins use the "normal" shaper - c2s_shaper: - - none: admin - - normal - ## All S2S connections use the "fast" shaper - s2s_shaper: fast + ###' ACCESS RULES + access_rules: + ## This rule allows access only for local users: + local: + - allow: local + ## Only non-blocked users can use c2s connections: + c2s: + - deny: blocked + - allow + ## Only admins can send announcement messages: + announce: + - allow: admin + ## Only admins can use the configuration interface: + configure: + - allow: admin + ## Only accounts of the local ejabberd server can create rooms: + muc_create: + - allow: local + ## Only accounts on the local ejabberd server can create Pubsub nodes: + pubsub_createnode: + - allow: local + ## In-band registration allows registration of any possible username. + ## To disable in-band registration, replace 'allow' with 'deny'. + register: + - allow + ## Only allow to register from localhost + trusted_network: + - allow: loopback - ###' ACCESS RULES - access_rules: - ## This rule allows access only for local users: - local: - - allow: local - ## Only non-blocked users can use c2s connections: - c2s: - - deny: blocked + ## =============== + ## API PERMISSIONS + ## =============== + ## + ## This section allows you to define who and using what method + ## can execute commands offered by ejabberd. + ## + ## By default "console commands" section allow executing all commands + ## issued using ejabberdctl command, and "admin access" section allows + ## users in admin acl that connect from 127.0.0.1 to execute all + ## commands except start and stop with any available access method + ## (ejabberdctl, http-api, xmlrpc depending what is enabled on server). + ## + ## If you remove "console commands" there will be one added by + ## default allowing executing all commands, but if you just change + ## permissions in it, version from config file will be used instead + ## of default one. 
+ ## + api_permissions: + "console commands": + from: + - ejabberd_ctl + who: all + what: "*" + + language: "en" + + ###' MODULES + ## Modules enabled in all ejabberd virtual hosts. + modules: + mod_adhoc: {} + mod_announce: # recommends mod_adhoc + access: announce + mod_blocking: {} # requires mod_privacy + mod_caps: {} + mod_carboncopy: {} + mod_client_state: {} + mod_configure: {} # requires mod_adhoc + ## mod_delegation: {} # for xep0356 + mod_disco: {} + #mod_irc: + # host: "irc.@HOST@" + # default_encoding: "utf-8" + ## mod_bosh: {} + ## mod_http_fileserver: + ## docroot: "/var/www" + ## accesslog: "/var/log/ejabberd/access.log" + mod_http_upload: + thumbnail: false # otherwise needs the identify command from ImageMagick installed + put_url: "http://@HOST@:5444/upload" + ## # docroot: "@HOME@/upload" + #mod_http_upload_quota: + # max_days: 14 + mod_last: {} + ## XEP-0313: Message Archive Management + ## You might want to setup a SQL backend for MAM because the mnesia database is + ## limited to 2GB which might be exceeded on large servers + mod_mam: {} + mod_muc: + host: "muc.@HOST@" + access: - allow - ## Only admins can send announcement messages: - announce: + access_admin: - allow: admin - ## Only admins can use the configuration interface: - configure: - - allow: admin - ## Only accounts of the local ejabberd server can create rooms: - muc_create: - - allow: local - ## Only accounts on the local ejabberd server can create Pubsub nodes: - pubsub_createnode: - - allow: local - ## In-band registration allows registration of any possible username. - ## To disable in-band registration, replace 'allow' with 'deny'. - register: - - allow - ## Only allow to register from localhost - trusted_network: - - allow: loopback - - ## =============== - ## API PERMISSIONS - ## =============== - ## - ## This section allows you to define who and using what method - ## can execute commands offered by ejabberd. - ## - ## By default "console commands" section allow executing all commands - ## issued using ejabberdctl command, and "admin access" section allows - ## users in admin acl that connect from 127.0.0.1 to execute all - ## commands except start and stop with any available access method - ## (ejabberdctl, http-api, xmlrpc depending what is enabled on server). - ## - ## If you remove "console commands" there will be one added by - ## default allowing executing all commands, but if you just change - ## permissions in it, version from config file will be used instead - ## of default one. - ## - api_permissions: - "console commands": - from: - - ejabberd_ctl - who: all - what: "*" - - language: "en" - - ###' MODULES - ## Modules enabled in all ejabberd virtual hosts. 
- modules: - mod_adhoc: {} - mod_announce: # recommends mod_adhoc - access: announce - mod_blocking: {} # requires mod_privacy - mod_caps: {} - mod_carboncopy: {} - mod_client_state: {} - mod_configure: {} # requires mod_adhoc - ## mod_delegation: {} # for xep0356 - mod_disco: {} - #mod_irc: - # host: "irc.@HOST@" - # default_encoding: "utf-8" - ## mod_bosh: {} - ## mod_http_fileserver: - ## docroot: "/var/www" - ## accesslog: "/var/log/ejabberd/access.log" - mod_http_upload: - thumbnail: false # otherwise needs the identify command from ImageMagick installed - put_url: "http://@HOST@:5444/upload" - ## # docroot: "@HOME@/upload" - #mod_http_upload_quota: - # max_days: 14 - mod_last: {} - ## XEP-0313: Message Archive Management - ## You might want to setup a SQL backend for MAM because the mnesia database is - ## limited to 2GB which might be exceeded on large servers - mod_mam: {} - mod_muc: - host: "muc.@HOST@" - access: - - allow - access_admin: - - allow: admin - access_create: muc_create - access_persistent: muc_create - mod_muc_admin: {} - mod_muc_log: {} - mod_offline: - access_max_user_messages: max_user_offline_messages - mod_ping: {} - ## mod_pres_counter: - ## count: 5 - ## interval: 60 - mod_privacy: {} - mod_private: {} - mod_roster: - versioning: true - mod_shared_roster: {} - mod_stats: {} - mod_time: {} - mod_vcard: - search: false - mod_vcard_xupdate: {} - ## Convert all avatars posted by Android clients from WebP to JPEG - mod_avatar: {} - # convert: - # webp: jpeg - mod_version: {} - mod_stream_mgmt: {} - ## The module for S2S dialback (XEP-0220). Please note that you cannot - ## rely solely on dialback if you want to federate with other servers, - ## because a lot of servers have dialback disabled and instead rely on - ## PKIX authentication. Make sure you have proper certificates installed - ## and check your accessibility at https://check.messaging.one/ - mod_s2s_dialback: {} - mod_pubsub: - plugins: - - "pep" - mod_push: {} - mod_matrix_gw: - key_name: key1 - key: MATRIX_SECRET - ''; - }; - - systemd.services.ejabberd.serviceConfig.EnvironmentFile = pkgs.writeText "ejabberd.env" '' - EJABBERD_MACRO_MATRIX_SECRET=SU4mu/j8b8A1i1EdyxIcKlFlrp+eSRBIlZwGyHP7Mfo= + access_create: muc_create + access_persistent: muc_create + mod_muc_admin: {} + mod_muc_log: {} + mod_offline: + access_max_user_messages: max_user_offline_messages + mod_ping: {} + ## mod_pres_counter: + ## count: 5 + ## interval: 60 + mod_privacy: {} + mod_private: {} + mod_roster: + versioning: true + mod_shared_roster: {} + mod_stats: {} + mod_time: {} + mod_vcard: + search: false + mod_vcard_xupdate: {} + ## Convert all avatars posted by Android clients from WebP to JPEG + mod_avatar: {} + # convert: + # webp: jpeg + mod_version: {} + mod_stream_mgmt: {} + ## The module for S2S dialback (XEP-0220). Please note that you cannot + ## rely solely on dialback if you want to federate with other servers, + ## because a lot of servers have dialback disabled and instead rely on + ## PKIX authentication. Make sure you have proper certificates installed + ## and check your accessibility at https://check.messaging.one/ + mod_s2s_dialback: {} + mod_pubsub: + plugins: + - "pep" + mod_push: {} + mod_matrix_gw: + key_name: key1 + key: MATRIX_SECRET ''; - networking.firewall.enable = false; }; - }; - testScript = - { nodes, ... 
}: - '' - ejabberd_prefix = "su ejabberd -s $(which ejabberdctl) " + systemd.services.ejabberd.serviceConfig.EnvironmentFile = pkgs.writeText "ejabberd.env" '' + EJABBERD_MACRO_MATRIX_SECRET=SU4mu/j8b8A1i1EdyxIcKlFlrp+eSRBIlZwGyHP7Mfo= + ''; + networking.firewall.enable = false; + }; + }; - server.wait_for_unit("ejabberd.service") + testScript = + { nodes, ... }: + '' + ejabberd_prefix = "su ejabberd -s $(which ejabberdctl) " - assert "status: started" in server.succeed(ejabberd_prefix + "status") + server.wait_for_unit("ejabberd.service") - server.succeed("curl https://matrix.example.com:8448/_matrix/key/v2/server") + assert "status: started" in server.succeed(ejabberd_prefix + "status") - server.succeed( - ejabberd_prefix + "register azurediamond example.com hunter2", - ejabberd_prefix + "register cthon98 example.com nothunter2", - ) - server.fail(ejabberd_prefix + "register asdf wrong.domain") - client.succeed("send-message") - server.succeed( - ejabberd_prefix + "unregister cthon98 example.com", - ejabberd_prefix + "unregister azurediamond example.com", - ) - ''; - } -) + server.succeed("curl https://matrix.example.com:8448/_matrix/key/v2/server") + + server.succeed( + ejabberd_prefix + "register azurediamond example.com hunter2", + ejabberd_prefix + "register cthon98 example.com nothunter2", + ) + server.fail(ejabberd_prefix + "register asdf wrong.domain") + client.succeed("send-message") + server.succeed( + ejabberd_prefix + "unregister cthon98 example.com", + ejabberd_prefix + "unregister azurediamond example.com", + ) + ''; +} From 3d8e31838f2a9ee29f38f842821a70f8cdabf34b Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Mon, 26 May 2025 15:53:00 +0000 Subject: [PATCH 107/220] python3Packages.pyspellchecker: 0.8.2 -> 0.8.3 --- pkgs/development/python-modules/pyspellchecker/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/pyspellchecker/default.nix b/pkgs/development/python-modules/pyspellchecker/default.nix index 693863346bb7..5d7321371d74 100644 --- a/pkgs/development/python-modules/pyspellchecker/default.nix +++ b/pkgs/development/python-modules/pyspellchecker/default.nix @@ -8,14 +8,14 @@ buildPythonPackage rec { pname = "pyspellchecker"; - version = "0.8.2"; + version = "0.8.3"; format = "pyproject"; src = fetchFromGitHub { owner = "barrust"; repo = "pyspellchecker"; tag = "v${version}"; - hash = "sha256-sQNYtm+EK/F4S/Kfy87MwqDjCfV33/v8bYi48UBz+qc="; + hash = "sha256-cfYtUOXO4xzO2CYYhWMv3o40iw5/+nvA8MAzJn6LPlQ="; }; nativeBuildInputs = [ setuptools ]; From 4b516a4b7f7c9bf970ffe30e21ab02c230aa477a Mon Sep 17 00:00:00 2001 From: Malo Bourgon Date: Mon, 26 May 2025 09:08:04 -0700 Subject: [PATCH 108/220] inshellisense: 0.0.1-rc.20 -> 0.0.1-rc.21 Diff: https://github.com/microsoft/inshellisense/compare/refs/tags/0.0.1-rc.20...refs/tags/0.0.1-rc.21 --- pkgs/by-name/in/inshellisense/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/in/inshellisense/package.nix b/pkgs/by-name/in/inshellisense/package.nix index 88c7abc32697..b03751c40d40 100644 --- a/pkgs/by-name/in/inshellisense/package.nix +++ b/pkgs/by-name/in/inshellisense/package.nix @@ -8,16 +8,16 @@ buildNpmPackage rec { pname = "inshellisense"; - version = "0.0.1-rc.20"; + version = "0.0.1-rc.21"; src = fetchFromGitHub { owner = "microsoft"; repo = "inshellisense"; tag = version; - hash = "sha256-UGF7tARMnRaeIEKUhYa63hBpEoMb6qV209ECPirkgyg="; + hash = "sha256-zERwrvioPwGm/351kYuK9S3uOrrzs/6OFPRdNSSr7Tc="; }; - 
npmDepsHash = "sha256-ycU0vEMgiKBaGKWMBPzQfIvBx6Q7jIHxgzZyi9VGBhw="; + npmDepsHash = "sha256-iD5SvkVbrHh0Hx44y6VtNerwBA8K7vSe/yfvhgndMEw="; # Needed for dependency `@homebridge/node-pty-prebuilt-multiarch` # On Darwin systems the build fails with, From 8b1e36bd4eaff10cc731c05ae9e13c6758381e0a Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Mon, 26 May 2025 16:46:28 +0000 Subject: [PATCH 109/220] python3Packages.bilibili-api-python: 17.1.4 -> 17.2.0 --- .../python-modules/bilibili-api-python/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/bilibili-api-python/default.nix b/pkgs/development/python-modules/bilibili-api-python/default.nix index 1f524dab9ce5..91db287eb3e9 100644 --- a/pkgs/development/python-modules/bilibili-api-python/default.nix +++ b/pkgs/development/python-modules/bilibili-api-python/default.nix @@ -24,13 +24,13 @@ }: buildPythonPackage rec { pname = "bilibili-api-python"; - version = "17.1.4"; + version = "17.2.0"; pyproject = true; src = fetchPypi { pname = "bilibili_api_python"; inherit version; - hash = "sha256-Wmfrf4rJzKk088ZoWNmoR9mI9NjLKJGjo5QYDWzwuVA="; + hash = "sha256-WzNuoVpl2ZDkuGgfWm3surJJyRVdYDUf+MqF5jUil/s="; }; # The upstream uses requirements.txt, which overly strict version constraints. From 027a9f70507d6ebc94e39f0555027a7e838c1804 Mon Sep 17 00:00:00 2001 From: melvyn Date: Mon, 26 May 2025 10:08:31 -0700 Subject: [PATCH 110/220] python3Packages.djangosaml2: update changelog url --- pkgs/development/python-modules/djangosaml2/default.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkgs/development/python-modules/djangosaml2/default.nix b/pkgs/development/python-modules/djangosaml2/default.nix index 37db74d9c29b..27a23298d60a 100644 --- a/pkgs/development/python-modules/djangosaml2/default.nix +++ b/pkgs/development/python-modules/djangosaml2/default.nix @@ -47,7 +47,7 @@ buildPythonPackage rec { meta = { description = "Django SAML2 Service Provider based on pySAML2"; homepage = "https://github.com/IdentityPython/djangosaml2"; - changelog = "https://github.com/IdentityPython/djangosaml2/blob/${src.tag}/CHANGES"; + changelog = "https://github.com/IdentityPython/djangosaml2/releases/tag/${src.tag}"; license = lib.licenses.asl20; maintainers = with lib.maintainers; [ melvyn2 ]; }; From a578aa41cb4a6f04d7d4ca1a268a421caa665f05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=BCtz?= Date: Mon, 26 May 2025 10:43:07 -0700 Subject: [PATCH 111/220] octoprint: unpin pydantic --- pkgs/by-name/oc/octoprint/package.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkgs/by-name/oc/octoprint/package.nix b/pkgs/by-name/oc/octoprint/package.nix index cec7024bccc3..182f7382702f 100644 --- a/pkgs/by-name/oc/octoprint/package.nix +++ b/pkgs/by-name/oc/octoprint/package.nix @@ -132,7 +132,7 @@ let zeroconf zipstream-ng class-doc - pydantic_1 + pydantic ] ++ lib.optionals stdenv.hostPlatform.isDarwin [ py.pkgs.appdirs ] ++ lib.optionals (!stdenv.hostPlatform.isDarwin) [ octoprint-pisupport ]; From 52b157cbf7c775ba953b3c23fe41af3c2512f7d1 Mon Sep 17 00:00:00 2001 From: Defelo Date: Mon, 26 May 2025 17:57:17 +0000 Subject: [PATCH 112/220] olivetin: 2025.4.22 -> 2025.5.26 --- pkgs/by-name/ol/olivetin/package.nix | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkgs/by-name/ol/olivetin/package.nix b/pkgs/by-name/ol/olivetin/package.nix index 24cb51325a2a..38833afae1ea 100644 --- a/pkgs/by-name/ol/olivetin/package.nix +++ 
b/pkgs/by-name/ol/olivetin/package.nix @@ -56,7 +56,7 @@ buildGoModule ( pname = "olivetin-webui"; inherit (finalAttrs) version src; - npmDepsHash = "sha256-VxIPjpsbxEPP15cu5Wvz0qeDGXTMb2tojdry8YaHMVI="; + npmDepsHash = "sha256-59ImpfuLtsZG2Y6B3R09ePaTEuFbIhklk2jKibaB+wg="; sourceRoot = "${finalAttrs.src.name}/webui.dev"; @@ -81,18 +81,18 @@ buildGoModule ( { pname = "olivetin"; - version = "2025.4.22"; + version = "2025.5.26"; src = fetchFromGitHub { owner = "OliveTin"; repo = "OliveTin"; tag = finalAttrs.version; - hash = "sha256-dgoYGtBsyaCTgxCT/y7rU5B9pEvIU/yiLU2/pPm/vJU="; + hash = "sha256-BD52MxIHE56y3oWuRTCYmrEYMge70/MXt4B6g84ahF0="; }; modRoot = "service"; - vendorHash = "sha256-yUW4BrC7Oqs+mvvZGp6oxdGQ9fxP5gQNacq6Pz5ZHAQ="; + vendorHash = "sha256-kfk4QFG+l+XKkKoOs2C1B6ZuMeeDz9DrzzR46S8Qnyk="; ldflags = [ "-s" From ed254818b6035215921b9e73bed28fbfdfc720ce Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Mon, 26 May 2025 20:58:51 +0200 Subject: [PATCH 113/220] python313Packages.tencentcloud-sdk-python: 3.0.1386 -> 3.0.1387 Diff: https://github.com/TencentCloud/tencentcloud-sdk-python/compare/refs/tags/3.0.1386...refs/tags/3.0.1387 Changelog: https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3.0.1387/CHANGELOG.md --- .../python-modules/tencentcloud-sdk-python/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/tencentcloud-sdk-python/default.nix b/pkgs/development/python-modules/tencentcloud-sdk-python/default.nix index 45497b38789f..e4591f77f53d 100644 --- a/pkgs/development/python-modules/tencentcloud-sdk-python/default.nix +++ b/pkgs/development/python-modules/tencentcloud-sdk-python/default.nix @@ -10,7 +10,7 @@ buildPythonPackage rec { pname = "tencentcloud-sdk-python"; - version = "3.0.1386"; + version = "3.0.1387"; pyproject = true; disabled = pythonOlder "3.9"; @@ -19,7 +19,7 @@ buildPythonPackage rec { owner = "TencentCloud"; repo = "tencentcloud-sdk-python"; tag = version; - hash = "sha256-Xr+i29fKfoRkvj4LX1cVqfBcJJ5pvKgvnYeZgkdx1Vo="; + hash = "sha256-kBy5fSfD7LFpFH8X6p/ZP4DCAkDlBh+Yt5P7TZbIkzc="; }; build-system = [ setuptools ]; From 60d3f119556ab3e2e9e3f10a35f106a6a7d2ab85 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Mon, 26 May 2025 21:06:08 +0200 Subject: [PATCH 114/220] python313Packages.pyexploitdb: 0.2.81 -> 0.2.82 Changelog: https://github.com/Hackman238/pyExploitDb/blob/master/ChangeLog.md --- pkgs/development/python-modules/pyexploitdb/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/pyexploitdb/default.nix b/pkgs/development/python-modules/pyexploitdb/default.nix index f0af917126a1..b283032a9ff3 100644 --- a/pkgs/development/python-modules/pyexploitdb/default.nix +++ b/pkgs/development/python-modules/pyexploitdb/default.nix @@ -10,7 +10,7 @@ buildPythonPackage rec { pname = "pyexploitdb"; - version = "0.2.81"; + version = "0.2.82"; pyproject = true; disabled = pythonOlder "3.7"; @@ -18,7 +18,7 @@ buildPythonPackage rec { src = fetchPypi { pname = "pyExploitDb"; inherit version; - hash = "sha256-dUWt8PstzKVQaY1rwc7XgZHwXwctbCRAr3X0PSXr8XI="; + hash = "sha256-SA/7tQDYcpGKQQr/eD5U3sJUqxk/JW+NR5tv18Qijhg="; }; build-system = [ setuptools ]; From 1f258501586a2270e4b026a8e99cb5be2a57eac5 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Mon, 26 May 2025 21:06:56 +0200 Subject: [PATCH 115/220] python313Packages.pygitguardian: 1.21.0 -> 1.22.0 Diff: 
https://github.com/GitGuardian/py-gitguardian/compare/refs/tags/v1.21.0...refs/tags/v1.22.0 Changelog: https://github.com/GitGuardian/py-gitguardian/blob/v1.22.0/CHANGELOG.md --- pkgs/development/python-modules/pygitguardian/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/pygitguardian/default.nix b/pkgs/development/python-modules/pygitguardian/default.nix index 2e46ac552fcf..c9988971da3b 100644 --- a/pkgs/development/python-modules/pygitguardian/default.nix +++ b/pkgs/development/python-modules/pygitguardian/default.nix @@ -16,7 +16,7 @@ buildPythonPackage rec { pname = "pygitguardian"; - version = "1.21.0"; + version = "1.22.0"; pyproject = true; disabled = pythonOlder "3.8"; @@ -25,7 +25,7 @@ buildPythonPackage rec { owner = "GitGuardian"; repo = "py-gitguardian"; tag = "v${version}"; - hash = "sha256-pFbryvVwAQbhjNDZ0v+edvtWpkHZ1AVTz0tEsfYBXz8="; + hash = "sha256-AXgsB1ap233o6TP6htOFuQ++AubCRUqGbs5NWT0Qv+w="; }; pythonRelaxDeps = [ From 80e53f04014ab4010d878b089c4d536fb86ded8e Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Mon, 26 May 2025 19:31:53 +0000 Subject: [PATCH 116/220] kdsingleapplication: 1.1.0 -> 1.2.0 --- pkgs/by-name/kd/kdsingleapplication/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/kd/kdsingleapplication/package.nix b/pkgs/by-name/kd/kdsingleapplication/package.nix index ed53ce683a4a..cbf7906a0d0f 100644 --- a/pkgs/by-name/kd/kdsingleapplication/package.nix +++ b/pkgs/by-name/kd/kdsingleapplication/package.nix @@ -8,13 +8,13 @@ stdenv.mkDerivation rec { pname = "KDSingleApplication"; - version = "1.1.0"; + version = "1.2.0"; src = fetchFromGitHub { owner = "KDAB"; repo = "KDSingleApplication"; tag = "v${version}"; - hash = "sha256-Ymm+qOZMWULg7u5xEpGzcAfIrbWBQ3jsndnFSnh6/PA="; + hash = "sha256-rglt89Gw6OHXXVOEwf0TxezDzyHEvWepeGeup7fBlLs="; }; nativeBuildInputs = [ cmake ]; From c74fb2058ffb365e61b053dc42534bab8aba679d Mon Sep 17 00:00:00 2001 From: transcaffeine Date: Mon, 26 May 2025 22:02:06 +0200 Subject: [PATCH 117/220] victoriametrics: 1.117.1 -> 1.118.0 Release notes: https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.118.0 Changelog: https://github.com/VictoriaMetrics/VictoriaMetrics/compare/v1.117.1...v1.118.0 --- pkgs/by-name/vi/victoriametrics/package.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkgs/by-name/vi/victoriametrics/package.nix b/pkgs/by-name/vi/victoriametrics/package.nix index 2cb333913c17..5de81b3498a0 100644 --- a/pkgs/by-name/vi/victoriametrics/package.nix +++ b/pkgs/by-name/vi/victoriametrics/package.nix @@ -14,7 +14,7 @@ buildGoModule (finalAttrs: { pname = "VictoriaMetrics"; - version = "1.117.1"; + version = "1.118.0"; src = fetchFromGitHub { owner = "VictoriaMetrics"; From e4244ad358bcd57b2b14ad122dc7c28bf1eb5128 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Fri, 23 May 2025 12:54:43 +0200 Subject: [PATCH 118/220] checkov: 3.2.427 -> 3.2.432 Diff: https://github.com/bridgecrewio/checkov/compare/refs/tags/3.2.427...refs/tags/3.2.432 Changelog: https://github.com/bridgecrewio/checkov/releases/tag/3.2.432 --- pkgs/by-name/ch/checkov/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/ch/checkov/package.nix b/pkgs/by-name/ch/checkov/package.nix index a6c1a3310361..fd4d37ac3cbf 100644 --- a/pkgs/by-name/ch/checkov/package.nix +++ b/pkgs/by-name/ch/checkov/package.nix @@ -25,14 +25,14 @@ with py.pkgs; python3.pkgs.buildPythonApplication rec 
{ pname = "checkov"; - version = "3.2.427"; + version = "3.2.432"; pyproject = true; src = fetchFromGitHub { owner = "bridgecrewio"; repo = "checkov"; tag = version; - hash = "sha256-jdlTSWdojaM7M5g7yWCudZeCuQqaZZWFYJ0kWWup4ts="; + hash = "sha256-bB5M595/T/gM0GiBzJBq2LBs8HT+tHo03loHhrF4coA="; }; pythonRelaxDeps = [ From 406199ba3e051ff74a09b920e05090abab95dbb7 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Mon, 26 May 2025 21:03:00 +0200 Subject: [PATCH 119/220] checkov: 3.2.432 -> 3.2.433 Diff: https://github.com/bridgecrewio/checkov/compare/refs/tags/3.2.432...refs/tags/3.2.433 Changelog: https://github.com/bridgecrewio/checkov/releases/tag/3.2.433 --- pkgs/by-name/ch/checkov/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/ch/checkov/package.nix b/pkgs/by-name/ch/checkov/package.nix index fd4d37ac3cbf..997565c026e8 100644 --- a/pkgs/by-name/ch/checkov/package.nix +++ b/pkgs/by-name/ch/checkov/package.nix @@ -25,14 +25,14 @@ with py.pkgs; python3.pkgs.buildPythonApplication rec { pname = "checkov"; - version = "3.2.432"; + version = "3.2.433"; pyproject = true; src = fetchFromGitHub { owner = "bridgecrewio"; repo = "checkov"; tag = version; - hash = "sha256-bB5M595/T/gM0GiBzJBq2LBs8HT+tHo03loHhrF4coA="; + hash = "sha256-c9I3VDiKzk9Bybian3bX4OU3WUvHEHyQyGrO4//azqM="; }; pythonRelaxDeps = [ From 556709346b108ca6f60e67141533808585694db4 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 00:14:55 +0200 Subject: [PATCH 120/220] tgpt: 2.9.4 -> 2.10.0 Changelog: https://github.com/aandrew-me/tgpt/releases/tag/v2.10.0 --- pkgs/by-name/tg/tgpt/package.nix | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkgs/by-name/tg/tgpt/package.nix b/pkgs/by-name/tg/tgpt/package.nix index b009bb76ed5a..9917e4cac5ca 100644 --- a/pkgs/by-name/tg/tgpt/package.nix +++ b/pkgs/by-name/tg/tgpt/package.nix @@ -6,13 +6,13 @@ buildGoModule rec { pname = "tgpt"; - version = "2.9.4"; + version = "2.10.0"; src = fetchFromGitHub { owner = "aandrew-me"; repo = "tgpt"; tag = "v${version}"; - hash = "sha256-FCc1D/q+8i/ZFZDgtBoMpF+GEnHne4M5bazJWnF5GbE="; + hash = "sha256-q7dod5kKvKny4Zht6KpHpRa7N9Je+tmKVyn9PEde/+c="; }; vendorHash = "sha256-hPbvzhYHOxytQs3NkSVaZhFH0TbOlr4U/QiH+vemTrc="; @@ -24,8 +24,8 @@ buildGoModule rec { preCheck = '' # Remove test which need network access - rm providers/koboldai/koboldai_test.go - rm providers/phind/phind_test.go + rm src/providers/koboldai/koboldai_test.go + rm src/providers/phind/phind_test.go ''; meta = { From d9f2171834fa4d206fb47aa77aa05dbc295fa9ac Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Mon, 26 May 2025 22:44:04 +0000 Subject: [PATCH 121/220] reindeer: 2025.05.12.00 -> 2025.05.26.00 --- pkgs/by-name/re/reindeer/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/re/reindeer/package.nix b/pkgs/by-name/re/reindeer/package.nix index 672152e9f1a7..97fe361b4546 100644 --- a/pkgs/by-name/re/reindeer/package.nix +++ b/pkgs/by-name/re/reindeer/package.nix @@ -9,17 +9,17 @@ rustPlatform.buildRustPackage rec { pname = "reindeer"; - version = "2025.05.12.00"; + version = "2025.05.26.00"; src = fetchFromGitHub { owner = "facebookincubator"; repo = "reindeer"; tag = "v${version}"; - hash = "sha256-e6kP2vLJURjp+iCgyQGX3gHFaHyLhGzNJ1qjEcDBjz4="; + hash = "sha256-I5I5m9UutBMgX7PygPjMgglqvRfZxuWiyJ4l+77WYAQ="; }; useFetchCargoVendor = true; - cargoHash = "sha256-hpOcPMrgUXDUrTml8nl737NFe0WYPdIY66rU+QRqyx0="; + cargoHash = "sha256-i0HAT8L9rf0r/jOqDFe60PakXwHz9lr4gwXm0ZwN4No="; nativeBuildInputs = [ pkg-config ]; From 38985a6a946a8a6bf3fa95094672c3f4a372be68 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Mon, 26 May 2025 23:00:14 +0000 Subject: [PATCH 122/220] terraform-providers.datadog: 3.62.0 -> 3.63.0 --- .../networking/cluster/terraform-providers/providers.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/applications/networking/cluster/terraform-providers/providers.json b/pkgs/applications/networking/cluster/terraform-providers/providers.json index 1e832248c2f1..1f92924e92b3 100644 --- a/pkgs/applications/networking/cluster/terraform-providers/providers.json +++ b/pkgs/applications/networking/cluster/terraform-providers/providers.json @@ -317,13 +317,13 @@ "vendorHash": "sha256-ZCMSmOCPEMxCSpl3DjIUGPj1W/KNJgyjtHpmQ19JquA=" }, "datadog": { - "hash": "sha256-exndOGDZQqvh6m8sJg9/dIGup5SQruwE7qK9T0s/FJ8=", + "hash": "sha256-+C+pvw3ghriw3mR/lvpsSH0inTMPNwc6QAEtt6nXINw=", "homepage": "https://registry.terraform.io/providers/DataDog/datadog", "owner": "DataDog", "repo": "terraform-provider-datadog", - "rev": "v3.62.0", + "rev": "v3.63.0", "spdx": "MPL-2.0", - "vendorHash": "sha256-uLNBbgW4oA8Cq0mjjMQFh7g37V+Ea9ek+ugBYJww88I=" + "vendorHash": "sha256-FmNeAwJ/lZZEIGt5QlYm9Cqu2cgkXVa1B/ej+5/G7wg=" }, "deno": { "hash": "sha256-7IvJrhXMeAmf8e21QBdYNSJyVMEzLpat4Tm4zHWglW8=", From ff404d2d68d41685973590919e8b65fac2693493 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Mon, 26 May 2025 23:24:05 +0000 Subject: [PATCH 123/220] vi-mongo: 0.1.27 -> 0.1.28 --- pkgs/by-name/vi/vi-mongo/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/vi/vi-mongo/package.nix b/pkgs/by-name/vi/vi-mongo/package.nix index e949c68dfa51..d5bb129d22c5 100644 --- a/pkgs/by-name/vi/vi-mongo/package.nix +++ b/pkgs/by-name/vi/vi-mongo/package.nix @@ -8,16 +8,16 @@ buildGoModule rec { pname = "vi-mongo"; - version = "0.1.27"; + version = "0.1.28"; src = fetchFromGitHub { owner = "kopecmaciej"; repo = "vi-mongo"; tag = "v${version}"; - hash = "sha256-/hj2JMjBKl3HLd6Mfuz4UnaWbPKPYHYfqKPj3kjxLZg="; + hash = "sha256-vqwsFWU/DSxvmlHGG91MK8MqRYmSZrzB1ypjMNug6v0="; }; - vendorHash = "sha256-OVd2wIssVJHamWpNrK+piQFl9Lz0xgYnnz/4D5yl1D4="; + vendorHash = "sha256-Z1qvTde0TtoIKUppfDRcNx9qAFbAqFf3xIOTNQnPQng="; ldflags = [ "-s" From 7254412c80a1dce0b074895c47ffea66da55ef13 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 00:54:57 +0000 Subject: [PATCH 124/220] seaweedfs: 3.87 -> 3.88 --- pkgs/by-name/se/seaweedfs/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/se/seaweedfs/package.nix b/pkgs/by-name/se/seaweedfs/package.nix index 02f152cd6767..68f2a0c3c845 100644 --- a/pkgs/by-name/se/seaweedfs/package.nix +++ b/pkgs/by-name/se/seaweedfs/package.nix @@ -8,16 +8,16 @@ buildGoModule rec { pname = "seaweedfs"; - version = "3.87"; + version = "3.88"; src = fetchFromGitHub { owner = "seaweedfs"; repo = "seaweedfs"; rev = version; - hash = "sha256-B6MgmgxvbGTcJ4GHK+TT30sb++29gvr0Ce004YCGF2E="; + hash = "sha256-66yZtRF+gMSafcXQoUJ0bMuBJMDHXvImCWB3vMze/K8="; }; - vendorHash = "sha256-39MXGJvaKipqTRN5w9UKeRnkbsNgNx0eSdnRR/cnd4Y="; + vendorHash = "sha256-aSNmEY82ODnxtlnQG6dZWTQfZ+zKCqLtj0DfpPcu3ik="; subPackages = [ "weed" ]; From 406cdf59fd894754d80009cc480cfef02701f360 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 01:10:19 +0000 Subject: [PATCH 125/220] probe-rs-tools: 0.28.0 -> 0.29.0 --- pkgs/by-name/pr/probe-rs-tools/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/pr/probe-rs-tools/package.nix b/pkgs/by-name/pr/probe-rs-tools/package.nix index 3b72a79ef788..8da8a62ac711 100644 --- a/pkgs/by-name/pr/probe-rs-tools/package.nix +++ b/pkgs/by-name/pr/probe-rs-tools/package.nix @@ -10,17 +10,17 @@ rustPlatform.buildRustPackage rec { pname = "probe-rs-tools"; - version = "0.28.0"; + version = "0.29.0"; src = fetchFromGitHub { owner = "probe-rs"; repo = "probe-rs"; tag = "v${version}"; - hash = "sha256-CCB7NdLSg3Ve/iBSG7TuTbXKnGsevzhnELA2gN3n2t4="; + hash = "sha256-5EppB6XVUHM7TrvpdqdvojuFbjw8RTDOudpypVdLPbQ="; }; useFetchCargoVendor = true; - cargoHash = "sha256-jBkMUaC8aFCP0N8cbYDEJtJtVbagB+YAHge0IT8xm+c="; + cargoHash = "sha256-sdMRauSaDYMgpfAYhEBEqz0s9WHAZJLjijdvQqO6fMs="; buildAndTestSubdir = pname; From ee8a72746ad0c595ac809d0393cc31ddd3bd7c6a Mon Sep 17 00:00:00 2001 From: Saturn745 Date: Mon, 26 May 2025 19:53:27 -0700 Subject: [PATCH 126/220] lxc: fix nvidia runtime hook Fixes the NVIDIA runtime hook by including https://github.com/lxc/lxc/pull/4536 --- pkgs/by-name/lx/lxc/package.nix | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkgs/by-name/lx/lxc/package.nix b/pkgs/by-name/lx/lxc/package.nix index 5c14f5f64172..14b13e73527d 100644 --- a/pkgs/by-name/lx/lxc/package.nix +++ b/pkgs/by-name/lx/lxc/package.nix @@ -17,6 +17,7 @@ pkg-config, systemd, + fetchpatch, nix-update-script, }: @@ -57,6 +58,13 @@ stdenv.mkDerivation (finalAttrs: { # Fix hardcoded path of lxc-user-nic # This is needed to use unprivileged containers ./user-nic.diff + + # Fixes https://github.com/zabbly/incus/issues/81 + (fetchpatch { + name = "4536.patch"; + url = "https://patch-diff.githubusercontent.com/raw/lxc/lxc/pull/4536.patch"; + hash = "sha256-yEqK9deO2MhfPROPfBw44Z752Mc5bR8DBKl1KrGC+5c="; + }) ]; mesonFlags = [ From 56df0bf4ce3b96e6565c79adef1b8a8914b84f83 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 03:59:26 +0000 Subject: [PATCH 127/220] exploitdb: 2025-05-22 -> 2025-05-26 --- pkgs/by-name/ex/exploitdb/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/ex/exploitdb/package.nix b/pkgs/by-name/ex/exploitdb/package.nix index 4aec5276f880..42ba290d79e4 100644 --- a/pkgs/by-name/ex/exploitdb/package.nix +++ b/pkgs/by-name/ex/exploitdb/package.nix @@ -7,13 +7,13 @@ stdenv.mkDerivation rec { pname = "exploitdb"; - version = "2025-05-22"; + version = "2025-05-26"; src = fetchFromGitLab { owner = "exploit-database"; repo = "exploitdb"; rev = "refs/tags/${version}"; - hash = "sha256-2dnIchOJJrqqePgOxb7tWw0j1neTlXFyxT30qwTOkrc="; + hash = "sha256-e9a0bzlyqI3lR87X+S9XIVTpEv1a9RxlfKqyk6CsDGU="; }; nativeBuildInputs = [ makeWrapper ]; From 06e5169405846a9917c7974ce2f8fbe23144a3fa Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 06:05:36 +0000 Subject: [PATCH 128/220] nuclei-templates: 10.2.1 -> 10.2.2 --- pkgs/by-name/nu/nuclei-templates/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/nu/nuclei-templates/package.nix b/pkgs/by-name/nu/nuclei-templates/package.nix index 0e910eff0737..78563010798a 100644 --- a/pkgs/by-name/nu/nuclei-templates/package.nix +++ b/pkgs/by-name/nu/nuclei-templates/package.nix @@ -6,13 +6,13 @@ stdenvNoCC.mkDerivation rec { pname = "nuclei-templates"; - version = "10.2.1"; + version = "10.2.2"; src = fetchFromGitHub { owner = "projectdiscovery"; repo = "nuclei-templates"; tag = "v${version}"; - hash = "sha256-r2c6mmW/bLUGb8n1zvNJWrxyvSWBfPuTsU7tneeGaVE="; + hash = "sha256-FWOfh/W2Hy01Z7oEItLtWidbvEfwVBDU9KAaCoFO0AI="; }; installPhase = '' From cb731c653dc6792742a92923a52d6cf2f8591b40 Mon Sep 17 00:00:00 2001 From: Ivan Kovnatsky <75213+ivankovnatsky@users.noreply.github.com> Date: Tue, 27 May 2025 09:54:11 +0300 Subject: [PATCH 129/220] netdata: use lm_sensors only on Linux in postFixup step MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ```console error: … while calling the 'derivationStrict' builtin at :37:12: 36| 37| strict = derivationStrict drvAttrs; | ^ 38| … while evaluating derivation 'darwin-system-25.05.2456ff5' whose name attribute is located at /nix/store/ci0w02z7bzdvy889jac8g45xkv3lx9i0-source/pkgs/stdenv/generic/make-derivation.nix:480:13 … while evaluating attribute 'activationScript' of derivation 'darwin-system-25.05.2456ff5' at /nix/store/xjlh0pvwqz7qbhkv7y40a508ps8r0ffv-source/modules/system/default.nix:97:7: 96| 97| activationScript = cfg.activationScripts.script.text; | ^ 98| … while evaluating the option `system.activationScripts.script.text': … while evaluating definitions from `/nix/store/xjlh0pvwqz7qbhkv7y40a508ps8r0ffv-source/modules/system/activation-scripts.nix': … while evaluating the option `system.activationScripts.applications.text': … while evaluating definitions from `/nix/store/xjlh0pvwqz7qbhkv7y40a508ps8r0ffv-source/modules/system/applications.nix': (stack trace truncated; use '--show-trace' to show the full, detailed trace) error: Package ‘lm-sensors-3.6.0’ in /nix/store/ci0w02z7bzdvy889jac8g45xkv3lx9i0-source/pkgs/by-name/lm/lm_sensors/package.nix:97 is not available on the requested hostPlatform: hostPlatform.config = "arm64-apple-darwin" package.meta.platforms = [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "loongarch64-linux" "m68k-linux" "microblaze-linux" "microblazeel-linux" "mips-linux" "mips64-linux" 
"mips64el-linux" "mipsel-linux" "powerpc64-linux" "powerpc64le-linux" "riscv32-linux" "riscv64-linux" "s390-linux" "s390x-linux" "x86_64-linux" ] package.meta.badPlatforms = [ ] , refusing to evaluate. a) To temporarily allow packages that are unsupported for this system, you can use an environment variable for a single invocation of the nix tools. $ export NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM=1 Note: When using `nix shell`, `nix build`, `nix develop`, etc with a flake, then pass `--impure` in order to allow use of environment variables. b) For `nixos-rebuild` you can set { nixpkgs.config.allowUnsupportedSystem = true; } in configuration.nix to override this. c) For `nix-env`, `nix-build`, `nix-shell` or any other Nix command you can add { allowUnsupportedSystem = true; } to ~/.config/nixpkgs/config.nix. ``` --- pkgs/tools/system/netdata/default.nix | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkgs/tools/system/netdata/default.nix b/pkgs/tools/system/netdata/default.nix index 1c298715b44a..9eaa89fb2307 100644 --- a/pkgs/tools/system/netdata/default.nix +++ b/pkgs/tools/system/netdata/default.nix @@ -239,7 +239,9 @@ stdenv.mkDerivation (finalAttrs: { wrapProgram $out/bin/netdata-claim.sh --prefix PATH : ${lib.makeBinPath [ openssl ]} wrapProgram $out/libexec/netdata/plugins.d/cgroup-network-helper.sh --prefix PATH : ${lib.makeBinPath [ bash ]} wrapProgram $out/bin/netdatacli --set NETDATA_PIPENAME /run/netdata/ipc - substituteInPlace $out/lib/netdata/conf.d/go.d/sensors.conf --replace-fail '/usr/bin/sensors' '${lm_sensors}/bin/sensors' + ${lib.optionalString (stdenv.hostPlatform.isLinux) '' + substituteInPlace $out/lib/netdata/conf.d/go.d/sensors.conf --replace-fail '/usr/bin/sensors' '${lm_sensors}/bin/sensors' + ''} # Time to cleanup the output directory. unlink $out/sbin From 9fdef667f9d04bd3d82bb7593f01cc704328c92c Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 07:11:04 +0000 Subject: [PATCH 130/220] python3Packages.preshed: 3.0.9 -> 3.0.10 --- pkgs/development/python-modules/preshed/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/preshed/default.nix b/pkgs/development/python-modules/preshed/default.nix index 1ca058038fbf..dcd022909e81 100644 --- a/pkgs/development/python-modules/preshed/default.nix +++ b/pkgs/development/python-modules/preshed/default.nix @@ -11,14 +11,14 @@ buildPythonPackage rec { pname = "preshed"; - version = "3.0.9"; + version = "3.0.10"; format = "setuptools"; disabled = pythonOlder "3.7"; src = fetchPypi { inherit pname version; - hash = "sha256-chhjxSRP/NJlGtCSiVGix8d7EC9OEaJRrYXTfudiFmA="; + hash = "sha256-WlyOaF6UH0/+yX8fvzJpS4EHhYiRpLw0EH+smB2Clv8="; }; nativeBuildInputs = [ cython ]; From d90442cfc0ca9be6d0898be669a6178ef370d050 Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Tue, 27 May 2025 09:22:37 +0200 Subject: [PATCH 131/220] python314: 3.14.0-b1 -> 3.14.0-b2 https://docs.python.org/3.14/whatsnew/changelog.html#python-3-14-0-beta-2 --- pkgs/development/interpreters/python/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/interpreters/python/default.nix b/pkgs/development/interpreters/python/default.nix index c404bfe6d46b..65bfdfe6cfc2 100644 --- a/pkgs/development/interpreters/python/default.nix +++ b/pkgs/development/interpreters/python/default.nix @@ -93,9 +93,9 @@ major = "3"; minor = "14"; patch = "0"; - suffix = "b1"; + suffix = "b2"; }; - hash = "sha256-Ld0wp3yfYuBlzmSGZKJUubDAEbzaqMHCeHCH5kTL6zk="; + hash = "sha256-esnoSES7wKWo8feaN6aLO4yvKli0qlmZxJInyzbnDqY="; inherit passthruFun; }; # Minimal versions of Python (built without optional dependencies) From eb8c0643e91303bbcc799cf0578c46b713b9e1f4 Mon Sep 17 00:00:00 2001 From: Matthias Beyer Date: Tue, 27 May 2025 09:41:13 +0200 Subject: [PATCH 132/220] zoxide: Add myself as maintainer Signed-off-by: Matthias Beyer --- pkgs/by-name/zo/zoxide/package.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/pkgs/by-name/zo/zoxide/package.nix b/pkgs/by-name/zo/zoxide/package.nix index 140990537964..ca4ffbeb457b 100644 --- a/pkgs/by-name/zo/zoxide/package.nix +++ b/pkgs/by-name/zo/zoxide/package.nix @@ -49,6 +49,7 @@ rustPlatform.buildRustPackage rec { ysndr cole-h SuperSandro2000 + matthiasbeyer ]; mainProgram = "zoxide"; }; From adc43c75bb19be21b00b1175aaf04559fbfe12f0 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 08:00:27 +0000 Subject: [PATCH 133/220] python3Packages.aiovodafone: 0.11.0 -> 1.0.0 --- pkgs/development/python-modules/aiovodafone/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/aiovodafone/default.nix b/pkgs/development/python-modules/aiovodafone/default.nix index de779c57d848..4d1e857a25e0 100644 --- a/pkgs/development/python-modules/aiovodafone/default.nix +++ b/pkgs/development/python-modules/aiovodafone/default.nix @@ -12,7 +12,7 @@ buildPythonPackage rec { pname = "aiovodafone"; - version = "0.11.0"; + version = "1.0.0"; pyproject = true; disabled = pythonOlder "3.12"; @@ -21,7 +21,7 @@ buildPythonPackage rec { owner = "chemelli74"; repo = "aiovodafone"; tag = "v${version}"; - hash = "sha256-/H3v5ZRAaWWouow82nr/+TOuL+IX6Ez+Mjk/C120MWM="; + hash = "sha256-qjhezzetTKBaPeToQ9TSZ3+epgW/nMsADVL3Hb4kTBU="; }; build-system = [ poetry-core ]; From 6c901266830d0059f47725ab1f8c7ca703d1a813 Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Tue, 27 May 2025 11:28:58 +0200 Subject: [PATCH 134/220] esphome: 2025.5.0 -> 2025.5.1 https://github.com/esphome/esphome/releases/tag/2025.5.1 --- pkgs/by-name/es/esphome/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/es/esphome/package.nix b/pkgs/by-name/es/esphome/package.nix index 42ce5755059e..b88b9dd967fa 100644 --- a/pkgs/by-name/es/esphome/package.nix +++ b/pkgs/by-name/es/esphome/package.nix @@ -33,14 +33,14 @@ let in python.pkgs.buildPythonApplication rec { pname = "esphome"; - version = "2025.5.0"; + version = "2025.5.1"; pyproject = true; src = fetchFromGitHub { owner = pname; repo = pname; tag = version; - hash = "sha256-BcPdgAvRR7zataL4KOhLAvQaQnS60z8UZ9xdIK7ydz4="; + hash = "sha256-z4FwymWFjyqNx95r2o7LLCmytRQYkogfCKiUFNyGOuA="; }; build-systems = with python.pkgs; [ From cbb618ac1524f9b7a17ce019b55dafb7cadc1a4a Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 10:06:15 +0000 Subject: [PATCH 135/220] python3Packages.aiolifx: 1.1.4 -> 1.1.5 --- pkgs/development/python-modules/aiolifx/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/aiolifx/default.nix b/pkgs/development/python-modules/aiolifx/default.nix index 1f8efb348851..062fb683e1fa 100644 --- a/pkgs/development/python-modules/aiolifx/default.nix +++ b/pkgs/development/python-modules/aiolifx/default.nix @@ -13,14 +13,14 @@ buildPythonPackage rec { pname = "aiolifx"; - version = "1.1.4"; + version = "1.1.5"; pyproject = true; disabled = pythonOlder "3.7"; src = fetchPypi { inherit pname version; - hash = "sha256-7T7nHmnK1ZLoIgi6e8VVrq6NVAmL7tVi+F/6G3Ayh2Q="; + hash = "sha256-KRUe8qn/3jYKxgKvqPeA6oXZF3IYfRBBftWxjLhe/ow="; }; build-system = [ setuptools ]; From 9ef789e7d0946e8704cd250e28d03821d0139834 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 10:30:47 +0000 Subject: [PATCH 136/220] terraform-providers.utils: 1.29.0 -> 1.30.0 --- .../networking/cluster/terraform-providers/providers.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/applications/networking/cluster/terraform-providers/providers.json b/pkgs/applications/networking/cluster/terraform-providers/providers.json index 1e832248c2f1..306504a8d773 100644 --- a/pkgs/applications/networking/cluster/terraform-providers/providers.json +++ b/pkgs/applications/networking/cluster/terraform-providers/providers.json @@ -1391,13 +1391,13 @@ "vendorHash": null }, "utils": { - "hash": "sha256-BnC5ihbOnua4ddTzM8mvWbKz5L13R2NT9c68teVLWo0=", + "hash": "sha256-vCdPG8cZUdFhs1OmqDlgCDqBdyFiL99p6I8JhL8C6lY=", "homepage": "https://registry.terraform.io/providers/cloudposse/utils", "owner": "cloudposse", "repo": "terraform-provider-utils", - "rev": "v1.29.0", + "rev": "v1.30.0", "spdx": "Apache-2.0", - "vendorHash": "sha256-rHJabyfgu3wU79h3DHHYQauFmcR/SDuikauBF+CybZA=" + "vendorHash": "sha256-giqZi1CmuyANNwzW+y9BUUUEfBhFZKkVGAvIPVvZnzE=" }, "vault": { "hash": "sha256-6gCpXzvF4p2otwo3dJ8c+EvzbsMZfzVZymb41+hEZoM=", From 0cf04bdcf56aa7ced9c678447b0c7225c19ead1d Mon Sep 17 00:00:00 2001 From: emilylange Date: Tue, 27 May 2025 13:11:10 +0200 Subject: [PATCH 137/220] miniflux: 2.2.8 -> 2.2.9 https://miniflux.app/releases/2.2.9.html diff: https://github.com/miniflux/v2/compare/2.2.8...2.2.9 --- pkgs/by-name/mi/miniflux/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/mi/miniflux/package.nix b/pkgs/by-name/mi/miniflux/package.nix index 75397b7ecc80..3b8b68c3d549 100644 --- a/pkgs/by-name/mi/miniflux/package.nix +++ b/pkgs/by-name/mi/miniflux/package.nix @@ -9,16 +9,16 @@ buildGoModule rec { pname = "miniflux"; - version = "2.2.8"; + version = "2.2.9"; src = fetchFromGitHub { owner = "miniflux"; repo = "v2"; tag = version; - hash = "sha256-AQ6HVRVlWt1D8fA4Z2FH7VIEKydDva7txwa/9Rfq0Ho="; + hash = "sha256-pp+QaofbU6Vkh2ifQLpUq+dbQTx/jX2JgM/9uji13tU="; }; - vendorHash = "sha256-9I0/dyjanuV8oZystox9RY/gGx1SZuFRTghArv5aJ64="; + vendorHash = "sha256-6tgZdwr9g/VCnLUEmwAl8vzOzQu6SEGV9PhTp47eXds="; nativeBuildInputs = [ installShellFiles ]; From 5c9a8fe104a91cc8415e3aeddd6b0dc4df28860f Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 11:59:39 +0000 Subject: [PATCH 138/220] phraze: 0.3.19 -> 0.3.23 --- pkgs/by-name/ph/phraze/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ph/phraze/package.nix b/pkgs/by-name/ph/phraze/package.nix index 4d955ec9fa12..6b54abdc47f0 100644 --- a/pkgs/by-name/ph/phraze/package.nix +++ b/pkgs/by-name/ph/phraze/package.nix @@ -10,19 +10,19 @@ rustPlatform.buildRustPackage rec { pname = "phraze"; - version = "0.3.19"; + version = "0.3.23"; src = fetchFromGitHub { owner = "sts10"; repo = "phraze"; rev = "v${version}"; - hash = "sha256-4RdPN2l0tQbxgTVxwdl0APYD8h9DrF9f5MIQkcozt48="; + hash = "sha256-CQhzH6x8Fxx0ynHbLh8FTY7urbiXHrvTbMh+/TAwS2A="; }; doCheck = true; useFetchCargoVendor = true; - cargoHash = "sha256-hXo1pdbOZ/qNDTm3CcmOIgef7u6Imn25luJdsCVeg5I="; + cargoHash = "sha256-d4qj4rvH5CyHTH3RWDV6ADSGK/kz6yQLp3JjQdb6Wyo="; nativeBuildInputs = [ installShellFiles ]; From 9aefb7e19068384cd008b7f8bf25f3ee903955ef Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 12:03:41 +0000 Subject: [PATCH 139/220] gitlab-ci-local: 4.59.0 -> 4.60.1 --- pkgs/by-name/gi/gitlab-ci-local/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/gi/gitlab-ci-local/package.nix b/pkgs/by-name/gi/gitlab-ci-local/package.nix index bfe81443feaa..6de5e0960772 100644 --- a/pkgs/by-name/gi/gitlab-ci-local/package.nix +++ b/pkgs/by-name/gi/gitlab-ci-local/package.nix @@ -12,16 +12,16 @@ buildNpmPackage rec { pname = "gitlab-ci-local"; - version = "4.59.0"; + version = "4.60.1"; src = fetchFromGitHub { owner = "firecow"; repo = "gitlab-ci-local"; rev = version; - hash = "sha256-4C+96rPtEFDJc08D5qXEuNvoDWJR5drvsvZ6mCGd5Vo="; + hash = "sha256-6v5iyQCP+3bJdG9uvPAsMaJ7mW2xj1kMhn8h2eLsl28="; }; - npmDepsHash = "sha256-brzCPG/keYOGfjqnj8mP28OdSAKTbDQWBxN4oMLHoNU="; + npmDepsHash = "sha256-P09uxOtlY9AAJyKLTdnFOfw0H6V4trr2hznEonOO58E="; nativeBuildInputs = [ makeBinaryWrapper From 95d28b0121533ee2f16e02b7144f61e12b769c67 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 12:25:41 +0000 Subject: [PATCH 140/220] easytier: 2.2.4 -> 2.3.0 --- pkgs/by-name/ea/easytier/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ea/easytier/package.nix b/pkgs/by-name/ea/easytier/package.nix index 39871a0e8bea..7c81311dc3a9 100644 --- a/pkgs/by-name/ea/easytier/package.nix +++ b/pkgs/by-name/ea/easytier/package.nix @@ -10,18 +10,18 @@ rustPlatform.buildRustPackage rec { pname = "easytier"; - version = "2.2.4"; + version = "2.3.0"; src = fetchFromGitHub { owner = "EasyTier"; repo = "EasyTier"; tag = "v${version}"; - hash = "sha256-YrWuNHpNDs1VVz6Sahi2ViPT4kcJf10UUMRWEs4Y0xc="; + hash = "sha256-F///8C7lyJZj5+u80nauDdrPFrEE40s0DeNzQeblImw="; }; useFetchCargoVendor = true; - cargoHash = "sha256-uUmF4uIhSx+byG+c4hlUuuy+O87Saw8wRJ5OGk3zaPA="; + cargoHash = "sha256-f64tOU8AKC14tqX9Q3MLa7/pmIuI4FeFGOct8ZTAe+k="; nativeBuildInputs = [ protobuf From 95d90c46e9b620c8fbc9b0d140f85bde9e7b0dc8 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 12:34:38 +0000 Subject: [PATCH 141/220] fw: 2.20.0 -> 2.21.0 --- pkgs/by-name/fw/fw/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/fw/fw/package.nix b/pkgs/by-name/fw/fw/package.nix index 1c362653909f..e178d77dbc3e 100644 --- a/pkgs/by-name/fw/fw/package.nix +++ b/pkgs/by-name/fw/fw/package.nix @@ -10,17 +10,17 @@ rustPlatform.buildRustPackage rec { pname = "fw"; - version = "2.20.0"; + version = "2.21.0"; src = fetchFromGitHub { owner = "brocode"; repo = "fw"; rev = "v${version}"; - hash = "sha256-bq8N49qArdF0EFIGiK4lCsC0CZxwmeo0R8OiehrifTg="; + hash = "sha256-tqtiAw4+bnCJMF37SluAE9NM55MAjBGkJTvGLcmYFnA="; }; useFetchCargoVendor = true; - cargoHash = "sha256-sU7PfD77Sqi1Vrq2DgkkBF1bzL8d+/csa60CtQ7itSQ="; + cargoHash = "sha256-B32GegI3rvame0Ds+8+oBVUbcNhr2kwm3oVVxng8BZY="; nativeBuildInputs = [ pkg-config From 32358e07e987fddccf713402069c378cff6afc9d Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 12:35:19 +0000 Subject: [PATCH 142/220] cargo-public-api: 0.47.0 -> 0.47.1 --- pkgs/by-name/ca/cargo-public-api/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ca/cargo-public-api/package.nix b/pkgs/by-name/ca/cargo-public-api/package.nix index 204189edcb52..2003550946e9 100644 --- a/pkgs/by-name/ca/cargo-public-api/package.nix +++ b/pkgs/by-name/ca/cargo-public-api/package.nix @@ -9,15 +9,15 @@ rustPlatform.buildRustPackage rec { pname = "cargo-public-api"; - version = "0.47.0"; + version = "0.47.1"; src = fetchCrate { inherit pname version; - hash = "sha256-g0kaJ3HPFeS5PvWQfUTanxCgm9sduW9nBx/N61kt3ZI="; + hash = "sha256-xDMOrL9yyaEEwPhcrkPugVMTyKW4T6X1yE4tN9dmPas="; }; useFetchCargoVendor = true; - cargoHash = "sha256-jQx4VCarfbdTXOE/GAAzxeXf7xVwEaXDPhw6ywBR3wA="; + cargoHash = "sha256-HhYGc0S/i6KWZsv4E1NTkZb+jdUkcKDP/c0hdVTHJXE="; nativeBuildInputs = [ pkg-config ]; From b9ecd30d81d85c47df3d8b60c3c97f479eeaabab Mon Sep 17 00:00:00 2001 From: liberodark Date: Thu, 13 Feb 2025 10:44:29 +0100 Subject: [PATCH 143/220] nixos/postfix: add slow domain --- nixos/modules/services/mail/postfix.nix | 52 +++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 4 deletions(-) diff --git a/nixos/modules/services/mail/postfix.nix b/nixos/modules/services/mail/postfix.nix index 34c236dcd2a6..4936edc3dd25 100644 --- a/nixos/modules/services/mail/postfix.nix +++ b/nixos/modules/services/mail/postfix.nix @@ -13,7 +13,7 @@ let haveAliases = cfg.postmasterAlias != "" || cfg.rootAlias != "" || cfg.extraAliases != ""; haveCanonical = cfg.canonical != ""; - haveTransport = cfg.transport != ""; + haveTransport = cfg.transport != "" || (cfg.enableSlowDomains && cfg.slowDomains != [ ]); haveVirtual = cfg.virtual != ""; haveLocalRecipients = cfg.localRecipients != null; @@ -319,13 +319,20 @@ let aliasesFile = pkgs.writeText "postfix-aliases" aliases; canonicalFile = pkgs.writeText "postfix-canonical" cfg.canonical; virtualFile = pkgs.writeText "postfix-virtual" cfg.virtual; + transportFile = pkgs.writeText "postfix-transport" ( + lib.optionalString (cfg.enableSlowDomains && cfg.slowDomains != [ ]) ( + lib.concatMapStrings (domain: '' + ${domain} slow: + '') cfg.slowDomains + ) + + cfg.transport + ); localRecipientMapFile = pkgs.writeText "postfix-local-recipient-map" ( lib.concatMapStrings (x: x + " ACCEPT\n") cfg.localRecipients ); checkClientAccessFile = pkgs.writeText "postfix-check-client-access" cfg.dnsBlacklistOverrides; mainCfFile = pkgs.writeText "postfix-main.cf" mainCf; masterCfFile = pkgs.writeText "postfix-master.cf" masterCfContent; - transportFile = pkgs.writeText "postfix-transport" cfg.transport; headerChecksFile = pkgs.writeText "postfix-header-checks" headerChecks; in @@ -550,6 +557,32 @@ in ''; }; + enableSlowDomains = lib.mkEnableOption "slow domains feature for rate limiting specific domains"; + + slowDomains = lib.mkOption { + type = with lib.types; listOf str; + default = [ ]; + example = [ + "orange.fr" + "gmail.com" + ]; + description = "List of domains to be rate-limited using the slow transport."; + }; + + slowDomainsConfig = { + defaultDestinationRateDelay = lib.mkOption { + type = lib.types.str; + default = "5s"; + description = "Default rate delay for destinations."; + }; + + defaultDestinationConcurrencyLimit = lib.mkOption { + type = lib.types.int; + default = 3; + description = "Concurrency limit for slow destinations."; + }; + }; + aliasMapType = lib.mkOption { type = with lib.types; @@ -985,7 
+1018,10 @@ in smtpd_tls_key_file = cfg.sslKey; smtpd_tls_security_level = lib.mkDefault "may"; - + } + // lib.optionalAttrs cfg.enableSlowDomains { + default_destination_rate_delay = cfg.slowDomainsConfig.defaultDestinationRateDelay; + default_destination_concurrency_limit = cfg.slowDomainsConfig.defaultDestinationConcurrencyLimit; }; services.postfix.masterConfig = @@ -1077,6 +1113,14 @@ in lib.concatLists (lib.mapAttrsToList mkKeyVal cfg.submissionOptions); }; } + // lib.optionalAttrs cfg.enableSlowDomains { + slow = { + command = "smtp"; + type = "unix"; + private = true; + maxproc = 2; + }; + } // lib.optionalAttrs cfg.enableSmtp { smtp_inet = { name = "smtp"; @@ -1128,7 +1172,7 @@ in (lib.mkIf haveCanonical { services.postfix.mapFiles.canonical = canonicalFile; }) - (lib.mkIf haveTransport { + (lib.mkIf (haveTransport || (cfg.enableSlowDomains && cfg.slowDomains != [ ])) { services.postfix.mapFiles.transport = transportFile; }) (lib.mkIf haveVirtual { From ca97108e88eefa1ee6c11e9965e1daedaaf27e0c Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 12:46:25 +0000 Subject: [PATCH 144/220] terraform-providers.routeros: 1.85.0 -> 1.85.1 --- .../networking/cluster/terraform-providers/providers.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/applications/networking/cluster/terraform-providers/providers.json b/pkgs/applications/networking/cluster/terraform-providers/providers.json index 1e832248c2f1..21c0bc0424d5 100644 --- a/pkgs/applications/networking/cluster/terraform-providers/providers.json +++ b/pkgs/applications/networking/cluster/terraform-providers/providers.json @@ -1111,13 +1111,13 @@ "vendorHash": "sha256-KezwDRmQQj0MnmsVlrX1OhNG6oMgw8fCxX5VFGdUynw=" }, "routeros": { - "hash": "sha256-/tTd8EzA1/Js7S3eGg3L9AY9dyoIA3lFSU6VlT6p4us=", + "hash": "sha256-vL1ijiP+WDe8nqtSudOuOTPohHe8JRU6wF4el5P/pWg=", "homepage": "https://registry.terraform.io/providers/terraform-routeros/routeros", "owner": "terraform-routeros", "repo": "terraform-provider-routeros", - "rev": "v1.85.0", + "rev": "v1.85.1", "spdx": "MPL-2.0", - "vendorHash": "sha256-DDmJ/mEOd94E7DqLKjT6Y4XkpVdXyKeAaE/Wj6M3vuw=" + "vendorHash": "sha256-ysmNy+xojcHPSs++HofOxBKg1AlUO7taYVIcsmW/2kM=" }, "rundeck": { "hash": "sha256-cf+0qXpgxIsc/JbB7+u3MpmWFwUmpsinp1uARRhuBw0=", From 6ce89676d08a75134af8196c12465aef811980bd Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 12:46:36 +0000 Subject: [PATCH 145/220] trickest-cli: 2.0.2 -> 2.1.0 --- pkgs/by-name/tr/trickest-cli/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/tr/trickest-cli/package.nix b/pkgs/by-name/tr/trickest-cli/package.nix index 293cd3470ec2..1692571a4208 100644 --- a/pkgs/by-name/tr/trickest-cli/package.nix +++ b/pkgs/by-name/tr/trickest-cli/package.nix @@ -6,13 +6,13 @@ buildGoModule rec { pname = "trickest-cli"; - version = "2.0.2"; + version = "2.1.0"; src = fetchFromGitHub { owner = "trickest"; repo = "trickest-cli"; tag = "v${version}"; - hash = "sha256-b0UiZEuuNqjY43xhwm01PtHTe2YMx6AHLJk336NB0no="; + hash = "sha256-EyUeYlWQWCGmCoQpuYXa9h93rXmTRmtSqIDrQRrTQgA="; }; vendorHash = "sha256-Ae0fNzYOAeCMrNFVhw4VvG/BkOMcguIMiBvLGt7wxEo="; From cce9ebb6e819a883d15e25dcbba25216933b1a0f Mon Sep 17 00:00:00 2001 From: liberodark Date: Tue, 27 May 2025 15:00:18 +0200 Subject: [PATCH 146/220] glpi-agent: 1.11 -> 1.14 --- pkgs/by-name/gl/glpi-agent/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/gl/glpi-agent/package.nix b/pkgs/by-name/gl/glpi-agent/package.nix index 2e9ffec1a146..d886bfdf9c38 100644 --- a/pkgs/by-name/gl/glpi-agent/package.nix +++ b/pkgs/by-name/gl/glpi-agent/package.nix @@ -15,13 +15,13 @@ perlPackages.buildPerlPackage rec { pname = "glpi-agent"; - version = "1.11"; + version = "1.14"; src = fetchFromGitHub { owner = "glpi-project"; repo = "glpi-agent"; tag = version; - hash = "sha256-WdQ+/ZnMCRqLZK64oJNoR9dtMPq+CghsA8NUwt3EpjA="; + hash = "sha256-6q+JcTFZlZjtMaQKUvCwE9Sjw9662ZXl78kha0tEFv4="; }; postPatch = '' From c7f86ba1322f90ea747d652fac9daf0a31d7342d Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 13:09:36 +0000 Subject: [PATCH 147/220] pgroll: 0.12.0 -> 0.13.0 --- pkgs/by-name/pg/pgroll/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/pg/pgroll/package.nix b/pkgs/by-name/pg/pgroll/package.nix index 18f4647e8484..a68235ea8f98 100644 --- a/pkgs/by-name/pg/pgroll/package.nix +++ b/pkgs/by-name/pg/pgroll/package.nix @@ -8,13 +8,13 @@ buildGoModule rec { pname = "pgroll"; - version = "0.12.0"; + version = "0.13.0"; src = fetchFromGitHub { owner = "xataio"; repo = "pgroll"; tag = "v${version}"; - hash = "sha256-hmFCXVlcqvOXbDkHcsWcdVoxZoMe+Gaji011kCqU0lI="; + hash = "sha256-mueZ2x7yB9jrmyCI2Doox4rLUDUhwqvUUcotDvcLqvo="; }; proxyVendor = true; From 6e75ca1faa4128a3538ca1639491dd0ce31b0567 Mon Sep 17 00:00:00 2001 From: Thomas Heijligen Date: Fri, 16 May 2025 22:04:44 +0200 Subject: [PATCH 148/220] gnatprove: fix building with gcc-14.3 - Fix extracting of gnat sources When the gnat.cc.version does not match the gcc version within the tarball, the postPatch phase could't extract the needed sources. Use wildcards to overcome this issue. 
- Changes for gcc-14.3 broke the build Backporting a patch from spark2014 master to fsf-14 https://github.com/AdaCore/spark2014/issues/58 --- ...egory-change-for-N_Formal_Package_De.patch | 33 +++++++++++++++++++ .../ada-modules/gnatprove/default.nix | 7 ++-- 2 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 pkgs/development/ada-modules/gnatprove/0003-Adjust-after-category-change-for-N_Formal_Package_De.patch diff --git a/pkgs/development/ada-modules/gnatprove/0003-Adjust-after-category-change-for-N_Formal_Package_De.patch b/pkgs/development/ada-modules/gnatprove/0003-Adjust-after-category-change-for-N_Formal_Package_De.patch new file mode 100644 index 000000000000..aea33821f045 --- /dev/null +++ b/pkgs/development/ada-modules/gnatprove/0003-Adjust-after-category-change-for-N_Formal_Package_De.patch @@ -0,0 +1,33 @@ +From 3c06fb993ae628b5069c1f3e23f11c53815e1cbe Mon Sep 17 00:00:00 2001 +From: Eric Botcazou +Date: Sat, 8 Mar 2025 00:09:57 +0100 +Subject: [PATCH] Adjust after category change for N_Formal_Package_Declaration + +Issue: eng/toolchain/gnat#1354 +--- + src/why/gnat2why-borrow_checker.adb | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/why/gnat2why-borrow_checker.adb b/src/why/gnat2why-borrow_checker.adb +index a97f225b06..f3ab8be3e9 100644 +--- a/src/why/gnat2why-borrow_checker.adb ++++ b/src/why/gnat2why-borrow_checker.adb +@@ -1693,6 +1693,7 @@ procedure Check_Declaration (Decl : Node_Id) is + -- Ignored constructs for pointer checking + + when N_Formal_Object_Declaration ++ | N_Formal_Package_Declaration + | N_Formal_Type_Declaration + | N_Incomplete_Type_Declaration + | N_Private_Extension_Declaration +@@ -3326,7 +3327,6 @@ procedure Check_Node (N : Node_Id) is + | N_Empty + | N_Enumeration_Representation_Clause + | N_Exception_Renaming_Declaration +- | N_Formal_Package_Declaration + | N_Formal_Subprogram_Declaration + | N_Freeze_Entity + | N_Freeze_Generic_Entity +-- +2.48.1 + diff --git a/pkgs/development/ada-modules/gnatprove/default.nix b/pkgs/development/ada-modules/gnatprove/default.nix index c9377237be8c..c59c77c3a4cb 100644 --- a/pkgs/development/ada-modules/gnatprove/default.nix +++ b/pkgs/development/ada-modules/gnatprove/default.nix @@ -64,6 +64,9 @@ let # Suppress warnings on aarch64: https://github.com/AdaCore/spark2014/issues/54 ./0002-mute-aarch64-warnings.patch + + # Changes to the GNAT frontend: https://github.com/AdaCore/spark2014/issues/58 + ./0003-Adjust-after-category-change-for-N_Formal_Package_De.patch ]; commit_date = "2024-01-11"; }; @@ -121,8 +124,8 @@ stdenv.mkDerivation { postPatch = '' # gnat2why/gnat_src points to the GNAT sources - tar xf ${gnat.cc.src} gcc-${gnat.cc.version}/gcc/ada - mv gcc-${gnat.cc.version}/gcc/ada gnat2why/gnat_src + tar xf ${gnat.cc.src} --wildcards 'gcc-*/gcc/ada' + mv gcc-*/gcc/ada gnat2why/gnat_src ''; configurePhase = '' From 2dff575035f9706350b2733e247335e7f4eabc3d Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 13:11:36 +0000 Subject: [PATCH 149/220] python3Packages.murmurhash: 1.0.12 -> 1.0.13 --- pkgs/development/python-modules/murmurhash/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/murmurhash/default.nix b/pkgs/development/python-modules/murmurhash/default.nix index dc3aa8a32468..a508e3257ff8 100644 --- a/pkgs/development/python-modules/murmurhash/default.nix +++ b/pkgs/development/python-modules/murmurhash/default.nix @@ -8,14 +8,14 @@ buildPythonPackage rec { pname = "murmurhash"; - version = "1.0.12"; + version = "1.0.13"; format = "setuptools"; disabled = pythonOlder "3.6"; src = fetchPypi { inherit pname version; - hash = "sha256-Rnt+4xwfefRtAENqGVf8UqDlgBNp3S8w63ZV84BzW18="; + hash = "sha256-c3JG1B7gD/dLB7C9HwiIvjBNIDzmaOZCyGqmTt4w+Lc="; }; postPatch = '' From 48b6bcdac80d6cb1f3a8e34ca7afb8a43cb88a97 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 15:23:21 +0200 Subject: [PATCH 150/220] python313Packages.tencentcloud-sdk-python: 3.0.1387 -> 3.0.1388 Diff: https://github.com/TencentCloud/tencentcloud-sdk-python/compare/refs/tags/3.0.1387...refs/tags/3.0.1388 Changelog: https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3.0.1388/CHANGELOG.md --- .../python-modules/tencentcloud-sdk-python/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/tencentcloud-sdk-python/default.nix b/pkgs/development/python-modules/tencentcloud-sdk-python/default.nix index e4591f77f53d..7557d4b46eb2 100644 --- a/pkgs/development/python-modules/tencentcloud-sdk-python/default.nix +++ b/pkgs/development/python-modules/tencentcloud-sdk-python/default.nix @@ -10,7 +10,7 @@ buildPythonPackage rec { pname = "tencentcloud-sdk-python"; - version = "3.0.1387"; + version = "3.0.1388"; pyproject = true; disabled = pythonOlder "3.9"; @@ -19,7 +19,7 @@ buildPythonPackage rec { owner = "TencentCloud"; repo = "tencentcloud-sdk-python"; tag = version; - hash = "sha256-kBy5fSfD7LFpFH8X6p/ZP4DCAkDlBh+Yt5P7TZbIkzc="; + hash = "sha256-gUioBNnOGZgoURnkO4Yh4uok80rTMZcZY1M2lCdHJJ0="; }; build-system = [ setuptools ]; From c8fc769d9e234d098e2de3676f5562fdab18130e Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 15:24:35 +0200 Subject: [PATCH 151/220] python312Packages.identify: 2.6.10 -> 2.6.12 Diff: https://github.com/pre-commit/identify/compare/refs/tags/v2.6.10...refs/tags/v2.6.12 --- pkgs/development/python-modules/identify/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/identify/default.nix b/pkgs/development/python-modules/identify/default.nix index 2de77a776357..f59607f9981e 100644 --- a/pkgs/development/python-modules/identify/default.nix +++ b/pkgs/development/python-modules/identify/default.nix @@ -11,7 +11,7 @@ buildPythonPackage rec { pname = "identify"; - version = "2.6.10"; + version = "2.6.12"; pyproject = true; disabled = pythonOlder "3.9"; @@ -20,7 +20,7 @@ buildPythonPackage rec { owner = "pre-commit"; repo = "identify"; tag = "v${version}"; - hash = "sha256-PI7EDA6CZs6nGcggCt008LaQtJLlkAHj0Pnx44plggY="; + hash = "sha256-zV9NRHFh/bfbg+pO0xX5aXunc1y4aGfKDugyCFLj/xA="; }; build-system = [ setuptools ]; From 89d004d46b80987a2462bbf690d5a42496cb9df0 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 15:25:12 +0200 Subject: [PATCH 152/220] python312Packages.http-sf: 1.0.3 -> 1.0.4 Diff: 
https://github.com/mnot/http-sf/compare/refs/tags/v1.0.3...refs/tags/v1.0.4 Changelog: https://github.com/mnot/http-sf/releases/tag/v1.0.4 --- pkgs/development/python-modules/http-sf/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/http-sf/default.nix b/pkgs/development/python-modules/http-sf/default.nix index cddce17259ec..fcbde1e7d21d 100644 --- a/pkgs/development/python-modules/http-sf/default.nix +++ b/pkgs/development/python-modules/http-sf/default.nix @@ -9,7 +9,7 @@ buildPythonPackage rec { pname = "http-sf"; - version = "1.0.3"; + version = "1.0.4"; pyproject = true; disabled = pythonOlder "3.9"; @@ -18,7 +18,7 @@ buildPythonPackage rec { owner = "mnot"; repo = "http-sf"; tag = "v${version}"; - hash = "sha256-P4dr0prd4FeMdHP4vu6SBWNjYLF/gPAkkL8xyBMtDxQ="; + hash = "sha256-swstFntoox9cafLZ5tj02uFoReIRf+9xGOxHvga+Q5o="; }; build-system = [ setuptools ]; From 3ade2e5c695a33f9e31782b0cf807227e4cbec97 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 13:27:40 +0000 Subject: [PATCH 153/220] amp-cli: 0.0.1747886591-g90f24f -> 0.0.1748347293-g7a57b5 --- pkgs/by-name/am/amp-cli/package-lock.json | 28 +++++++++++++++++++---- pkgs/by-name/am/amp-cli/package.nix | 6 ++--- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/pkgs/by-name/am/amp-cli/package-lock.json b/pkgs/by-name/am/amp-cli/package-lock.json index 4af359c48208..13a4e829e76e 100644 --- a/pkgs/by-name/am/amp-cli/package-lock.json +++ b/pkgs/by-name/am/amp-cli/package-lock.json @@ -5,7 +5,7 @@ "packages": { "": { "dependencies": { - "@sourcegraph/amp": "^0.0.1747886591-g90f24f" + "@sourcegraph/amp": "^0.0.1748347293-g7a57b5" } }, "node_modules/@colors/colors": { @@ -29,12 +29,14 @@ } }, "node_modules/@sourcegraph/amp": { - "version": "0.0.1747886591-g90f24f", - "resolved": "https://registry.npmjs.org/@sourcegraph/amp/-/amp-0.0.1747886591-g90f24f.tgz", - "integrity": "sha512-therl4OchUfqcVPhG3YNJKjcZUvXadnfowKzJeZtVNZAcJMWz2+u0gZoWE+V8FPgrMaX/crYcYwPmiBl5NM6lg==", + "version": "0.0.1748347293-g7a57b5", + "resolved": "https://registry.npmjs.org/@sourcegraph/amp/-/amp-0.0.1748347293-g7a57b5.tgz", + "integrity": "sha512-gftyQ7jHEo1AmyP8khbTYfkGWlAzfk6TyVw11WdO3bsjVT16yWnp0nWqW1W1X0BCbDiKdyRNasR1JadZH4+gGQ==", "dependencies": { "@types/runes": "^0.4.3", "@vscode/ripgrep": "1.15.11", + "commander": "^11.1.0", + "fuse.js": "^7.0.0", "runes": "^0.4.3", "string-width": "^6.1.0", "winston": "^3.17.0", @@ -152,6 +154,15 @@ "text-hex": "1.0.x" } }, + "node_modules/commander": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-11.1.0.tgz", + "integrity": "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==", + "license": "MIT", + "engines": { + "node": ">=16" + } + }, "node_modules/debug": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", @@ -208,6 +219,15 @@ "integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==", "license": "MIT" }, + "node_modules/fuse.js": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-7.1.0.tgz", + "integrity": "sha512-trLf4SzuuUxfusZADLINj+dE8clK1frKdmqiJNb1Es75fmI5oY6X2mxLVUciLLjxqw/xr72Dhy+lER6dGd02FQ==", + "license": "Apache-2.0", + "engines": { + "node": ">=10" + } + }, "node_modules/https-proxy-agent": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", diff 
--git a/pkgs/by-name/am/amp-cli/package.nix b/pkgs/by-name/am/amp-cli/package.nix index b1a7664d338b..ba3834819581 100644 --- a/pkgs/by-name/am/amp-cli/package.nix +++ b/pkgs/by-name/am/amp-cli/package.nix @@ -8,11 +8,11 @@ buildNpmPackage rec { pname = "amp-cli"; - version = "0.0.1747886591-g90f24f"; + version = "0.0.1748347293-g7a57b5"; src = fetchzip { url = "https://registry.npmjs.org/@sourcegraph/amp/-/amp-${version}.tgz"; - hash = "sha256-knNzJYGXmLuerlw6j+lbIf45uv0tYtMOfsIQVfpJ0Kc="; + hash = "sha256-15R7RojQgF9B5voQfsB0aLNIBR2M7qqLNpMa950pAKM="; }; postPatch = '' @@ -44,7 +44,7 @@ buildNpmPackage rec { chmod +x bin/amp-wrapper.js ''; - npmDepsHash = "sha256-ir13FuVQtxEcryqmSh5BOdrCUWeXAUUX72BYZweUNBU="; + npmDepsHash = "sha256-4CSXRNCKgRunMZvFM2w6wrAcTb03iPjPprTm67fHZ9Q="; propagatedBuildInputs = [ ripgrep From 67326ea5f281e595d37e31ae410438c8d64b20bc Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 15:29:24 +0200 Subject: [PATCH 154/220] python312Packages.opower: 0.12.1 -> 0.12.2 Diff: https://github.com/tronikos/opower/compare/refs/tags/v0.12.1...refs/tags/v0.12.2 Changelog: https://github.com/tronikos/opower/releases/tag/v0.12.2 --- pkgs/development/python-modules/opower/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/opower/default.nix b/pkgs/development/python-modules/opower/default.nix index 00f656dfab46..84d5348a85b6 100644 --- a/pkgs/development/python-modules/opower/default.nix +++ b/pkgs/development/python-modules/opower/default.nix @@ -15,7 +15,7 @@ buildPythonPackage rec { pname = "opower"; - version = "0.12.1"; + version = "0.12.2"; pyproject = true; disabled = pythonOlder "3.9"; @@ -24,7 +24,7 @@ buildPythonPackage rec { owner = "tronikos"; repo = "opower"; tag = "v${version}"; - hash = "sha256-4Sbx7qvQFY/9yKXF4CiRqKh3CNTsHZrmS5BjJ80UaXM="; + hash = "sha256-9qDkhhx96+Zu2yr+EWJc0m2ntNlCo0luFOr6NxWtJSw="; }; build-system = [ setuptools ]; From 8eeae99dc5a537f14ad8d8385b63b27f8a9695bb Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 15:32:43 +0200 Subject: [PATCH 155/220] python312Packages.minikerberos: 0.4.4 -> 0.4.6 Changelog: https://github.com/skelsec/minikerberos/releases/tag/0.4.6 --- pkgs/development/python-modules/minikerberos/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/minikerberos/default.nix b/pkgs/development/python-modules/minikerberos/default.nix index 44d44f3b969d..03b560720c13 100644 --- a/pkgs/development/python-modules/minikerberos/default.nix +++ b/pkgs/development/python-modules/minikerberos/default.nix @@ -13,14 +13,14 @@ buildPythonPackage rec { pname = "minikerberos"; - version = "0.4.4"; + version = "0.4.6"; format = "setuptools"; disabled = pythonOlder "3.7"; src = fetchPypi { inherit pname version; - hash = "sha256-GweGHGxAOLZqOnVdzOtw0xuvsrKsaB1gf1xZ/WuFR7w="; + hash = "sha256-Vv04ngYZcEO32J7ucT6aXyu1RgINtqBk4ZIddA+VcpA="; }; propagatedBuildInputs = [ From e1fe20f28078f4f5298fa8b73a0618f54c8b4f28 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 15:37:25 +0200 Subject: [PATCH 156/220] checkov: 3.2.433 -> 3.2.435 Diff: https://github.com/bridgecrewio/checkov/compare/refs/tags/3.2.433...refs/tags/3.2.435 Changelog: https://github.com/bridgecrewio/checkov/releases/tag/3.2.435 --- pkgs/by-name/ch/checkov/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/ch/checkov/package.nix 
b/pkgs/by-name/ch/checkov/package.nix index 997565c026e8..85c2a75ac2d1 100644 --- a/pkgs/by-name/ch/checkov/package.nix +++ b/pkgs/by-name/ch/checkov/package.nix @@ -25,14 +25,14 @@ with py.pkgs; python3.pkgs.buildPythonApplication rec { pname = "checkov"; - version = "3.2.433"; + version = "3.2.435"; pyproject = true; src = fetchFromGitHub { owner = "bridgecrewio"; repo = "checkov"; tag = version; - hash = "sha256-c9I3VDiKzk9Bybian3bX4OU3WUvHEHyQyGrO4//azqM="; + hash = "sha256-zV430pGFkfyf0oznXe69lTsMkGUrrA5TTyGobE4AK9I="; }; pythonRelaxDeps = [ From 8ef41657bb0fcb3bfef309aca870b2568e5fbeca Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 13:47:33 +0000 Subject: [PATCH 157/220] werf: 2.35.8 -> 2.36.3 --- pkgs/by-name/we/werf/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/we/werf/package.nix b/pkgs/by-name/we/werf/package.nix index cc144e717e26..4765870a926a 100644 --- a/pkgs/by-name/we/werf/package.nix +++ b/pkgs/by-name/we/werf/package.nix @@ -10,17 +10,17 @@ }: buildGoModule (finalAttrs: { pname = "werf"; - version = "2.35.8"; + version = "2.36.3"; src = fetchFromGitHub { owner = "werf"; repo = "werf"; tag = "v${finalAttrs.version}"; - hash = "sha256-akrhVRjoWrBdSrYWe4MZnCAMdT2KfXxAI4oBvrHBwC8="; + hash = "sha256-vTw2kUaCemRwZwzF6NGkqJ67A88AakbynQn55e4cnGw="; }; proxyVendor = true; - vendorHash = "sha256-9caBSJ/eMEdVQ55eebjibtsZJOMZk4OcP1D/NckWxCQ="; + vendorHash = "sha256-wo7hXzSUwT+gdtu82S88bPr2DUvBGAHIcRQ75j4mY2o="; subPackages = [ "cmd/werf" ]; From 9b4ee8516adc29c2dff478a0b223a92e8e84724f Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 15:51:25 +0200 Subject: [PATCH 158/220] python312Packages.aiolifx: 1.1.4 -> 1.1.5 Changelog: https://github.com/aiolifx/aiolifx/releases/tag/1.1.5 --- pkgs/development/python-modules/aiolifx/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/aiolifx/default.nix b/pkgs/development/python-modules/aiolifx/default.nix index 1f8efb348851..062fb683e1fa 100644 --- a/pkgs/development/python-modules/aiolifx/default.nix +++ b/pkgs/development/python-modules/aiolifx/default.nix @@ -13,14 +13,14 @@ buildPythonPackage rec { pname = "aiolifx"; - version = "1.1.4"; + version = "1.1.5"; pyproject = true; disabled = pythonOlder "3.7"; src = fetchPypi { inherit pname version; - hash = "sha256-7T7nHmnK1ZLoIgi6e8VVrq6NVAmL7tVi+F/6G3Ayh2Q="; + hash = "sha256-KRUe8qn/3jYKxgKvqPeA6oXZF3IYfRBBftWxjLhe/ow="; }; build-system = [ setuptools ]; From 0f506e0fea24ff7cb7b32b12f34667e7ddfa1861 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 15:51:56 +0200 Subject: [PATCH 159/220] python313Packages.aiolifx-themes: 0.6.10 -> 0.6.11 Diff: https://github.com/Djelibeybi/aiolifx-themes/compare/refs/tags/v0.6.10...refs/tags/v0.6.11 Changelog: https://github.com/Djelibeybi/aiolifx-themes/releases/tag/v0.6.11 --- pkgs/development/python-modules/aiolifx-themes/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/aiolifx-themes/default.nix b/pkgs/development/python-modules/aiolifx-themes/default.nix index 011dde9cef09..20cf615b03ee 100644 --- a/pkgs/development/python-modules/aiolifx-themes/default.nix +++ b/pkgs/development/python-modules/aiolifx-themes/default.nix @@ -13,7 +13,7 @@ buildPythonPackage rec { pname = "aiolifx-themes"; - version = "0.6.10"; + version = "0.6.11"; pyproject = true; disabled = pythonOlder "3.9"; @@ -22,7 +22,7 @@ buildPythonPackage 
rec { owner = "Djelibeybi"; repo = "aiolifx-themes"; tag = "v${version}"; - hash = "sha256-fn8of81RvNWyCgobF/QuI7lXvv/AdCSHXlbmGXPoMRA="; + hash = "sha256-H5fjmGfvC/d1qTzEHtmGDPlZ0aY1UaTYPOqUAxDmb3I="; }; build-system = [ poetry-core ]; From 42f65491df518cdad42965b4ec485312bf9f21e7 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 13:54:21 +0000 Subject: [PATCH 160/220] expr: 1.17.3 -> 1.17.4 --- pkgs/by-name/ex/expr/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ex/expr/package.nix b/pkgs/by-name/ex/expr/package.nix index ee5916854ad6..469cc2511e05 100644 --- a/pkgs/by-name/ex/expr/package.nix +++ b/pkgs/by-name/ex/expr/package.nix @@ -6,18 +6,18 @@ buildGoModule rec { pname = "expr"; - version = "1.17.3"; + version = "1.17.4"; src = fetchFromGitHub { owner = "expr-lang"; repo = "expr"; rev = "v${version}"; - hash = "sha256-oi5dMTuirAnUFOC8zBlu7YErp13DZPoSGNpueKXdNtE="; + hash = "sha256-Ss1rs4BiKFOSzfL6VXKZA2Z/LYJ9N+AYkgdVCeintOk="; }; sourceRoot = "${src.name}/repl"; - vendorHash = "sha256-tSerrcRS7Nl0rZQqGfUKgdHsGBXEAFFF+Cn7HqFyfqA="; + vendorHash = "sha256-mjqbO3qgX7ak8VRFHnz9UYNoOd+bbHBImDLvnaJhdqI="; ldflags = [ "-s" From 82d81bcf53996e8e10330e9fdb37b7abe952c960 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 13:54:59 +0000 Subject: [PATCH 161/220] falcoctl: 0.11.1 -> 0.11.2 --- pkgs/by-name/fa/falcoctl/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/fa/falcoctl/package.nix b/pkgs/by-name/fa/falcoctl/package.nix index 6f36d1ebf140..056c15dc889e 100644 --- a/pkgs/by-name/fa/falcoctl/package.nix +++ b/pkgs/by-name/fa/falcoctl/package.nix @@ -6,16 +6,16 @@ buildGoModule (finalAttrs: { pname = "falcoctl"; - version = "0.11.1"; + version = "0.11.2"; src = fetchFromGitHub { owner = "falcosecurity"; repo = "falcoctl"; tag = "v${finalAttrs.version}"; - hash = "sha256-3YsxshQh+LQBpKt7YG52rwOdWyjkfn+kTa6nsHpkA+A="; + hash = "sha256-cbrlFxRRHwrK1+mkvEktrOCbg5bhKG7GXvv+YJ6un7I="; }; - vendorHash = "sha256-uIs3e2E8ThW7n9SXX2lu63KFmsy/QrVGQ4NgY1J+Qr0="; + vendorHash = "sha256-L7VXGMWs2eRQUT37CCtQsiYZnsDi/a8QSwAw/f/mydc="; ldflags = [ "-s" From 010d46e615382388e5531ddd67220224ea6fb2f0 Mon Sep 17 00:00:00 2001 From: Matthias Beyer Date: Tue, 27 May 2025 09:42:10 +0200 Subject: [PATCH 162/220] watcher: Add myself as maintainer Signed-off-by: Matthias Beyer --- pkgs/by-name/wa/watcher/package.nix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkgs/by-name/wa/watcher/package.nix b/pkgs/by-name/wa/watcher/package.nix index 6676bf66e046..2c67ebb49208 100644 --- a/pkgs/by-name/wa/watcher/package.nix +++ b/pkgs/by-name/wa/watcher/package.nix @@ -25,7 +25,10 @@ stdenv.mkDerivation rec { homepage = "https://github.com/e-dant/watcher"; changelog = "https://github.com/e-dant/watcher/releases/tag/${src.tag}"; license = lib.licenses.mit; - maintainers = with lib.maintainers; [ gaelreyrol ]; + maintainers = with lib.maintainers; [ + gaelreyrol + matthiasbeyer + ]; mainProgram = "tw"; platforms = lib.platforms.all; }; From ca82b5839fe180898f30b9c4e5b92c1c1b38bed0 Mon Sep 17 00:00:00 2001 From: Matthias Beyer Date: Tue, 27 May 2025 09:43:18 +0200 Subject: [PATCH 163/220] snips-sh: Add myself as maintainer Signed-off-by: Matthias Beyer --- pkgs/by-name/sn/snips-sh/package.nix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkgs/by-name/sn/snips-sh/package.nix b/pkgs/by-name/sn/snips-sh/package.nix index 2c0650c0a0e0..857ec8a4c64b 100644 --- 
a/pkgs/by-name/sn/snips-sh/package.nix +++ b/pkgs/by-name/sn/snips-sh/package.nix @@ -27,7 +27,10 @@ buildGoModule rec { license = lib.licenses.mit; platforms = lib.platforms.linux; homepage = "https://snips.sh"; - maintainers = with lib.maintainers; [ jeremiahs ]; + maintainers = with lib.maintainers; [ + jeremiahs + matthiasbeyer + ]; mainProgram = "snips.sh"; }; } From cae5c734ab30a439d645f21ffa1fb8c520dac88d Mon Sep 17 00:00:00 2001 From: Matthias Beyer Date: Tue, 27 May 2025 09:45:31 +0200 Subject: [PATCH 164/220] nixpkgs-track: Add myself as maintainer Signed-off-by: Matthias Beyer --- pkgs/by-name/ni/nixpkgs-track/package.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/pkgs/by-name/ni/nixpkgs-track/package.nix b/pkgs/by-name/ni/nixpkgs-track/package.nix index 4994f776d6c6..8085126d79d1 100644 --- a/pkgs/by-name/ni/nixpkgs-track/package.nix +++ b/pkgs/by-name/ni/nixpkgs-track/package.nix @@ -39,6 +39,7 @@ rustPlatform.buildRustPackage (finalAttrs: { maintainers = with lib.maintainers; [ isabelroses uncenter + matthiasbeyer ]; }; }) From f26bd9d630661a511ad75d35752db80947263ee7 Mon Sep 17 00:00:00 2001 From: Matthias Beyer Date: Tue, 27 May 2025 11:34:42 +0200 Subject: [PATCH 165/220] uutils-coreutils: Add myself as maintainer Signed-off-by: Matthias Beyer --- pkgs/by-name/uu/uutils-coreutils/package.nix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkgs/by-name/uu/uutils-coreutils/package.nix b/pkgs/by-name/uu/uutils-coreutils/package.nix index 1a43ed4ae530..6c4929e771c4 100644 --- a/pkgs/by-name/uu/uutils-coreutils/package.nix +++ b/pkgs/by-name/uu/uutils-coreutils/package.nix @@ -70,7 +70,10 @@ stdenv.mkDerivation (finalAttrs: { ''; homepage = "https://github.com/uutils/coreutils"; changelog = "https://github.com/uutils/coreutils/releases/tag/${finalAttrs.version}"; - maintainers = with lib.maintainers; [ siraben ]; + maintainers = with lib.maintainers; [ + siraben + matthiasbeyer + ]; license = lib.licenses.mit; platforms = lib.platforms.unix; }; From c9f192da9242f66cd9314b90632682d0326afa93 Mon Sep 17 00:00:00 2001 From: Masum Reza <50095635+JohnRTitor@users.noreply.github.com> Date: Tue, 27 May 2025 19:45:13 +0530 Subject: [PATCH 166/220] nixos/amdgpu: add overdrive and ppfeaturemask option (#411155) - `programs.corectrl.gpuOverclock.enable` -> `hardware.amdgpu.overdrive.enable` - `programs.corectrl.gpuOverclock.ppfeaturemask` -> `hardware.amdgpu.overdrive.ppfeaturemask` - `programs.tuxclocker.enableAMD` -> `hardware.amdgpu.overdrive.enable` --- .../manual/release-notes/rl-2511.section.md | 3 ++ nixos/modules/hardware/corectrl.nix | 35 ++++++------------- nixos/modules/services/hardware/amdgpu.nix | 35 +++++++++++++++---- nixos/modules/services/misc/tuxclocker.nix | 16 ++++----- 4 files changed, 50 insertions(+), 39 deletions(-) diff --git a/nixos/doc/manual/release-notes/rl-2511.section.md b/nixos/doc/manual/release-notes/rl-2511.section.md index d86c9b80de3a..21994daff24e 100644 --- a/nixos/doc/manual/release-notes/rl-2511.section.md +++ b/nixos/doc/manual/release-notes/rl-2511.section.md @@ -27,3 +27,6 @@ - `services.clamsmtp` is unmaintained and was removed from Nixpkgs. + +- `amdgpu` kernel driver overdrive mode can now be enabled by setting [hardware.amdgpu.overdrive.enable](#opt-hardware.amdgpu.overdrive.enable) and customized through [hardware.amdgpu.overdrive.ppfeaturemask](#opt-hardware.amdgpu.overdrive.ppfeaturemask). 
+ This allows for fine-grained control over the GPU's performance and maybe required by overclocking softwares like Corectrl and Lact. These new options replace old options such as {option}`programs.corectrl.gpuOverclock.enable` and {option}`programs.tuxclocker.enableAMD`. diff --git a/nixos/modules/hardware/corectrl.nix b/nixos/modules/hardware/corectrl.nix index 6e680ddc846e..bbe59d1b5b9d 100644 --- a/nixos/modules/hardware/corectrl.nix +++ b/nixos/modules/hardware/corectrl.nix @@ -8,13 +8,23 @@ let inherit (lib) mkEnableOption mkIf - mkOption mkPackageOption ; cfg = config.programs.corectrl; in { + imports = [ + (lib.mkRenamedOptionModule + [ "programs" "corectrl" "gpuOverclock" "enable" ] + [ "hardware" "amdgpu" "overdrive" "enable" ] + ) + (lib.mkRenamedOptionModule + [ "programs" "corectrl" "gpuOverclock" "ppfeaturemask" ] + [ "hardware" "amdgpu" "overdrive" "ppfeaturemask" ] + ) + ]; + options.programs.corectrl = { enable = mkEnableOption '' CoreCtrl, a tool to overclock amd graphics cards and processors. @@ -24,23 +34,6 @@ in package = mkPackageOption pkgs "corectrl" { extraDescription = "Useful for overriding the configuration options used for the package."; }; - - gpuOverclock = { - enable = mkEnableOption '' - GPU overclocking - ''; - ppfeaturemask = mkOption { - type = lib.types.str; - default = "0xfffd7fff"; - example = "0xffffffff"; - description = '' - Sets the `amdgpu.ppfeaturemask` kernel option. - In particular, it is used here to set the overdrive bit. - Default is `0xfffd7fff` as it is less likely to cause flicker issues. - Setting it to `0xffffffff` enables all features. - ''; - }; - }; }; config = mkIf cfg.enable { @@ -61,12 +54,6 @@ in } }); ''; - - # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/gpu/drm/amd/include/amd_shared.h#n169 - # The overdrive bit - boot.kernelParams = mkIf cfg.gpuOverclock.enable [ - "amdgpu.ppfeaturemask=${cfg.gpuOverclock.ppfeaturemask}" - ]; }; meta.maintainers = with lib.maintainers; [ diff --git a/nixos/modules/services/hardware/amdgpu.nix b/nixos/modules/services/hardware/amdgpu.nix index 78f47d858105..4aff9cdd077d 100644 --- a/nixos/modules/services/hardware/amdgpu.nix +++ b/nixos/modules/services/hardware/amdgpu.nix @@ -16,21 +16,44 @@ in series cards. Note: this removes support for analog video outputs, which is only available in the `radeon` driver ''; + initrd.enable = lib.mkEnableOption '' loading `amdgpu` kernelModule in stage 1. Can fix lower resolution in boot screen during initramfs phase ''; + + overdrive = { + enable = lib.mkEnableOption ''`amdgpu` overdrive mode for overclocking''; + + ppfeaturemask = lib.mkOption { + type = lib.types.str; + default = "0xfffd7fff"; + example = "0xffffffff"; + description = '' + Sets the `amdgpu.ppfeaturemask` kernel option. It can be used to enable the overdrive bit. + Default is `0xfffd7fff` as it is less likely to cause flicker issues. Setting it to + `0xffffffff` enables all features, but also can be unstable. See + [the kernel documentation](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/gpu/drm/amd/include/amd_shared.h#n169) + for more information. 
+ ''; + }; + }; + opencl.enable = lib.mkEnableOption ''OpenCL support using ROCM runtime library''; # cfg.amdvlk option is defined in ./amdvlk.nix module }; config = { - boot.kernelParams = lib.optionals cfg.legacySupport.enable [ - "amdgpu.si_support=1" - "amdgpu.cik_support=1" - "radeon.si_support=0" - "radeon.cik_support=0" - ]; + boot.kernelParams = + lib.optionals cfg.legacySupport.enable [ + "amdgpu.si_support=1" + "amdgpu.cik_support=1" + "radeon.si_support=0" + "radeon.cik_support=0" + ] + ++ lib.optionals cfg.overdrive.enable [ + "amdgpu.ppfeaturemask=${cfg.overdrive.ppfeaturemask}" + ]; boot.initrd.kernelModules = lib.optionals cfg.initrd.enable [ "amdgpu" ]; diff --git a/nixos/modules/services/misc/tuxclocker.nix b/nixos/modules/services/misc/tuxclocker.nix index f316994314be..b5d1eea767db 100644 --- a/nixos/modules/services/misc/tuxclocker.nix +++ b/nixos/modules/services/misc/tuxclocker.nix @@ -8,16 +8,18 @@ let cfg = config.programs.tuxclocker; in { + imports = [ + (lib.mkRenamedOptionModule + [ "programs" "tuxclocker" "enableAMD" ] + [ "hardware" "amdgpu" "overdrive" "enable" ] + ) + ]; + options.programs.tuxclocker = { enable = lib.mkEnableOption '' TuxClocker, a hardware control and monitoring program ''; - enableAMD = lib.mkEnableOption '' - AMD GPU controls. - Sets the `amdgpu.ppfeaturemask` kernel parameter to 0xfffd7fff to enable all TuxClocker controls - ''; - enabledNVIDIADevices = lib.mkOption { type = lib.types.listOf lib.types.int; default = [ ]; @@ -72,9 +74,5 @@ in ); in lib.concatStrings (map configSection cfg.enabledNVIDIADevices); - - # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/gpu/drm/amd/include/amd_shared.h#n207 - # Enable everything modifiable in TuxClocker - boot.kernelParams = lib.mkIf cfg.enableAMD [ "amdgpu.ppfeaturemask=0xfffd7fff" ]; }; } From 357d9ebfeeb2491d30779e77ebbca155253e51b7 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 16:18:23 +0200 Subject: [PATCH 167/220] python312Packages.cement: 3.0.12 -> 3.0.14 Diff: https://github.com/datafolklabs/cement/compare/refs/tags/3.0.12...refs/tags/3.0.14 Changelog: https://github.com/datafolklabs/cement/blob/3.0.14/CHANGELOG.md --- pkgs/development/python-modules/cement/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/cement/default.nix b/pkgs/development/python-modules/cement/default.nix index 85071fbb0301..ea70f7895ab0 100644 --- a/pkgs/development/python-modules/cement/default.nix +++ b/pkgs/development/python-modules/cement/default.nix @@ -21,7 +21,7 @@ buildPythonPackage rec { pname = "cement"; - version = "3.0.12"; + version = "3.0.14"; pyproject = true; disabled = pythonOlder "3.8"; @@ -30,7 +30,7 @@ buildPythonPackage rec { owner = "datafolklabs"; repo = "cement"; tag = version; - hash = "sha256-weBqmNEjeSh5YQfHK48VVFW3UbZQmV4MiIQ3UPQKTTI="; + hash = "sha256-hZ9kKQmMomjy5nnHKQ2RWB+6vIID8XMn3qutg0wCBq8="; }; build-system = [ pdm-backend ]; From 7fe55ece6100be2ba00e9c633181a938a980c8e9 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 14:25:42 +0000 Subject: [PATCH 168/220] bustools: 0.45.0 -> 0.45.1 --- pkgs/by-name/bu/bustools/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/bu/bustools/package.nix b/pkgs/by-name/bu/bustools/package.nix index 38ae60fa0ab9..31e66e6542f4 100644 --- a/pkgs/by-name/bu/bustools/package.nix +++ b/pkgs/by-name/bu/bustools/package.nix @@ -10,13 +10,13 @@ stdenv.mkDerivation rec { pname = "bustools"; - version = "0.45.0"; + version = "0.45.1"; src = fetchFromGitHub { owner = "BUStools"; repo = "bustools"; rev = "v${version}"; - sha256 = "sha256-Af2WUryx4HQuAlNJ1RWJK1Mj2M7X+4Ckap3rqEJ3vto="; + sha256 = "sha256-G+ZMoUmhINp18XKmXpdb5GT7YMsiK/XX2zrjt56CbLg="; }; nativeBuildInputs = [ cmake ]; From d09d8ce1954414fb11c21b82e18c1fa53f08f265 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Tue, 27 May 2025 16:22:37 +0200 Subject: [PATCH 169/220] nixosTests.login: Fix eval --- nixos/tests/all-tests.nix | 5 ++++- nixos/tests/login.nix | 4 +++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix index 2ba9260afff2..dd1a099ac1f2 100644 --- a/nixos/tests/all-tests.nix +++ b/nixos/tests/all-tests.nix @@ -710,7 +710,10 @@ in ladybird = runTest ./ladybird.nix; languagetool = runTest ./languagetool.nix; lanraragi = runTest ./lanraragi.nix; - latestKernel.login = handleTest ./login.nix { latestKernel = true; }; + latestKernel.login = runTest { + imports = [ ./login.nix ]; + _module.args.latestKernel = true; + }; lasuite-docs = runTest ./web-apps/lasuite-docs.nix; lavalink = runTest ./lavalink.nix; leaps = runTest ./leaps.nix; diff --git a/nixos/tests/login.nix b/nixos/tests/login.nix index b5b710215ca2..5703528f69de 100644 --- a/nixos/tests/login.nix +++ b/nixos/tests/login.nix @@ -1,10 +1,12 @@ { + lib, pkgs, - latestKernel ? false, + latestKernel, ... 
}: { + _module.args.latestKernel = lib.mkDefault false; name = "login"; meta = with pkgs.lib.maintainers; { maintainers = [ ]; From 6041b78d8b4572d184157465fe3d615f4a27cfea Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Tue, 27 May 2025 16:22:51 +0200 Subject: [PATCH 170/220] nixosTests.containers-imperative: Fix eval --- nixos/tests/containers-imperative.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nixos/tests/containers-imperative.nix b/nixos/tests/containers-imperative.nix index 746e72f54a32..630bcbdf64be 100644 --- a/nixos/tests/containers-imperative.nix +++ b/nixos/tests/containers-imperative.nix @@ -32,7 +32,7 @@ let emptyContainer = import ../lib/eval-config.nix { modules = lib.singleton { - nixpkgs = { inherit (config.nixpkgs) localSystem; }; + nixpkgs.hostPlatform = { inherit (pkgs.stdenv.hostPlatform) system; }; containers.foo.config = { }; }; From 9ba0040bb5ee69c7a18293750d37775c9caf8054 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 16:31:59 +0200 Subject: [PATCH 171/220] python312Packages.gcal-sync: 7.0.1 -> 7.1.0 Diff: https://github.com/allenporter/gcal_sync/compare/refs/tags/7.0.1...refs/tags/7.1.0 Changelog: https://github.com/allenporter/gcal_sync/releases/tag/7.1.0 --- pkgs/development/python-modules/gcal-sync/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/gcal-sync/default.nix b/pkgs/development/python-modules/gcal-sync/default.nix index b1378dcb6586..16860b78d8e6 100644 --- a/pkgs/development/python-modules/gcal-sync/default.nix +++ b/pkgs/development/python-modules/gcal-sync/default.nix @@ -15,7 +15,7 @@ buildPythonPackage rec { pname = "gcal-sync"; - version = "7.0.1"; + version = "7.1.0"; pyproject = true; disabled = pythonOlder "3.10"; @@ -24,7 +24,7 @@ buildPythonPackage rec { owner = "allenporter"; repo = "gcal_sync"; tag = version; - hash = "sha256-MeXmVQ1NIlxccWM1fvmM8up+oOGEWKwC8GRx9NDxdyQ="; + hash = "sha256-jdhPoZdkgMg9TBIV9j3dvaEnEOpOoa1OKBeR1YAWWKs="; }; build-system = [ setuptools ]; From e69360347bcb06ed3946d63210269af3ffc88be3 Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 16:39:35 +0200 Subject: [PATCH 172/220] python312Packages.influxdb-client: 1.48.0 -> 1.49.0 Diff: https://github.com/influxdata/influxdb-client-python/compare/refs/tags/v1.48.0...refs/tags/v1.49.0 Changelog: https://github.com/influxdata/influxdb-client-python/blob/v1.49.0/CHANGELOG.md --- pkgs/development/python-modules/influxdb-client/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/influxdb-client/default.nix b/pkgs/development/python-modules/influxdb-client/default.nix index f9054acb542d..e081e6e9d02b 100644 --- a/pkgs/development/python-modules/influxdb-client/default.nix +++ b/pkgs/development/python-modules/influxdb-client/default.nix @@ -17,7 +17,7 @@ buildPythonPackage rec { pname = "influxdb-client"; - version = "1.48.0"; + version = "1.49.0"; pyproject = true; disabled = pythonOlder "3.7"; @@ -26,7 +26,7 @@ buildPythonPackage rec { owner = "influxdata"; repo = "influxdb-client-python"; tag = "v${version}"; - hash = "sha256-MKTfRsqizZDXPMqJAypUDQBm+a2s6FaEG3TM30wucrI="; + hash = "sha256-lu3we/KXwP3oC9bfv6gzbwacOVLGSuPBf9giwmsHXgI="; }; build-system = [ setuptools ]; From fdf99a6c7d58ff08ed4769654959a2fcfcba1bed Mon Sep 17 00:00:00 2001 From: Fabian Affolter Date: Tue, 27 May 2025 16:49:05 +0200 Subject: [PATCH 173/220] python312Packages.mypy-boto3-builder: 8.10.1 -> 
8.11.0 Changelog: https://github.com/youtype/mypy_boto3_builder/releases/tag/8.11.0 --- .../python-modules/mypy-boto3-builder/default.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/development/python-modules/mypy-boto3-builder/default.nix b/pkgs/development/python-modules/mypy-boto3-builder/default.nix index 7afc3c03d8ab..532c9a93b5de 100644 --- a/pkgs/development/python-modules/mypy-boto3-builder/default.nix +++ b/pkgs/development/python-modules/mypy-boto3-builder/default.nix @@ -22,7 +22,7 @@ buildPythonPackage rec { pname = "mypy-boto3-builder"; - version = "8.10.1"; + version = "8.11.0"; pyproject = true; disabled = pythonOlder "3.12"; @@ -31,12 +31,12 @@ buildPythonPackage rec { owner = "youtype"; repo = "mypy_boto3_builder"; tag = version; - hash = "sha256-2XWFmmTkT5LpGqzHK3h8KeqQXP1+nwNEPeGHt/gowRo="; + hash = "sha256-7NrN42DcM+NNTjRnOdDzPBTKFRex8Ph4bVjdVgJa4Po="; }; postPatch = '' substituteInPlace pyproject.toml \ - --replace-fail 'version = "8.10.0"' 'version = "${version}"' + --replace-fail 'version = "8.10.1"' 'version = "${version}"' ''; build-system = [ setuptools ]; From 646bebe3be8004e578842745136b8196f4f1fced Mon Sep 17 00:00:00 2001 From: Connor Baker Date: Sat, 10 May 2025 00:06:32 +0000 Subject: [PATCH 174/220] cudaPackages: add cudaNamePrefix Signed-off-by: Connor Baker --- pkgs/top-level/cuda-packages.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkgs/top-level/cuda-packages.nix b/pkgs/top-level/cuda-packages.nix index 740efe03b346..70aaf15362f0 100644 --- a/pkgs/top-level/cuda-packages.nix +++ b/pkgs/top-level/cuda-packages.nix @@ -64,6 +64,9 @@ let nvccCompatibilities pkgs ; + + cudaNamePrefix = "cuda${cudaMajorMinorVersion}"; + cudaMajorVersion = versions.major cudaMajorMinorVersion; cudaOlder = strings.versionOlder cudaMajorMinorVersion; cudaAtLeast = strings.versionAtLeast cudaMajorMinorVersion; From a018d736978adf55e1b8f7bf79e736cd6d042573 Mon Sep 17 00:00:00 2001 From: Connor Baker Date: Sat, 10 May 2025 15:01:52 +0000 Subject: [PATCH 175/220] cudaPackages.driver_assistant: mark as unsupported Signed-off-by: Connor Baker --- pkgs/development/cuda-modules/fixups/driver_assistant.nix | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 pkgs/development/cuda-modules/fixups/driver_assistant.nix diff --git a/pkgs/development/cuda-modules/fixups/driver_assistant.nix b/pkgs/development/cuda-modules/fixups/driver_assistant.nix new file mode 100644 index 000000000000..e9c50b2f4eaf --- /dev/null +++ b/pkgs/development/cuda-modules/fixups/driver_assistant.nix @@ -0,0 +1,5 @@ +_: prevAttrs: { + badPlatformsConditions = prevAttrs.badPlatformsConditions or { } // { + "Package is not supported; use drivers from linuxPackages" = true; + }; +} From 0ac3a73b6a997ea5e05067577a17c613ecddcd7d Mon Sep 17 00:00:00 2001 From: Connor Baker Date: Fri, 9 May 2025 21:54:54 +0000 Subject: [PATCH 176/220] cudaLib: init Signed-off-by: Connor Baker --- .../cuda-modules/lib/data/cuda.nix | 357 +++++++++++++++++ .../cuda-modules/lib/data/default.nix | 32 ++ .../cuda-modules/lib/data/nvcc.nix | 268 +++++++++++++ .../cuda-modules/lib/data/redist.nix | 56 +++ pkgs/development/cuda-modules/lib/default.nix | 13 + .../cuda-modules/lib/utils/assertions.nix | 139 +++++++ .../cuda-modules/lib/utils/cuda.nix | 129 ++++++ .../cuda-modules/lib/utils/default.nix | 49 +++ .../cuda-modules/lib/utils/meta.nix | 71 ++++ .../cuda-modules/lib/utils/redist.nix | 196 +++++++++ .../cuda-modules/lib/utils/strings.nix | 379 ++++++++++++++++++ 
.../cuda-modules/lib/utils/versions.nix | 76 ++++ pkgs/top-level/all-packages.nix | 2 + 13 files changed, 1767 insertions(+) create mode 100644 pkgs/development/cuda-modules/lib/data/cuda.nix create mode 100644 pkgs/development/cuda-modules/lib/data/default.nix create mode 100644 pkgs/development/cuda-modules/lib/data/nvcc.nix create mode 100644 pkgs/development/cuda-modules/lib/data/redist.nix create mode 100644 pkgs/development/cuda-modules/lib/default.nix create mode 100644 pkgs/development/cuda-modules/lib/utils/assertions.nix create mode 100644 pkgs/development/cuda-modules/lib/utils/cuda.nix create mode 100644 pkgs/development/cuda-modules/lib/utils/default.nix create mode 100644 pkgs/development/cuda-modules/lib/utils/meta.nix create mode 100644 pkgs/development/cuda-modules/lib/utils/redist.nix create mode 100644 pkgs/development/cuda-modules/lib/utils/strings.nix create mode 100644 pkgs/development/cuda-modules/lib/utils/versions.nix diff --git a/pkgs/development/cuda-modules/lib/data/cuda.nix b/pkgs/development/cuda-modules/lib/data/cuda.nix new file mode 100644 index 000000000000..0b97645c420d --- /dev/null +++ b/pkgs/development/cuda-modules/lib/data/cuda.nix @@ -0,0 +1,357 @@ +{ cudaLib, lib }: +{ + /** + All CUDA capabilities, sorted by version. + + NOTE: Since the capabilities are sorted by version and architecture/family-specific features are + appended to the minor version component, the sorted list groups capabilities by baseline feature + set. + + # Type + + ``` + allSortedCudaCapabilities :: [CudaCapability] + ``` + + # Example + + ``` + allSortedCudaCapabilities = [ + "5.0" + "5.2" + "6.0" + "6.1" + "7.0" + "7.2" + "7.5" + "8.0" + "8.6" + "8.7" + "8.9" + "9.0" + "9.0a" + "10.0" + "10.0a" + "10.0f" + "10.1" + "10.1a" + "10.1f" + "10.3" + "10.3a" + "10.3f" + ]; + ``` + */ + allSortedCudaCapabilities = lib.sort lib.versionOlder ( + lib.attrNames cudaLib.data.cudaCapabilityToInfo + ); + + /** + Mapping of CUDA micro-architecture name to capabilities belonging to that micro-architecture. + + # Type + + ``` + cudaArchNameToCapabilities :: AttrSet NonEmptyStr (NonEmptyListOf CudaCapability) + ``` + */ + cudaArchNameToCapabilities = lib.groupBy ( + cudaCapability: cudaLib.data.cudaCapabilityToInfo.${cudaCapability}.archName + ) cudaLib.data.allSortedCudaCapabilities; + + /** + Attribute set of supported CUDA capability mapped to information about that capability. + + NOTE: For more on baseline, architecture-specific, and family-specific feature sets, see + https://developer.nvidia.com/blog/nvidia-blackwell-and-nvidia-cuda-12-9-introduce-family-specific-architecture-features. + + NOTE: For information on when support for a given architecture was added, see + https://docs.nvidia.com/cuda/parallel-thread-execution/#release-notes + + NOTE: For baseline feature sets, `dontDefaultAfterCudaMajorMinorVersion` is generally set to the CUDA release + immediately prior to TensorRT removing support for that architecture. + + Many thanks to Arnon Shimoni for maintaining a list of these architectures and capabilities. + Without your work, this would have been much more difficult. 
+ https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ + + # Type + + ``` + cudaCapabilityToInfo :: + AttrSet + CudaCapability + { archName :: String + , cudaCapability :: CudaCapability + , isJetson :: Bool + , isArchitectureSpecific :: Bool + , isFamilySpecific :: Bool + , minCudaMajorMinorVersion :: MajorMinorVersion + , maxCudaMajorMinorVersion :: MajorMinorVersion + , dontDefaultAfterCudaMajorMinorVersion :: Null | MajorMinorVersion + } + ``` + + `archName` + + : The name of the microarchitecture + + `cudaCapability` + + : The CUDA capability + + `isJetson` + + : Whether this capability is part of NVIDIA's line of Jetson embedded computers. This field is notable + because it tells us what architecture to build for (as Jetson devices are aarch64). + More on Jetson devices here: https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/ + NOTE: These architectures are only built upon request. + + `isArchitectureSpecific` + + : Whether this capability is an architecture-specific feature set. + NOTE: These architectures are only built upon request. + + `isFamilySpecific` + + : Whether this capability is a family-specific feature set. + NOTE: These architectures are only built upon request. + + `minCudaMajorMinorVersion` + + : The minimum (inclusive) CUDA version that supports this capability. + + `maxCudaMajorMinorVersion` + + : The maximum (exclusive) CUDA version that supports this capability. + `null` means there is no maximum. + + `dontDefaultAfterCudaMajorMinorVersion` + + : The CUDA version after which to exclude this capability from the list of default capabilities we build. + */ + cudaCapabilityToInfo = + lib.mapAttrs + ( + cudaCapability: + # Supplies default values. + { + archName, + isJetson ? false, + isArchitectureSpecific ? (lib.hasSuffix "a" cudaCapability), + isFamilySpecific ? (lib.hasSuffix "f" cudaCapability), + minCudaMajorMinorVersion, + maxCudaMajorMinorVersion ? null, + dontDefaultAfterCudaMajorMinorVersion ? null, + }: + { + inherit + archName + cudaCapability + isJetson + isArchitectureSpecific + isFamilySpecific + minCudaMajorMinorVersion + maxCudaMajorMinorVersion + dontDefaultAfterCudaMajorMinorVersion + ; + } + ) + { + # Tesla K40 + "3.5" = { + archName = "Kepler"; + minCudaMajorMinorVersion = "10.0"; + dontDefaultAfterCudaMajorMinorVersion = "11.0"; + maxCudaMajorMinorVersion = "11.8"; + }; + + # Tesla K80 + "3.7" = { + archName = "Kepler"; + minCudaMajorMinorVersion = "10.0"; + dontDefaultAfterCudaMajorMinorVersion = "11.0"; + maxCudaMajorMinorVersion = "11.8"; + }; + + # Tesla/Quadro M series + "5.0" = { + archName = "Maxwell"; + minCudaMajorMinorVersion = "10.0"; + dontDefaultAfterCudaMajorMinorVersion = "11.0"; + }; + + # Quadro M6000 , GeForce 900, GTX-970, GTX-980, GTX Titan X + "5.2" = { + archName = "Maxwell"; + minCudaMajorMinorVersion = "10.0"; + dontDefaultAfterCudaMajorMinorVersion = "11.0"; + }; + + # Quadro GP100, Tesla P100, DGX-1 (Generic Pascal) + "6.0" = { + archName = "Pascal"; + minCudaMajorMinorVersion = "10.0"; + # Removed from TensorRT 10.0, which corresponds to CUDA 12.4 release. 
+ # https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-1001/support-matrix/index.html + dontDefaultAfterCudaMajorMinorVersion = "12.3"; + }; + + # GTX 1080, GTX 1070, GTX 1060, GTX 1050, GTX 1030 (GP108), GT 1010 (GP108) Titan Xp, Tesla + # P40, Tesla P4, Discrete GPU on the NVIDIA Drive PX2 + "6.1" = { + archName = "Pascal"; + minCudaMajorMinorVersion = "10.0"; + # Removed from TensorRT 10.0, which corresponds to CUDA 12.4 release. + # https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-1001/support-matrix/index.html + dontDefaultAfterCudaMajorMinorVersion = "12.3"; + }; + + # DGX-1 with Volta, Tesla V100, GTX 1180 (GV104), Titan V, Quadro GV100 + "7.0" = { + archName = "Volta"; + minCudaMajorMinorVersion = "10.0"; + # Removed from TensorRT 10.5, which corresponds to CUDA 12.6 release. + # https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-1050/support-matrix/index.html + dontDefaultAfterCudaMajorMinorVersion = "12.5"; + }; + + # Jetson AGX Xavier, Drive AGX Pegasus, Xavier NX + "7.2" = { + archName = "Volta"; + minCudaMajorMinorVersion = "10.0"; + # Note: without `cuda_compat`, maxCudaMajorMinorVersion is 11.8 + # https://docs.nvidia.com/cuda/cuda-for-tegra-appnote/index.html#deployment-considerations-for-cuda-upgrade-package + maxCudaMajorMinorVersion = "12.2"; + isJetson = true; + }; + + # GTX/RTX Turing – GTX 1660 Ti, RTX 2060, RTX 2070, RTX 2080, Titan RTX, Quadro RTX 4000, + # Quadro RTX 5000, Quadro RTX 6000, Quadro RTX 8000, Quadro T1000/T2000, Tesla T4 + "7.5" = { + archName = "Turing"; + minCudaMajorMinorVersion = "10.0"; + }; + + # NVIDIA A100 (the name “Tesla” has been dropped – GA100), NVIDIA DGX-A100 + "8.0" = { + archName = "Ampere"; + minCudaMajorMinorVersion = "11.2"; + }; + + # Tesla GA10x cards, RTX Ampere – RTX 3080, GA102 – RTX 3090, RTX A2000, A3000, RTX A4000, + # A5000, A6000, NVIDIA A40, GA106 – RTX 3060, GA104 – RTX 3070, GA107 – RTX 3050, RTX A10, RTX + # A16, RTX A40, A2 Tensor Core GPU + "8.6" = { + archName = "Ampere"; + minCudaMajorMinorVersion = "11.2"; + }; + + # Jetson AGX Orin and Drive AGX Orin only + "8.7" = { + archName = "Ampere"; + minCudaMajorMinorVersion = "11.5"; + isJetson = true; + }; + + # NVIDIA GeForce RTX 4090, RTX 4080, RTX 6000, Tesla L40 + "8.9" = { + archName = "Ada"; + minCudaMajorMinorVersion = "11.8"; + }; + + # NVIDIA H100 (GH100) + "9.0" = { + archName = "Hopper"; + minCudaMajorMinorVersion = "11.8"; + }; + + "9.0a" = { + archName = "Hopper"; + minCudaMajorMinorVersion = "12.0"; + }; + + # NVIDIA B100 + "10.0" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.7"; + }; + + "10.0a" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.7"; + }; + + "10.0f" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.9"; + }; + + # NVIDIA Jetson Thor Blackwell + "10.1" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.7"; + isJetson = true; + }; + + "10.1a" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.7"; + isJetson = true; + }; + + "10.1f" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.9"; + isJetson = true; + }; + + # NVIDIA ??? + "10.3" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.9"; + }; + + "10.3a" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.9"; + }; + + "10.3f" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.9"; + }; + + # NVIDIA GeForce RTX 5090 (GB202) etc. 
+ "12.0" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.8"; + }; + + "12.0a" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.8"; + }; + + "12.0f" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.9"; + }; + + # NVIDIA ??? + "12.1" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.9"; + }; + + "12.1a" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.9"; + }; + + "12.1f" = { + archName = "Blackwell"; + minCudaMajorMinorVersion = "12.9"; + }; + }; +} diff --git a/pkgs/development/cuda-modules/lib/data/default.nix b/pkgs/development/cuda-modules/lib/data/default.nix new file mode 100644 index 000000000000..60924cbb4f9e --- /dev/null +++ b/pkgs/development/cuda-modules/lib/data/default.nix @@ -0,0 +1,32 @@ +{ cudaLib, lib }: +{ + # See ./cuda.nix for documentation. + inherit (import ./cuda.nix { inherit cudaLib lib; }) + allSortedCudaCapabilities + cudaArchNameToCapabilities + cudaCapabilityToInfo + ; + + # See ./nvcc.nix for documentation. + inherit (import ./nvcc.nix) + nvccCompatibilities + ; + + # See ./redist.nix for documentation. + inherit (import ./redist.nix) + redistNames + redistSystems + redistUrlPrefix + ; + + /** + The path to the CUDA packages root directory, for use with `callPackage` to create new package sets. + + # Type + + ``` + cudaPackagesPath :: Path + ``` + */ + cudaPackagesPath = ./..; +} diff --git a/pkgs/development/cuda-modules/lib/data/nvcc.nix b/pkgs/development/cuda-modules/lib/data/nvcc.nix new file mode 100644 index 000000000000..535f64b40991 --- /dev/null +++ b/pkgs/development/cuda-modules/lib/data/nvcc.nix @@ -0,0 +1,268 @@ +{ + /** + Mapping of CUDA versions to NVCC compatibilities + + Taken from + https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#host-compiler-support-policy + + NVCC performs a version check on the host compiler's major version and so newer minor versions + of the compilers listed below will be supported, but major versions falling outside the range + will not be supported. + + NOTE: These constraints don't apply to Jetson, which uses something else. + + NOTE: NVIDIA can and will add support for newer compilers even during patch releases. + E.g.: CUDA 12.2.1 maxxed out with support for Clang 15.0; 12.2.2 added support for Clang 16.0. + + NOTE: Because all platforms NVIDIA supports use GCC and Clang, we omit the architectures here. 
+ + # Type + + ``` + nvccCompatibilities :: + AttrSet + String + { clang :: { maxMajorVersion :: String, minMajorVersion :: String } + , gcc :: { maxMajorVersion :: String, minMajorVersion :: String } + } + ``` + */ + nvccCompatibilities = { + # Our baseline + # https://docs.nvidia.com/cuda/archive/11.0/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features + "11.0" = { + clang = { + maxMajorVersion = "9"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "9"; + minMajorVersion = "6"; + }; + }; + + # Added support for Clang 10 and GCC 10 + # https://docs.nvidia.com/cuda/archive/11.1.1/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features + "11.1" = { + clang = { + maxMajorVersion = "10"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "10"; + minMajorVersion = "6"; + }; + }; + + # Added support for Clang 11 + # https://docs.nvidia.com/cuda/archive/11.2.2/cuda-installation-guide-linux/index.html#system-requirements + "11.2" = { + clang = { + maxMajorVersion = "11"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "10"; + minMajorVersion = "6"; + }; + }; + + # No changes from 11.2 to 11.3 + "11.3" = { + clang = { + maxMajorVersion = "11"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "10"; + minMajorVersion = "6"; + }; + }; + + # Added support for Clang 12 and GCC 11 + # https://docs.nvidia.com/cuda/archive/11.4.4/cuda-toolkit-release-notes/index.html#cuda-general-new-features + # NOTE: There is a bug in the version of GLIBC that GCC 11 uses which causes it to fail to compile some CUDA + # code. As such, we skip it for this release, and do the bump in 11.6 (skipping 11.5). + # https://forums.developer.nvidia.com/t/cuda-11-5-samples-throw-multiple-error-attribute-malloc-does-not-take-arguments/192750/15 + "11.4" = { + clang = { + maxMajorVersion = "12"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "10"; + minMajorVersion = "6"; + }; + }; + + # No changes from 11.4 to 11.5 + "11.5" = { + clang = { + maxMajorVersion = "12"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "10"; + minMajorVersion = "6"; + }; + }; + + # No changes from 11.5 to 11.6 + # However, as mentioned above, we add GCC 11 this release. 
+ "11.6" = { + clang = { + maxMajorVersion = "12"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "11"; + minMajorVersion = "6"; + }; + }; + + # Added support for Clang 13 + # https://docs.nvidia.com/cuda/archive/11.7.1/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features + "11.7" = { + clang = { + maxMajorVersion = "13"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "11"; + minMajorVersion = "6"; + }; + }; + + # Added support for Clang 14 + # https://docs.nvidia.com/cuda/archive/11.8.0/cuda-installation-guide-linux/index.html#system-requirements + "11.8" = { + clang = { + maxMajorVersion = "14"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "11"; + minMajorVersion = "6"; + }; + }; + + # Added support for GCC 12 + # https://docs.nvidia.com/cuda/archive/12.0.1/cuda-installation-guide-linux/index.html#system-requirements + "12.0" = { + clang = { + maxMajorVersion = "14"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "12"; + minMajorVersion = "6"; + }; + }; + + # Added support for Clang 15 + # https://docs.nvidia.com/cuda/archive/12.1.1/cuda-toolkit-release-notes/index.html#cuda-compilers-new-features + "12.1" = { + clang = { + maxMajorVersion = "15"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "12"; + minMajorVersion = "6"; + }; + }; + + # Added support for Clang 16 + # https://docs.nvidia.com/cuda/archive/12.2.2/cuda-installation-guide-linux/index.html#host-compiler-support-policy + "12.2" = { + clang = { + maxMajorVersion = "16"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "12"; + minMajorVersion = "6"; + }; + }; + + # No changes from 12.2 to 12.3 + # https://docs.nvidia.com/cuda/archive/12.3.2/cuda-installation-guide-linux/index.html#host-compiler-support-policy + "12.3" = { + clang = { + maxMajorVersion = "16"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "12"; + minMajorVersion = "6"; + }; + }; + + # Maximum Clang version is 17 + # Minimum GCC version is still 6, but all versions prior to GCC 7.3 are deprecated. 
+ # Maximum GCC version is 13.2 + # https://docs.nvidia.com/cuda/archive/12.4.1/cuda-installation-guide-linux/index.html#host-compiler-support-policy + "12.4" = { + clang = { + maxMajorVersion = "17"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "13"; + minMajorVersion = "6"; + }; + }; + + # No changes from 12.4 to 12.5 + # https://docs.nvidia.com/cuda/archive/12.5.1/cuda-installation-guide-linux/index.html#host-compiler-support-policy + "12.5" = { + clang = { + maxMajorVersion = "17"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "13"; + minMajorVersion = "6"; + }; + }; + + # Maximum Clang version is 18 + # https://docs.nvidia.com/cuda/archive/12.6.0/cuda-installation-guide-linux/index.html#host-compiler-support-policy + "12.6" = { + clang = { + maxMajorVersion = "18"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "13"; + minMajorVersion = "6"; + }; + }; + + # Maximum Clang version is 19, maximum GCC version is 14 + # https://docs.nvidia.com/cuda/archive/12.8.1/cuda-installation-guide-linux/index.html#host-compiler-support-policy + "12.8" = { + clang = { + maxMajorVersion = "19"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "14"; + minMajorVersion = "6"; + }; + }; + + # No changes from 12.8 to 12.9 + # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#host-compiler-support-policy + "12.9" = { + clang = { + maxMajorVersion = "19"; + minMajorVersion = "7"; + }; + gcc = { + maxMajorVersion = "14"; + minMajorVersion = "6"; + }; + }; + }; +} diff --git a/pkgs/development/cuda-modules/lib/data/redist.nix b/pkgs/development/cuda-modules/lib/data/redist.nix new file mode 100644 index 000000000000..52e91f81c35c --- /dev/null +++ b/pkgs/development/cuda-modules/lib/data/redist.nix @@ -0,0 +1,56 @@ +{ + /** + A list of redistributable names to use in creation of the `redistName` option type. + + # Type + + ``` + redistNames :: [String] + ``` + */ + redistNames = [ + "cublasmp" + "cuda" + "cudnn" + "cudss" + "cuquantum" + "cusolvermp" + "cusparselt" + "cutensor" + "nppplus" + "nvcomp" + # "nvidia-driver", # NOTE: Some of the earlier manifests don't follow our scheme. + "nvjpeg2000" + "nvpl" + "nvtiff" + "tensorrt" # NOTE: not truly a redist; uses different naming convention + ]; + + /** + A list of redistributable systems to use in creation of the `redistSystem` option type. + + # Type + + ``` + redistSystems :: [String] + ``` + */ + redistSystems = [ + "linux-aarch64" + "linux-all" # Taken to mean all other linux systems + "linux-sbsa" + "linux-x86_64" + "source" # Source-agnostic platform + ]; + + /** + The prefix of the URL for redistributable files. 
+ + # Type + + ``` + redistUrlPrefix :: String + ``` + */ + redistUrlPrefix = "https://developer.download.nvidia.com/compute"; +} diff --git a/pkgs/development/cuda-modules/lib/default.nix b/pkgs/development/cuda-modules/lib/default.nix new file mode 100644 index 000000000000..6918d9ca5053 --- /dev/null +++ b/pkgs/development/cuda-modules/lib/default.nix @@ -0,0 +1,13 @@ +let + lib = import ../../../../lib; +in +lib.fixedPoints.makeExtensible (final: { + data = import ./data { + inherit lib; + cudaLib = final; + }; + utils = import ./utils { + inherit lib; + cudaLib = final; + }; +}) diff --git a/pkgs/development/cuda-modules/lib/utils/assertions.nix b/pkgs/development/cuda-modules/lib/utils/assertions.nix new file mode 100644 index 000000000000..5fc940889b7f --- /dev/null +++ b/pkgs/development/cuda-modules/lib/utils/assertions.nix @@ -0,0 +1,139 @@ +{ cudaLib, lib }: +{ + /** + Evaluate assertions and add error context to return value. + + NOTE: No guarantees are made about this function's stability. You may use it at your own risk. + + # Type + + ``` + _evaluateAssertions + :: (assertions :: List { assertion :: Bool, message :: String }) + -> Bool + ``` + */ + _evaluateAssertions = + assertions: + let + failedAssertionsString = cudaLib.utils._mkFailedAssertionsString assertions; + in + if failedAssertionsString == "" then + true + else + lib.addErrorContext "with failed assertions:${failedAssertionsString}" false; + + /** + Function to generate a string of failed assertions. + + NOTE: No guarantees are made about this function's stability. You may use it at your own risk. + + # Type + + ``` + _mkFailedAssertionsString + :: (assertions :: List { assertion :: Bool, message :: String }) + -> String + ``` + + # Inputs + + `assertions` + + : A list of assertions to evaluate + + # Examples + + :::{.example} + ## `cudaLib.utils._mkFailedAssertionsString` usage examples + + ```nix + _mkFailedAssertionsString [ + { assertion = false; message = "Assertion 1 failed"; } + { assertion = true; message = "Assertion 2 failed"; } + ] + => "\n- Assertion 1 failed" + ``` + + ```nix + _mkFailedAssertionsString [ + { assertion = false; message = "Assertion 1 failed"; } + { assertion = false; message = "Assertion 2 failed"; } + ] + => "\n- Assertion 1 failed\n- Assertion 2 failed" + ``` + ::: + */ + _mkFailedAssertionsString = lib.foldl' ( + failedAssertionsString: + { assertion, message }: + failedAssertionsString + lib.optionalString (!assertion) ("\n- " + message) + ) ""; + + /** + Utility function to generate assertions for missing packages. + + Used to mark a package as unsupported if any of its required packages are missing (null). + + Expects a set of attributes. + + Most commonly used in overrides files on a callPackage-provided attribute set of packages. + + NOTE: We typically use platfromAssertions instead of brokenAssertions because the presence of packages set to null + means evaluation will fail if package attributes are accessed without checking for null first. OfBorg evaluation + sets allowBroken to true, which means we can't rely on brokenAssertions to prevent evaluation of a package with + missing dependencies. + + NOTE: No guarantees are made about this function's stability. You may use it at your own risk. 
+ + # Type + + ``` + _mkMissingPackagesAssertions + :: (attrs :: AttrSet) + -> (assertions :: List { assertion :: Bool, message :: String }) + ``` + + # Inputs + + `attrs` + + : The attributes to check for null + + # Examples + + :::{.example} + ## `cudaLib.utils._mkMissingPackagesAssertions` usage examples + + ```nix + { + lib, + libcal ? null, + libcublas, + utils, + }: + let + inherit (lib.attrsets) recursiveUpdate; + inherit (cudaLib.utils) _mkMissingPackagesAssertions; + in + prevAttrs: { + passthru = prevAttrs.passthru or { } // { + platformAssertions = + prevAttrs.passthru.platformAssertions or [ ] + ++ _mkMissingPackagesAssertions { inherit libcal; }; + }; + } + ``` + ::: + */ + _mkMissingPackagesAssertions = lib.flip lib.pipe [ + # Take the attributes that are null. + (lib.filterAttrs (_: value: value == null)) + lib.attrNames + # Map them to assertions. + (lib.map (name: { + message = "${name} is available"; + assertion = false; + })) + ]; +} diff --git a/pkgs/development/cuda-modules/lib/utils/cuda.nix b/pkgs/development/cuda-modules/lib/utils/cuda.nix new file mode 100644 index 000000000000..e372fda0d1b5 --- /dev/null +++ b/pkgs/development/cuda-modules/lib/utils/cuda.nix @@ -0,0 +1,129 @@ +{ lib }: +{ + /** + Returns whether a capability should be built by default for a particular CUDA version. + + Capabilities built by default are baseline, non-Jetson capabilities with relatively recent CUDA support. + + NOTE: No guarantees are made about this function's stability. You may use it at your own risk. + + # Type + + ``` + _cudaCapabilityIsDefault + :: (cudaMajorMinorVersion :: Version) + -> (cudaCapabilityInfo :: CudaCapabilityInfo) + -> Bool + ``` + + # Inputs + + `cudaMajorMinorVersion` + + : The CUDA version to check + + `cudaCapabilityInfo` + + : The capability information to check + */ + _cudaCapabilityIsDefault = + cudaMajorMinorVersion: cudaCapabilityInfo: + let + recentCapability = + cudaCapabilityInfo.dontDefaultAfterCudaMajorMinorVersion == null + || lib.versionAtLeast cudaCapabilityInfo.dontDefaultAfterCudaMajorMinorVersion cudaMajorMinorVersion; + in + recentCapability + && !cudaCapabilityInfo.isJetson + && !cudaCapabilityInfo.isArchitectureSpecific + && !cudaCapabilityInfo.isFamilySpecific; + + /** + Returns whether a capability is supported for a particular CUDA version. + + NOTE: No guarantees are made about this function's stability. You may use it at your own risk. + + # Type + + ``` + _cudaCapabilityIsSupported + :: (cudaMajorMinorVersion :: Version) + -> (cudaCapabilityInfo :: CudaCapabilityInfo) + -> Bool + ``` + + # Inputs + + `cudaMajorMinorVersion` + + : The CUDA version to check + + `cudaCapabilityInfo` + + : The capability information to check + */ + _cudaCapabilityIsSupported = + cudaMajorMinorVersion: cudaCapabilityInfo: + let + lowerBoundSatisfied = lib.versionAtLeast cudaMajorMinorVersion cudaCapabilityInfo.minCudaMajorMinorVersion; + upperBoundSatisfied = + cudaCapabilityInfo.maxCudaMajorMinorVersion == null + || lib.versionAtLeast cudaCapabilityInfo.maxCudaMajorMinorVersion cudaMajorMinorVersion; + in + lowerBoundSatisfied && upperBoundSatisfied; + + /** + Generates a CUDA variant name from a version. + + NOTE: No guarantees are made about this function's stability. You may use it at your own risk. 
+ + # Type + + ``` + _mkCudaVariant :: (version :: String) -> String + ``` + + # Inputs + + `version` + + : The version string + + # Examples + + :::{.example} + ## `cudaLib.utils._mkCudaVariant` usage examples + + ```nix + _mkCudaVariant "11.0" + => "cuda11" + ``` + ::: + */ + _mkCudaVariant = version: "cuda${lib.versions.major version}"; + + /** + A predicate which, given a package, returns true if the package has a free license or one of NVIDIA's licenses. + + This function is intended to be provided as `config.allowUnfreePredicate` when `import`-ing Nixpkgs. + + # Type + + ``` + allowUnfreeCudaPredicate :: (package :: Package) -> Bool + ``` + */ + allowUnfreeCudaPredicate = + package: + lib.all ( + license: + license.free + || lib.elem license.shortName [ + "CUDA EULA" + "cuDNN EULA" + "cuSPARSELt EULA" + "cuTENSOR EULA" + "NVidia OptiX EULA" + ] + ) (lib.toList package.meta.license); +} diff --git a/pkgs/development/cuda-modules/lib/utils/default.nix b/pkgs/development/cuda-modules/lib/utils/default.nix new file mode 100644 index 000000000000..ee9ace015fae --- /dev/null +++ b/pkgs/development/cuda-modules/lib/utils/default.nix @@ -0,0 +1,49 @@ +{ cudaLib, lib }: +{ + # See ./assertions.nix for documentation. + inherit (import ./assertions.nix { inherit cudaLib lib; }) + _evaluateAssertions + _mkFailedAssertionsString + _mkMissingPackagesAssertions + ; + + # See ./cuda.nix for documentation. + inherit (import ./cuda.nix { inherit lib; }) + _cudaCapabilityIsDefault + _cudaCapabilityIsSupported + _mkCudaVariant + allowUnfreeCudaPredicate + ; + + # See ./meta.nix for documentation. + inherit (import ./meta.nix { inherit cudaLib lib; }) + _mkMetaBadPlatforms + _mkMetaBroken + ; + + # See ./redist.nix for documentation. + inherit (import ./redist.nix { inherit cudaLib lib; }) + _redistSystemIsSupported + getNixSystems + getRedistSystem + mkRedistUrl + ; + + # See ./strings.nix for documentation. + inherit (import ./strings.nix { inherit cudaLib lib; }) + dotsToUnderscores + dropDots + formatCapabilities + mkCmakeCudaArchitecturesString + mkGencodeFlag + mkRealArchitecture + mkVersionedName + mkVirtualArchitecture + ; + + # See ./versions.nix for documentation. + inherit (import ./versions.nix { inherit cudaLib lib; }) + majorMinorPatch + trimComponents + ; +} diff --git a/pkgs/development/cuda-modules/lib/utils/meta.nix b/pkgs/development/cuda-modules/lib/utils/meta.nix new file mode 100644 index 000000000000..327d4ae97d95 --- /dev/null +++ b/pkgs/development/cuda-modules/lib/utils/meta.nix @@ -0,0 +1,71 @@ +{ cudaLib, lib }: +{ + /** + Returns a list of bad platforms for a given package if assertsions in `finalAttrs.passthru.platformAssertions` + fail, optionally logging evaluation warnings for each reason. + + NOTE: No guarantees are made about this function's stability. You may use it at your own risk. + + NOTE: This function requires `finalAttrs.passthru.platformAssertions` to be a list of assertions and + `finalAttrs.finalPackage.name` and `finalAttrs.finalPackage.stdenv` to be available. 
+ + # Type + + ``` + _mkMetaBadPlatforms :: (warn :: Bool) -> (finalAttrs :: AttrSet) -> List String + ``` + */ + _mkMetaBadPlatforms = + warn: finalAttrs: + let + failedAssertionsString = cudaLib.utils._mkFailedAssertionsString finalAttrs.passthru.platformAssertions; + hasFailedAssertions = failedAssertionsString != ""; + finalStdenv = finalAttrs.finalPackage.stdenv; + in + lib.warnIf (warn && hasFailedAssertions) + "Package ${finalAttrs.finalPackage.name} is unsupported on this platform due to the following failed assertions:${failedAssertionsString}" + ( + lib.optionals hasFailedAssertions ( + lib.unique [ + finalStdenv.buildPlatform.system + finalStdenv.hostPlatform.system + finalStdenv.targetPlatform.system + ] + ) + ); + + /** + Returns a boolean indicating whether the package is broken as a result of `finalAttrs.passthru.brokenAssertions`, + optionally logging evaluation warnings for each reason. + + NOTE: No guarantees are made about this function's stability. You may use it at your own risk. + + NOTE: This function requires `finalAttrs.passthru.brokenAssertions` to be a list of assertions and + `finalAttrs.finalPackage.name` to be available. + + # Type + + ``` + _mkMetaBroken :: (warn :: Bool) -> (finalAttrs :: AttrSet) -> Bool + ``` + + # Inputs + + `warn` + + : A boolean indicating whether to log warnings + + `finalAttrs` + + : The final attributes of the package + */ + _mkMetaBroken = + warn: finalAttrs: + let + failedAssertionsString = cudaLib.utils._mkFailedAssertionsString finalAttrs.passthru.brokenAssertions; + hasFailedAssertions = failedAssertionsString != ""; + in + lib.warnIf (warn && hasFailedAssertions) + "Package ${finalAttrs.finalPackage.name} is marked as broken due to the following failed assertions:${failedAssertionsString}" + hasFailedAssertions; +} diff --git a/pkgs/development/cuda-modules/lib/utils/redist.nix b/pkgs/development/cuda-modules/lib/utils/redist.nix new file mode 100644 index 000000000000..b9cd2a3b1e0d --- /dev/null +++ b/pkgs/development/cuda-modules/lib/utils/redist.nix @@ -0,0 +1,196 @@ +{ cudaLib, lib }: +{ + /** + Returns a boolean indicating whether the provided redist system is supported by any of the provided redist systems. + + NOTE: No guarantees are made about this function's stability. You may use it at your own risk. + + # Type + + ``` + _redistSystemIsSupported + :: (redistSystem :: RedistSystem) + -> (redistSystems :: List RedistSystem) + -> Bool + ``` + + # Inputs + + `redistSystem` + + : The redist system to check + + `redistSystems` + + : The list of redist systems to check against + + # Examples + + :::{.example} + ## `cudaLib.utils._redistSystemIsSupported` usage examples + + ```nix + _redistSystemIsSupported "linux-x86_64" [ "linux-x86_64" ] + => true + ``` + + ```nix + _redistSystemIsSupported "linux-x86_64" [ "linux-aarch64" ] + => false + ``` + + ```nix + _redistSystemIsSupported "linux-x86_64" [ "linux-aarch64" "linux-x86_64" ] + => true + ``` + + ```nix + _redistSystemIsSupported "linux-x86_64" [ "linux-aarch64" "linux-all" ] + => true + ``` + ::: + */ + _redistSystemIsSupported = + redistSystem: redistSystems: + lib.findFirst ( + redistSystem': + redistSystem' == redistSystem || redistSystem' == "linux-all" || redistSystem' == "source" + ) null redistSystems != null; + + /** + Maps a NVIDIA redistributable system to Nix systems. + + NOTE: This function returns a list of systems because the redistributable systems `"linux-all"` and `"source"` can + be built on multiple systems. 
+ + NOTE: This function *will* be called by unsupported systems because `cudaPackages` is evaluated on all systems. As + such, we need to handle unsupported systems gracefully. + + # Type + + ``` + getNixSystems :: (redistSystem :: RedistSystem) -> [String] + ``` + + # Inputs + + `redistSystem` + + : The NVIDIA redistributable system + + # Examples + + :::{.example} + ## `cudaLib.utils.getNixSystems` usage examples + + ```nix + getNixSystems "linux-sbsa" + => [ "aarch64-linux" ] + ``` + + ```nix + getNixSystems "linux-aarch64" + => [ "aarch64-linux" ] + ``` + ::: + */ + getNixSystems = + redistSystem: + if redistSystem == "linux-x86_64" then + [ "x86_64-linux" ] + else if redistSystem == "linux-sbsa" || redistSystem == "linux-aarch64" then + [ "aarch64-linux" ] + else if redistSystem == "linux-all" || redistSystem == "source" then + [ + "aarch64-linux" + "x86_64-linux" + ] + else + [ ]; + + /** + Maps a Nix system to a NVIDIA redistributable system. + + NOTE: We swap out the default `linux-sbsa` redist (for server-grade ARM chips) with the `linux-aarch64` redist + (which is for Jetson devices) if we're building any Jetson devices. Since both are based on aarch64, we can only + have one or the other, otherwise there's an ambiguity as to which should be used. + + NOTE: This function *will* be called by unsupported systems because `cudaPackages` is evaluated on all systems. As + such, we need to handle unsupported systems gracefully. + + # Type + + ``` + getRedistSystem :: (hasJetsonCudaCapability :: Bool) -> (nixSystem :: String) -> String + ``` + + # Inputs + + `hasJetsonCudaCapability` + + : If configured for a Jetson device + + `nixSystem` + + : The Nix system + + # Examples + + :::{.example} + ## `cudaLib.utils.getRedistSystem` usage examples + + ```nix + getRedistSystem true "aarch64-linux" + => "linux-aarch64" + ``` + + ```nix + getRedistSystem false "aarch64-linux" + => "linux-sbsa" + ``` + ::: + */ + getRedistSystem = + hasJetsonCudaCapability: nixSystem: + if nixSystem == "x86_64-linux" then + "linux-x86_64" + else if nixSystem == "aarch64-linux" then + if hasJetsonCudaCapability then "linux-aarch64" else "linux-sbsa" + else + "unsupported"; + + /** + Function to generate a URL for something in the redistributable tree. + + # Type + + ``` + mkRedistUrl :: (redistName :: RedistName) -> (relativePath :: NonEmptyStr) -> RedistUrl + ``` + + # Inputs + + `redistName` + + : The name of the redistributable + + `relativePath` + + : The relative path to a file in the redistributable tree + */ + mkRedistUrl = + redistName: relativePath: + lib.concatStringsSep "/" ( + [ cudaLib.data.redistUrlPrefix ] + ++ ( + if redistName != "tensorrt" then + [ + redistName + "redist" + ] + else + [ "machine-learning" ] + ) + ++ [ relativePath ] + ); +} diff --git a/pkgs/development/cuda-modules/lib/utils/strings.nix b/pkgs/development/cuda-modules/lib/utils/strings.nix new file mode 100644 index 000000000000..47a71b395f4f --- /dev/null +++ b/pkgs/development/cuda-modules/lib/utils/strings.nix @@ -0,0 +1,379 @@ +{ cudaLib, lib }: +{ + /** + Replaces dots in a string with underscores. + + # Type + + ``` + dotsToUnderscores :: (str :: String) -> String + ``` + + # Inputs + + `str` + + : The string for which dots shall be replaced by underscores + + # Examples + + :::{.example} + ## `cudaLib.utils.dotsToUnderscores` usage examples + + ```nix + dotsToUnderscores "1.2.3" + => "1_2_3" + ``` + ::: + */ + dotsToUnderscores = lib.replaceStrings [ "." ] [ "_" ]; + + /** + Removes the dots from a string. 
+ + # Type + + ``` + dropDots :: (str :: String) -> String + ``` + + # Inputs + + `str` + + : The string to remove dots from + + # Examples + + :::{.example} + ## `cudaLib.utils.dropDots` usage examples + + ```nix + dropDots "1.2.3" + => "123" + ``` + ::: + */ + dropDots = lib.replaceStrings [ "." ] [ "" ]; + + /** + Produces an attribute set of useful data and functionality for packaging CUDA software within Nixpkgs. + + # Type + + ``` + formatCapabilities + :: { cudaCapabilityToInfo :: AttrSet CudaCapability CudaCapabilityInfo + , cudaCapabilities :: List CudaCapability + , cudaForwardCompat :: Bool + } + -> { cudaCapabilities :: List CudaCapability + , cudaForwardCompat :: Bool + , gencode :: List String + , realArches :: List String + , virtualArches :: List String + , archNames :: List String + , arches :: List String + , gencodeString :: String + , cmakeCudaArchitecturesString :: String + } + ``` + + # Inputs + + `cudaCapabilityToInfo` + + : A mapping of CUDA capabilities to their information + + `cudaCapabilities` + + : A list of CUDA capabilities to use + + `cudaForwardCompat` + + : A boolean indicating whether to include the forward compatibility gencode (+PTX) to support future GPU + generations + */ + formatCapabilities = + { + cudaCapabilityToInfo, + cudaCapabilities, + cudaForwardCompat, + }: + let + /** + The real architectures for the given CUDA capabilities. + + # Type + + ``` + realArches :: List String + ``` + */ + realArches = lib.map cudaLib.utils.mkRealArchitecture cudaCapabilities; + + /** + The virtual architectures for the given CUDA capabilities. + + These are typically used for forward compatibility, when trying to support an architecture newer than the CUDA + version allows. + + # Type + + ``` + virtualArches :: List String + ``` + */ + virtualArches = lib.map cudaLib.utils.mkVirtualArchitecture cudaCapabilities; + + /** + The gencode flags for the given CUDA capabilities. + + # Type + + ``` + gencode :: List String + ``` + */ + gencode = + let + base = lib.map (cudaLib.utils.mkGencodeFlag "sm") cudaCapabilities; + forward = cudaLib.utils.mkGencodeFlag "compute" (lib.last cudaCapabilities); + in + base ++ lib.optionals cudaForwardCompat [ forward ]; + in + { + inherit + cudaCapabilities + cudaForwardCompat + gencode + realArches + virtualArches + ; + + /** + The architecture names for the given CUDA capabilities. + + # Type + + ``` + archNames :: List String + ``` + */ + # E.g. [ "Ampere" "Turing" ] + archNames = lib.pipe cudaCapabilities [ + (lib.map (cudaCapability: cudaCapabilityToInfo.${cudaCapability}.archName)) + lib.unique + lib.naturalSort + ]; + + /** + The architectures for the given CUDA capabilities, including both real and virtual architectures. + + When `cudaForwardCompat` is enabled, the last architecture in the list is used as the forward compatibility architecture. + + # Type + + ``` + arches :: List String + ``` + */ + # E.g. [ "sm_75" "sm_86" "compute_86" ] + arches = realArches ++ lib.optionals cudaForwardCompat [ (lib.last virtualArches) ]; + + /** + The CMake-compatible CUDA architectures string for the given CUDA capabilities. + + # Type + + ``` + cmakeCudaArchitecturesString :: String + ``` + */ + cmakeCudaArchitecturesString = cudaLib.utils.mkCmakeCudaArchitecturesString cudaCapabilities; + + /** + The gencode string for the given CUDA capabilities. 
+ + # Type + + ``` + gencodeString :: String + ``` + */ + gencodeString = lib.concatStringsSep " " gencode; + }; + + /** + Produces a CMake-compatible CUDA architecture string from a list of CUDA capabilities. + + # Type + + ``` + mkCmakeCudaArchitecturesString :: (cudaCapabilities :: List String) -> String + ``` + + # Inputs + + `cudaCapabilities` + + : The CUDA capabilities to convert + + # Examples + + :::{.example} + ## `cudaLib.utils.mkCmakeCudaArchitecturesString` usage examples + + ```nix + mkCmakeCudaArchitecturesString [ "8.9" "10.0a" ] + => "89;100a" + ``` + ::: + */ + mkCmakeCudaArchitecturesString = lib.concatMapStringsSep ";" cudaLib.utils.dropDots; + + /** + Produces a gencode flag from a CUDA capability. + + # Type + + ``` + mkGencodeFlag :: (archPrefix :: String) -> (cudaCapability :: String) -> String + ``` + + # Inputs + + `archPrefix` + + : The architecture prefix to use for the `code` field + + `cudaCapability` + + : The CUDA capability to convert + + # Examples + + :::{.example} + ## `cudaLib.utils.mkGencodeFlag` usage examples + + ```nix + mkGencodeFlag "sm" "8.9" + => "-gencode=arch=compute_89,code=sm_89" + ``` + + ```nix + mkGencodeFlag "compute" "10.0a" + => "-gencode=arch=compute_100a,code=compute_100a" + ``` + ::: + */ + mkGencodeFlag = + archPrefix: cudaCapability: + let + cap = cudaLib.utils.dropDots cudaCapability; + in + "-gencode=arch=compute_${cap},code=${archPrefix}_${cap}"; + + /** + Produces a real architecture string from a CUDA capability. + + # Type + + ``` + mkRealArchitecture :: (cudaCapability :: String) -> String + ``` + + # Inputs + + `cudaCapability` + + : The CUDA capability to convert + + # Examples + + :::{.example} + ## `cudaLib.utils.mkRealArchitecture` usage examples + + ```nix + mkRealArchitecture "8.9" + => "sm_89" + ``` + + ```nix + mkRealArchitecture "10.0a" + => "sm_100a" + ``` + ::: + */ + mkRealArchitecture = cudaCapability: "sm_" + cudaLib.utils.dropDots cudaCapability; + + /** + Create a versioned attribute name from a version by replacing dots with underscores. + + # Type + + ``` + mkVersionedName :: (name :: String) -> (version :: Version) -> String + ``` + + # Inputs + + `name` + + : The name to use + + `version` + + : The version to use + + # Examples + + :::{.example} + ## `cudaLib.utils.mkVersionedName` usage examples + + ```nix + mkVersionedName "hello" "1.2.3" + => "hello_1_2_3" + ``` + + ```nix + mkVersionedName "cudaPackages" "12.8" + => "cudaPackages_12_8" + ``` + ::: + */ + mkVersionedName = name: version: "${name}_${cudaLib.utils.dotsToUnderscores version}"; + + /** + Produces a virtual architecture string from a CUDA capability. + + # Type + + ``` + mkVirtualArchitecture :: (cudaCapability :: String) -> String + ``` + + # Inputs + + `cudaCapability` + + : The CUDA capability to convert + + # Examples + + :::{.example} + ## `cudaLib.utils.mkVirtualArchitecture` usage examples + + ```nix + mkVirtualArchitecture "8.9" + => "compute_89" + ``` + + ```nix + mkVirtualArchitecture "10.0a" + => "compute_100a" + ``` + ::: + */ + mkVirtualArchitecture = cudaCapability: "compute_" + cudaLib.utils.dropDots cudaCapability; +} diff --git a/pkgs/development/cuda-modules/lib/utils/versions.nix b/pkgs/development/cuda-modules/lib/utils/versions.nix new file mode 100644 index 000000000000..976ad65b2b65 --- /dev/null +++ b/pkgs/development/cuda-modules/lib/utils/versions.nix @@ -0,0 +1,76 @@ +{ cudaLib, lib }: +{ + /** + Extracts the major, minor, and patch version from a string. 
+ + # Type + + ``` + majorMinorPatch :: (version :: String) -> String + ``` + + # Inputs + + `version` + + : The version string + + # Examples + + :::{.example} + ## `cudaLib.utils.majorMinorPatch` usage examples + + ```nix + majorMinorPatch "11.0.3.4" + => "11.0.3" + ``` + ::: + */ + majorMinorPatch = cudaLib.utils.trimComponents 3; + + /** + Get a version string with no more than than the specified number of components. + + # Type + + ``` + trimComponents :: (numComponents :: Integer) -> (version :: String) -> String + ``` + + # Inputs + + `numComponents` + : A positive integer corresponding to the maximum number of components to keep + + `version` + : A version string + + # Examples + + :::{.example} + ## `cudaLib.utils.trimComponents` usage examples + + ```nix + trimComponents 1 "1.2.3.4" + => "1" + ``` + + ```nix + trimComponents 3 "1.2.3.4" + => "1.2.3" + ``` + + ```nix + trimComponents 9 "1.2.3.4" + => "1.2.3.4" + ``` + ::: + */ + trimComponents = + n: v: + lib.pipe v [ + lib.splitVersion + (lib.take n) + (lib.concatStringsSep ".") + ]; +} diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix index 76949cbd03f0..59d64e99c884 100644 --- a/pkgs/top-level/all-packages.nix +++ b/pkgs/top-level/all-packages.nix @@ -2719,6 +2719,8 @@ with pkgs; cron = isc-cron; + cudaLib = import ../development/cuda-modules/lib; + cudaPackages_11_0 = callPackage ./cuda-packages.nix { cudaMajorMinorVersion = "11.0"; }; cudaPackages_11_1 = callPackage ./cuda-packages.nix { cudaMajorMinorVersion = "11.1"; }; cudaPackages_11_2 = callPackage ./cuda-packages.nix { cudaMajorMinorVersion = "11.2"; }; From 629ae4e42c4764f1e56cd64746a3f19e28ff62a0 Mon Sep 17 00:00:00 2001 From: Connor Baker Date: Fri, 9 May 2025 23:07:38 +0000 Subject: [PATCH 177/220] cudaPackages: rewrite backendStdenv Signed-off-by: Connor Baker --- .../cuda-modules/packages/backendStdenv.nix | 148 ++++++++++++++++-- 1 file changed, 135 insertions(+), 13 deletions(-) diff --git a/pkgs/development/cuda-modules/packages/backendStdenv.nix b/pkgs/development/cuda-modules/packages/backendStdenv.nix index b3595835fe38..ac9a8ebaf44f 100644 --- a/pkgs/development/cuda-modules/packages/backendStdenv.nix +++ b/pkgs/development/cuda-modules/packages/backendStdenv.nix @@ -1,4 +1,3 @@ -# Exposed as cudaPackages.backendStdenv. # This is what nvcc uses as a backend, # and it has to be an officially supported one (e.g. gcc11 for cuda11). # @@ -7,26 +6,149 @@ # E.g. for cudaPackages_11_8 we use gcc11 with gcc12's libstdc++ # Cf. https://github.com/NixOS/nixpkgs/pull/218265 for context { + config, + cudaLib, cudaMajorMinorVersion, lib, - nvccCompatibilities, pkgs, stdenv, stdenvAdapters, }: - let - gccMajorVersion = nvccCompatibilities.${cudaMajorMinorVersion}.gccMaxMajorVersion; - cudaStdenv = stdenvAdapters.useLibsFrom stdenv pkgs."gcc${gccMajorVersion}Stdenv"; + inherit (builtins) toJSON; + inherit (cudaLib.data) allSortedCudaCapabilities cudaCapabilityToInfo nvccCompatibilities; + inherit (cudaLib.utils) + _cudaCapabilityIsDefault + _cudaCapabilityIsSupported + _evaluateAssertions + getRedistSystem + mkVersionedName + ; + inherit (lib) addErrorContext; + inherit (lib.customisation) extendDerivation; + inherit (lib.lists) filter intersectLists subtractLists; + + # NOTE: By virtue of processing a sorted list (allSortedCudaCapabilities), our groups will be sorted. 
+ + architectureSpecificCudaCapabilities = filter ( + cudaCapability: cudaCapabilityToInfo.${cudaCapability}.isArchitectureSpecific + ) allSortedCudaCapabilities; + + familySpecificCudaCapabilities = filter ( + cudaCapability: cudaCapabilityToInfo.${cudaCapability}.isFamilySpecific + ) allSortedCudaCapabilities; + + jetsonCudaCapabilities = filter ( + cudaCapability: cudaCapabilityToInfo.${cudaCapability}.isJetson + ) allSortedCudaCapabilities; + passthruExtra = { - # cudaPackages.backendStdenv.nixpkgsCompatibleLibstdcxx has been removed, - # if you need it you're likely doing something wrong. There has been a - # warning here for a month or so. Now we can no longer return any - # meaningful value in its place and drop the attribute entirely. + nvccHostCCMatchesStdenvCC = backendStdenv.cc == stdenv.cc; + + # The Nix system of the host platform. + hostNixSystem = stdenv.hostPlatform.system; + + # The Nix system of the host platform for the CUDA redistributable. + hostRedistSystem = getRedistSystem passthruExtra.hasJetsonCudaCapability stdenv.hostPlatform.system; + + # Sets whether packages should be built with forward compatibility. + # TODO(@connorbaker): If the requested CUDA capabilities are not supported by the current CUDA version, + # should we throw an evaluation warning and build with forward compatibility? + cudaForwardCompat = config.cudaForwardCompat or true; + + # CUDA capabilities which are supported by the current CUDA version. + supportedCudaCapabilities = filter ( + cudaCapability: + _cudaCapabilityIsSupported cudaMajorMinorVersion cudaCapabilityToInfo.${cudaCapability} + ) allSortedCudaCapabilities; + + # Find the default set of capabilities for this CUDA version using the list of supported capabilities. + # Includes only baseline capabilities. + defaultCudaCapabilities = filter ( + cudaCapability: + _cudaCapabilityIsDefault cudaMajorMinorVersion cudaCapabilityToInfo.${cudaCapability} + ) passthruExtra.supportedCudaCapabilities; + + # The resolved requested or default CUDA capabilities. + cudaCapabilities = + if config.cudaCapabilities or [ ] != [ ] then + config.cudaCapabilities + else + passthruExtra.defaultCudaCapabilities; + + # Requested architecture-specific CUDA capabilities. + requestedArchitectureSpecificCudaCapabilities = intersectLists architectureSpecificCudaCapabilities passthruExtra.cudaCapabilities; + + # Whether the requested CUDA capabilities include architecture-specific CUDA capabilities. + hasArchitectureSpecificCudaCapability = + passthruExtra.requestedArchitectureSpecificCudaCapabilities != [ ]; + + # Requested family-specific CUDA capabilities. + requestedFamilySpecificCudaCapabilities = intersectLists familySpecificCudaCapabilities passthruExtra.cudaCapabilities; + + # Whether the requested CUDA capabilities include family-specific CUDA capabilities. + hasFamilySpecificCudaCapability = passthruExtra.requestedFamilySpecificCudaCapabilities != [ ]; + + # Requested Jetson CUDA capabilities. + requestedJetsonCudaCapabilities = intersectLists jetsonCudaCapabilities passthruExtra.cudaCapabilities; + + # Whether the requested CUDA capabilities include Jetson CUDA capabilities. + hasJetsonCudaCapability = passthruExtra.requestedJetsonCudaCapabilities != [ ]; }; - assertCondition = true; + + assertions = + let + # Jetson devices cannot be targeted by the same binaries which target non-Jetson devices. 
While + # NVIDIA provides both `linux-aarch64` and `linux-sbsa` packages, which both target `aarch64`, + # they are built with different settings and cannot be mixed. + jetsonMesssagePrefix = "Jetson CUDA capabilities (${toJSON passthruExtra.requestedJetsonCudaCapabilities})"; + + # Remove all known capabilities from the user's list to find unrecognized capabilities. + unrecognizedCudaCapabilities = subtractLists allSortedCudaCapabilities passthruExtra.cudaCapabilities; + + # Remove all supported capabilities from the user's list to find unsupported capabilities. + unsupportedCudaCapabilities = subtractLists passthruExtra.supportedCudaCapabilities passthruExtra.cudaCapabilities; + in + [ + { + message = "Unrecognized CUDA capabilities: ${toJSON unrecognizedCudaCapabilities}"; + assertion = unrecognizedCudaCapabilities == [ ]; + } + { + message = "Unsupported CUDA capabilities: ${toJSON unsupportedCudaCapabilities}"; + assertion = unsupportedCudaCapabilities == [ ]; + } + { + message = + "${jetsonMesssagePrefix} require hostPlatform (currently ${passthruExtra.hostNixSystem}) " + + "to be aarch64-linux"; + assertion = passthruExtra.hasJetsonCudaCapability -> passthruExtra.hostNixSystem == "aarch64-linux"; + } + { + message = + let + # Find the capabilities which are not Jetson capabilities. + requestedNonJetsonCudaCapabilities = subtractLists ( + passthruExtra.requestedJetsonCudaCapabilities + ++ passthruExtra.requestedArchitectureSpecificCudaCapabilities + ++ passthruExtra.requestedFamilySpecificCudaCapabilities + ) passthruExtra.cudaCapabilities; + in + "${jetsonMesssagePrefix} cannot be specified with non-Jetson capabilities " + + "(${toJSON requestedNonJetsonCudaCapabilities})"; + assertion = + passthruExtra.hasJetsonCudaCapability + -> passthruExtra.requestedJetsonCudaCapabilities == passthruExtra.cudaCapabilities; + } + ]; + + assertCondition = addErrorContext "while evaluating ${mkVersionedName "cudaPackages" cudaMajorMinorVersion}.backendStdenv" ( + _evaluateAssertions assertions + ); + + backendStdenv = + stdenvAdapters.useLibsFrom stdenv + pkgs."gcc${nvccCompatibilities.${cudaMajorMinorVersion}.gcc.maxMajorVersion}Stdenv"; in - # TODO: Consider testing whether we in fact use the newer libstdc++ - -lib.extendDerivation assertCondition passthruExtra cudaStdenv +extendDerivation assertCondition passthruExtra backendStdenv From c5dad2886a5623fc5e41054ab9ed9ff8e5f7ac91 Mon Sep 17 00:00:00 2001 From: Connor Baker Date: Fri, 9 May 2025 22:16:23 +0000 Subject: [PATCH 178/220] cudaPackages: switch to cudaLib Signed-off-by: Connor Baker --- doc/languages-frameworks/cuda.section.md | 4 +- pkgs/development/cuda-modules/README.md | 3 - pkgs/development/cuda-modules/aliases.nix | 5 +- .../cuda-modules/cuda/extension.nix | 2 +- pkgs/development/cuda-modules/cudnn/shims.nix | 6 +- .../cuda-modules/cusparselt/extension.nix | 18 +- .../cuda-modules/cutensor/extension.nix | 14 +- .../cuda-modules/fixups/tensorrt.nix | 6 +- pkgs/development/cuda-modules/flags.nix | 399 ------------------ .../generic-builders/manifest.nix | 40 +- .../generic-builders/multiplex.nix | 21 +- pkgs/development/cuda-modules/gpus.nix | 244 ----------- .../cuda-modules/nvcc-compatibilities.nix | 125 ------ .../cuda-modules/tensorrt/shims.nix | 6 +- pkgs/development/cuda-modules/tests/flags.nix | 78 ++++ pkgs/top-level/cuda-packages.nix | 82 ++-- pkgs/top-level/release-cuda.nix | 17 +- 17 files changed, 186 insertions(+), 884 deletions(-) delete mode 100644 pkgs/development/cuda-modules/flags.nix delete mode 100644 
pkgs/development/cuda-modules/gpus.nix delete mode 100644 pkgs/development/cuda-modules/nvcc-compatibilities.nix create mode 100644 pkgs/development/cuda-modules/tests/flags.nix diff --git a/doc/languages-frameworks/cuda.section.md b/doc/languages-frameworks/cuda.section.md index 091fc57a53a0..7e489acdbd64 100644 --- a/doc/languages-frameworks/cuda.section.md +++ b/doc/languages-frameworks/cuda.section.md @@ -115,8 +115,8 @@ All new projects should use the CUDA redistributables available in [`cudaPackage ### Updating supported compilers and GPUs {#updating-supported-compilers-and-gpus} -1. Update `nvcc-compatibilities.nix` in `pkgs/development/cuda-modules/` to include the newest release of NVCC, as well as any newly supported host compilers. -2. Update `gpus.nix` in `pkgs/development/cuda-modules/` to include any new GPUs supported by the new release of CUDA. +1. Update `nvccCompatibilities` in `pkgs/development/cuda-modules/lib/data/nvcc.nix` to include the newest release of NVCC, as well as any newly supported host compilers. +2. Update `cudaCapabilityToInfo` in `pkgs/development/cuda-modules/lib/data/cuda.nix` to include any new GPUs supported by the new release of CUDA. ### Updating the CUDA Toolkit runfile installer {#updating-the-cuda-toolkit} diff --git a/pkgs/development/cuda-modules/README.md b/pkgs/development/cuda-modules/README.md index d79f5bede0bb..3e6168e52be8 100644 --- a/pkgs/development/cuda-modules/README.md +++ b/pkgs/development/cuda-modules/README.md @@ -16,9 +16,6 @@ scope. These are typically required for the creation of the finalized - `backend-stdenv.nix`: Standard environment for CUDA packages. - `flags.nix`: Flags set, or consumed by, NVCC in order to build packages. -- `gpus.nix`: A list of supported NVIDIA GPUs. -- `nvcc-compatibilities.nix`: NVCC releases and the version range of GCC/Clang - they support. ## Top-level directories diff --git a/pkgs/development/cuda-modules/aliases.nix b/pkgs/development/cuda-modules/aliases.nix index 14975410d378..1c8e9c1d5fab 100644 --- a/pkgs/development/cuda-modules/aliases.nix +++ b/pkgs/development/cuda-modules/aliases.nix @@ -1,11 +1,12 @@ # Packages which have been deprecated or removed from cudaPackages -final: _: +{ lib }: let mkRenamed = oldName: { path, package }: - final.lib.warn "cudaPackages.${oldName} is deprecated, use ${path} instead" package; + lib.warn "cudaPackages.${oldName} is deprecated, use ${path} instead" package; in +final: _: builtins.mapAttrs mkRenamed { # A comment to prevent empty { } from collapsing into a single line diff --git a/pkgs/development/cuda-modules/cuda/extension.nix b/pkgs/development/cuda-modules/cuda/extension.nix index 09d8999270a2..c46c39dcea2d 100644 --- a/pkgs/development/cuda-modules/cuda/extension.nix +++ b/pkgs/development/cuda-modules/cuda/extension.nix @@ -43,7 +43,7 @@ let }; # Generally we prefer to do things involving getting attribute names with feature_manifest instead - # of redistrib_manifest because the feature manifest will have *only* the redist architecture + # of redistrib_manifest because the feature manifest will have *only* the redist system # names as the keys, whereas the redistrib manifest will also have things like version, name, license, # and license_path. 
featureManifest = evaluatedModules.config.cuda.manifests.feature; diff --git a/pkgs/development/cuda-modules/cudnn/shims.nix b/pkgs/development/cuda-modules/cudnn/shims.nix index 0a7f09bc9f0f..01918e88f07b 100644 --- a/pkgs/development/cuda-modules/cudnn/shims.nix +++ b/pkgs/development/cuda-modules/cudnn/shims.nix @@ -1,14 +1,14 @@ # Shims to mimic the shape of ../modules/generic/manifests/{feature,redistrib}/release.nix { package, - # redistArch :: String + # redistSystem :: String # String is "unsupported" if the given architecture is unsupported. - redistArch, + redistSystem, }: { featureRelease = { inherit (package) minCudaVersion maxCudaVersion; - ${redistArch}.outputs = { + ${redistSystem}.outputs = { lib = true; static = true; dev = true; diff --git a/pkgs/development/cuda-modules/cusparselt/extension.nix b/pkgs/development/cuda-modules/cusparselt/extension.nix index e72be0d7cb99..56308973c341 100644 --- a/pkgs/development/cuda-modules/cusparselt/extension.nix +++ b/pkgs/development/cuda-modules/cusparselt/extension.nix @@ -1,11 +1,9 @@ # Support matrix can be found at # https://docs.nvidia.com/deeplearning/cudnn/archives/cudnn-880/support-matrix/index.html { + cudaLib, lib, - stdenv, - cudaMajorMinorVersion, - flags, - mkVersionedPackageName, + redistSystem, }: let inherit (lib) @@ -15,8 +13,6 @@ let trivial ; - inherit (stdenv) hostPlatform; - redistName = "cusparselt"; pname = "libcusparse_lt"; @@ -54,17 +50,12 @@ let releaseGrabber ]) cusparseltVersions; - # A release is supported if it has a libPath that matches our CUDA version for our platform. - # LibPath are not constant across the same release -- one platform may support fewer - # CUDA versions than another. - # redistArch :: String - redistArch = flags.getRedistArch hostPlatform.system; # platformIsSupported :: Manifests -> Boolean platformIsSupported = { feature, redistrib, ... }: (attrsets.attrByPath [ pname - redistArch + redistSystem ] null feature) != null; # TODO(@connorbaker): With an auxiliary file keeping track of the CUDA versions each release supports, @@ -77,7 +68,8 @@ let # Compute versioned attribute name to be used in this package set # Patch version changes should not break the build, so we only use major and minor # computeName :: RedistribRelease -> String - computeName = { version, ... }: mkVersionedPackageName redistName version; + computeName = + { version, ... }: cudaLib.utils.mkVersionedName redistName (lib.versions.majorMinor version); in final: _: let diff --git a/pkgs/development/cuda-modules/cutensor/extension.nix b/pkgs/development/cuda-modules/cutensor/extension.nix index deb6cc7e8e51..5f59cfb15bb6 100644 --- a/pkgs/development/cuda-modules/cutensor/extension.nix +++ b/pkgs/development/cuda-modules/cutensor/extension.nix @@ -13,11 +13,10 @@ # - Instead of providing different releases for each version of CUDA, CuTensor has multiple subdirectories in `lib` # -- one for each version of CUDA. { + cudaLib, cudaMajorMinorVersion, - flags, lib, - mkVersionedPackageName, - stdenv, + redistSystem, }: let inherit (lib) @@ -28,8 +27,6 @@ let trivial ; - inherit (stdenv) hostPlatform; - redistName = "cutensor"; pname = "libcutensor"; @@ -92,14 +89,12 @@ let # A release is supported if it has a libPath that matches our CUDA version for our platform. # LibPath are not constant across the same release -- one platform may support fewer # CUDA versions than another. 
- # redistArch :: String - redistArch = flags.getRedistArch hostPlatform.system; # platformIsSupported :: Manifests -> Boolean platformIsSupported = { feature, redistrib, ... }: (attrsets.attrByPath [ pname - redistArch + redistSystem ] null feature) != null; # TODO(@connorbaker): With an auxiliary file keeping track of the CUDA versions each release supports, @@ -112,7 +107,8 @@ let # Compute versioned attribute name to be used in this package set # Patch version changes should not break the build, so we only use major and minor # computeName :: RedistribRelease -> String - computeName = { version, ... }: mkVersionedPackageName redistName version; + computeName = + { version, ... }: cudaLib.utils.mkVersionedName redistName (lib.versions.majorMinor version); in final: _: let diff --git a/pkgs/development/cuda-modules/fixups/tensorrt.nix b/pkgs/development/cuda-modules/fixups/tensorrt.nix index a836debf0604..c55844eac0d9 100644 --- a/pkgs/development/cuda-modules/fixups/tensorrt.nix +++ b/pkgs/development/cuda-modules/fixups/tensorrt.nix @@ -1,9 +1,9 @@ { + cudaLib, cudaOlder, cudaPackages, cudaMajorMinorVersion, lib, - mkVersionedPackageName, patchelf, requireFile, stdenv, @@ -103,7 +103,9 @@ finalAttrs: prevAttrs: { # unless it is not available, in which case the default cudnn derivation will be used. cudnn = let - desiredName = mkVersionedPackageName "cudnn" finalAttrs.passthru.featureRelease.cudnnVersion; + desiredName = cudaLib.utils.mkVersionedName "cudnn" ( + lib.versions.majorMinor finalAttrs.passthru.featureRelease.cudnnVersion + ); in if finalAttrs.passthru.featureRelease.cudnnVersion == null || (cudaPackages ? desiredName) then cudaPackages.cudnn diff --git a/pkgs/development/cuda-modules/flags.nix b/pkgs/development/cuda-modules/flags.nix deleted file mode 100644 index 980f33c7863f..000000000000 --- a/pkgs/development/cuda-modules/flags.nix +++ /dev/null @@ -1,399 +0,0 @@ -# Type aliases -# Gpu :: AttrSet -# - See the documentation in ./gpus.nix. -{ - config, - cudaCapabilities ? (config.cudaCapabilities or [ ]), - cudaForwardCompat ? (config.cudaForwardCompat or true), - lib, - cudaMajorMinorVersion, - stdenv, - # gpus :: List Gpu - gpus, -}: -let - inherit (lib) - asserts - attrsets - lists - strings - trivial - ; - - inherit (stdenv) hostPlatform; - - # Flags are determined based on your CUDA toolkit by default. You may benefit - # from improved performance, reduced file size, or greater hardware support by - # passing a configuration based on your specific GPU environment. - # - # cudaCapabilities :: List Capability - # List of hardware generations to build. - # E.g. [ "8.0" ] - # Currently, the last item is considered the optional forward-compatibility arch, - # but this may change in the future. - # - # cudaForwardCompat :: Bool - # Whether to include the forward compatibility gencode (+PTX) - # to support future GPU generations. - # E.g. true - # - # Please see the accompanying documentation or https://github.com/NixOS/nixpkgs/pull/205351 - - # isSupported :: Gpu -> Bool - isSupported = - gpu: - let - inherit (gpu) minCudaVersion maxCudaVersion; - lowerBoundSatisfied = strings.versionAtLeast cudaMajorMinorVersion minCudaVersion; - upperBoundSatisfied = - (maxCudaVersion == null) || !(strings.versionOlder maxCudaVersion cudaMajorMinorVersion); - in - lowerBoundSatisfied && upperBoundSatisfied; - - # NOTE: Jetson is never built by default. 
- # isDefault :: Gpu -> Bool - isDefault = - gpu: - let - inherit (gpu) dontDefaultAfter isJetson; - newGpu = dontDefaultAfter == null; - recentGpu = newGpu || strings.versionAtLeast dontDefaultAfter cudaMajorMinorVersion; - in - recentGpu && !isJetson; - - # supportedGpus :: List Gpu - # GPUs which are supported by the provided CUDA version. - supportedGpus = builtins.filter isSupported gpus; - - # defaultGpus :: List Gpu - # GPUs which are supported by the provided CUDA version and we want to build for by default. - defaultGpus = builtins.filter isDefault supportedGpus; - - # supportedCapabilities :: List Capability - supportedCapabilities = lists.map (gpu: gpu.computeCapability) supportedGpus; - - # defaultCapabilities :: List Capability - # The default capabilities to target, if not overridden by the user. - defaultCapabilities = lists.map (gpu: gpu.computeCapability) defaultGpus; - - # cudaArchNameToVersions :: AttrSet String (List String) - # Maps the name of a GPU architecture to different versions of that architecture. - # For example, "Ampere" maps to [ "8.0" "8.6" "8.7" ]. - cudaArchNameToVersions = lists.groupBy' (versions: gpu: versions ++ [ gpu.computeCapability ]) [ ] ( - gpu: gpu.archName - ) supportedGpus; - - # cudaComputeCapabilityToName :: AttrSet String String - # Maps the version of a GPU architecture to the name of that architecture. - # For example, "8.0" maps to "Ampere". - cudaComputeCapabilityToName = builtins.listToAttrs ( - lists.map (gpu: attrsets.nameValuePair gpu.computeCapability gpu.archName) supportedGpus - ); - - # cudaComputeCapabilityToIsJetson :: AttrSet String Boolean - cudaComputeCapabilityToIsJetson = builtins.listToAttrs ( - lists.map (attrs: attrsets.nameValuePair attrs.computeCapability attrs.isJetson) supportedGpus - ); - - # jetsonComputeCapabilities :: List String - jetsonComputeCapabilities = trivial.pipe cudaComputeCapabilityToIsJetson [ - (attrsets.filterAttrs (_: isJetson: isJetson)) - builtins.attrNames - ]; - - # Find the intersection with the user-specified list of cudaCapabilities. - # NOTE: Jetson devices are never built by default because they cannot be targeted along with - # non-Jetson devices and require an aarch64 host platform. As such, if they're present anywhere, - # they must be in the user-specified cudaCapabilities. - # NOTE: We don't need to worry about mixes of Jetson and non-Jetson devices here -- there's - # sanity-checking for all that in below. - jetsonTargets = lists.intersectLists jetsonComputeCapabilities cudaCapabilities; - - # dropDot :: String -> String - dropDot = ver: builtins.replaceStrings [ "." ] [ "" ] ver; - - # archMapper :: String -> List String -> List String - # Maps a feature across a list of architecture versions to produce a list of architectures. - # For example, "sm" and [ "8.0" "8.6" "8.7" ] produces [ "sm_80" "sm_86" "sm_87" ]. - archMapper = feat: lists.map (computeCapability: "${feat}_${dropDot computeCapability}"); - - # gencodeMapper :: String -> List String -> List String - # Maps a feature across a list of architecture versions to produce a list of gencode arguments. - # For example, "sm" and [ "8.0" "8.6" "8.7" ] produces [ "-gencode=arch=compute_80,code=sm_80" - # "-gencode=arch=compute_86,code=sm_86" "-gencode=arch=compute_87,code=sm_87" ]. - gencodeMapper = - feat: - lists.map ( - computeCapability: - "-gencode=arch=compute_${dropDot computeCapability},code=${feat}_${dropDot computeCapability}" - ); - - # Maps Nix system to NVIDIA redist arch. 
- # NOTE: We swap out the default `linux-sbsa` redist (for server-grade ARM chips) with the - # `linux-aarch64` redist (which is for Jetson devices) if we're building any Jetson devices. - # Since both are based on aarch64, we can only have one or the other, otherwise there's an - # ambiguity as to which should be used. - # NOTE: This function *will* be called by unsupported systems because `cudaPackages` is part of - # `all-packages.nix`, which is evaluated on all systems. As such, we need to handle unsupported - # systems gracefully. - # getRedistArch :: String -> String - getRedistArch = - nixSystem: - attrsets.attrByPath [ nixSystem ] "unsupported" { - aarch64-linux = if jetsonTargets != [ ] then "linux-aarch64" else "linux-sbsa"; - x86_64-linux = "linux-x86_64"; - ppc64le-linux = "linux-ppc64le"; - x86_64-windows = "windows-x86_64"; - }; - - # Maps NVIDIA redist arch to Nix system. - # NOTE: This function *will* be called by unsupported systems because `cudaPackages` is part of - # `all-packages.nix`, which is evaluated on all systems. As such, we need to handle unsupported - # systems gracefully. - # getNixSystem :: String -> String - getNixSystem = - redistArch: - attrsets.attrByPath [ redistArch ] "unsupported-${redistArch}" { - linux-sbsa = "aarch64-linux"; - linux-aarch64 = "aarch64-linux"; - linux-x86_64 = "x86_64-linux"; - linux-ppc64le = "ppc64le-linux"; - windows-x86_64 = "x86_64-windows"; - }; - - formatCapabilities = - { - cudaCapabilities, - enableForwardCompat ? true, - }: - rec { - inherit cudaCapabilities enableForwardCompat; - - # archNames :: List String - # E.g. [ "Turing" "Ampere" ] - # - # Unknown architectures are rendered as sm_XX gencode flags. - archNames = lists.unique ( - lists.map (cap: cudaComputeCapabilityToName.${cap} or "sm_${dropDot cap}") cudaCapabilities - ); - - # realArches :: List String - # The real architectures are physical architectures supported by the CUDA version. - # E.g. [ "sm_75" "sm_86" ] - realArches = archMapper "sm" cudaCapabilities; - - # virtualArches :: List String - # The virtual architectures are typically used for forward compatibility, when trying to support - # an architecture newer than the CUDA version allows. - # E.g. [ "compute_75" "compute_86" ] - virtualArches = archMapper "compute" cudaCapabilities; - - # arches :: List String - # By default, build for all supported architectures and forward compatibility via a virtual - # architecture for the newest supported architecture. - # E.g. [ "sm_75" "sm_86" "compute_86" ] - arches = realArches ++ lists.optional enableForwardCompat (lists.last virtualArches); - - # gencode :: List String - # A list of CUDA gencode arguments to pass to NVCC. - # E.g. [ "-gencode=arch=compute_75,code=sm_75" ... "-gencode=arch=compute_86,code=compute_86" ] - gencode = - let - base = gencodeMapper "sm" cudaCapabilities; - forward = gencodeMapper "compute" [ (lists.last cudaCapabilities) ]; - in - base ++ lib.optionals enableForwardCompat forward; - - # gencodeString :: String - # A space-separated string of CUDA gencode arguments to pass to NVCC. - # E.g. "-gencode=arch=compute_75,code=sm_75 ... -gencode=arch=compute_86,code=compute_86" - gencodeString = strings.concatStringsSep " " gencode; - - # cmakeCudaArchitecturesString :: String - # A semicolon-separated string of CUDA capabilities without dots, suitable for passing to CMake. - # E.g. 
"75;86" - cmakeCudaArchitecturesString = strings.concatMapStringsSep ";" dropDot cudaCapabilities; - - # Jetson devices cannot be targeted by the same binaries which target non-Jetson devices. While - # NVIDIA provides both `linux-aarch64` and `linux-sbsa` packages, which both target `aarch64`, - # they are built with different settings and cannot be mixed. - # isJetsonBuild :: Boolean - isJetsonBuild = - let - requestedJetsonDevices = lists.filter ( - cap: cudaComputeCapabilityToIsJetson.${cap} or false - ) cudaCapabilities; - requestedNonJetsonDevices = lists.filter ( - cap: !(builtins.elem cap requestedJetsonDevices) - ) cudaCapabilities; - jetsonBuildSufficientCondition = requestedJetsonDevices != [ ]; - jetsonBuildNecessaryCondition = requestedNonJetsonDevices == [ ] && hostPlatform.isAarch64; - in - trivial.throwIf (jetsonBuildSufficientCondition && !jetsonBuildNecessaryCondition) '' - Jetson devices cannot be targeted with non-Jetson devices. Additionally, they require hostPlatform to be aarch64. - You requested ${builtins.toJSON cudaCapabilities} for host platform ${hostPlatform.system}. - Requested Jetson devices: ${builtins.toJSON requestedJetsonDevices}. - Requested non-Jetson devices: ${builtins.toJSON requestedNonJetsonDevices}. - Exactly one of the following must be true: - - All CUDA capabilities belong to Jetson devices and hostPlatform is aarch64. - - No CUDA capabilities belong to Jetson devices. - See ${./gpus.nix} for a list of architectures supported by this version of Nixpkgs. - '' jetsonBuildSufficientCondition - && jetsonBuildNecessaryCondition; - }; -in -# When changing names or formats: pause, validate, and update the assert -assert - let - expected = { - cudaCapabilities = [ - "7.5" - "8.6" - ]; - enableForwardCompat = true; - - archNames = [ - "Turing" - "Ampere" - ]; - realArches = [ - "sm_75" - "sm_86" - ]; - virtualArches = [ - "compute_75" - "compute_86" - ]; - arches = [ - "sm_75" - "sm_86" - "compute_86" - ]; - - gencode = [ - "-gencode=arch=compute_75,code=sm_75" - "-gencode=arch=compute_86,code=sm_86" - "-gencode=arch=compute_86,code=compute_86" - ]; - gencodeString = "-gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_86,code=compute_86"; - - cmakeCudaArchitecturesString = "75;86"; - - isJetsonBuild = false; - }; - actual = formatCapabilities { - cudaCapabilities = [ - "7.5" - "8.6" - ]; - }; - actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value; - in - asserts.assertMsg - ((strings.versionAtLeast cudaMajorMinorVersion "11.2") -> (expected == actualWrapped)) - '' - This test should only fail when using a version of CUDA older than 11.2, the first to support - 8.6. - Expected: ${builtins.toJSON expected} - Actual: ${builtins.toJSON actualWrapped} - ''; -# Check mixed Jetson and non-Jetson devices -assert - let - expected = false; - actual = formatCapabilities { - cudaCapabilities = [ - "7.2" - "7.5" - ]; - }; - actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value; - in - asserts.assertMsg (expected == actualWrapped) '' - Jetson devices capabilities cannot be mixed with non-jetson devices. - Capability 7.5 is non-Jetson and should not be allowed with Jetson 7.2. 
- Expected: ${builtins.toJSON expected} - Actual: ${builtins.toJSON actualWrapped} - ''; -# Check Jetson-only -assert - let - expected = { - cudaCapabilities = [ - "6.2" - "7.2" - ]; - enableForwardCompat = true; - - archNames = [ - "Pascal" - "Volta" - ]; - realArches = [ - "sm_62" - "sm_72" - ]; - virtualArches = [ - "compute_62" - "compute_72" - ]; - arches = [ - "sm_62" - "sm_72" - "compute_72" - ]; - - gencode = [ - "-gencode=arch=compute_62,code=sm_62" - "-gencode=arch=compute_72,code=sm_72" - "-gencode=arch=compute_72,code=compute_72" - ]; - gencodeString = "-gencode=arch=compute_62,code=sm_62 -gencode=arch=compute_72,code=sm_72 -gencode=arch=compute_72,code=compute_72"; - - cmakeCudaArchitecturesString = "62;72"; - - isJetsonBuild = true; - }; - actual = formatCapabilities { - cudaCapabilities = [ - "6.2" - "7.2" - ]; - }; - actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value; - in - asserts.assertMsg - # We can't do this test unless we're targeting aarch64 - (hostPlatform.isAarch64 -> (expected == actualWrapped)) - '' - Jetson devices can only be built with other Jetson devices. - Both 6.2 and 7.2 are Jetson devices. - Expected: ${builtins.toJSON expected} - Actual: ${builtins.toJSON actualWrapped} - ''; -{ - # formatCapabilities :: { cudaCapabilities: List Capability, enableForwardCompat: Boolean } -> { ... } - inherit formatCapabilities; - - # cudaArchNameToVersions :: String => String - inherit cudaArchNameToVersions; - - # cudaComputeCapabilityToName :: String => String - inherit cudaComputeCapabilityToName; - - # dropDot :: String -> String - inherit dropDot; - - inherit - defaultCapabilities - supportedCapabilities - jetsonComputeCapabilities - jetsonTargets - getNixSystem - getRedistArch - ; -} -// formatCapabilities { - cudaCapabilities = if cudaCapabilities == [ ] then defaultCapabilities else cudaCapabilities; - enableForwardCompat = cudaForwardCompat; -} diff --git a/pkgs/development/cuda-modules/generic-builders/manifest.nix b/pkgs/development/cuda-modules/generic-builders/manifest.nix index f36cc2d989ad..f4eecc29f4dc 100644 --- a/pkgs/development/cuda-modules/generic-builders/manifest.nix +++ b/pkgs/development/cuda-modules/generic-builders/manifest.nix @@ -5,6 +5,7 @@ autoPatchelfHook, backendStdenv, callPackage, + cudaLib, fetchurl, fixups, lib, @@ -47,15 +48,15 @@ let maybeFixup = fixups.${pname} or null; fixup = if maybeFixup != null then callPackage maybeFixup { } else { }; - # Get the redist architectures for which package provides distributables. + # Get the redist systems for which package provides distributables. # These are used by meta.platforms. - supportedRedistArchs = builtins.attrNames featureRelease; - # redistArch :: String - # The redistArch is the name of the architecture for which the redistributable is built. - # It is `"unsupported"` if the redistributable is not supported on the target platform. - redistArch = flags.getRedistArch hostPlatform.system; + supportedRedistSystems = builtins.attrNames featureRelease; + # redistSystem :: String + # The redistSystem is the name of the system for which the redistributable is built. + # It is `"unsupported"` if the redistributable is not supported on the target system. 
+ redistSystem = cudaLib.utils.getRedistSystem backendStdenv.hasJetsonCudaCapability hostPlatform.system; - sourceMatchesHost = flags.getNixSystem redistArch == hostPlatform.system; + sourceMatchesHost = lib.elem hostPlatform.system (cudaLib.utils.getNixSystems redistSystem); in (backendStdenv.mkDerivation (finalAttrs: { # NOTE: Even though there's no actual buildPhase going on here, the derivations of the @@ -81,7 +82,7 @@ in hasOutput = output: attrsets.attrByPath [ - redistArch + redistSystem "outputs" output ] false featureRelease; @@ -99,12 +100,15 @@ in # NOTE: In the case the redistributable isn't supported on the target platform, # we will have `outputs = [ "out" ] ++ possibleOutputs`. This is of note because platforms which # aren't supported would otherwise have evaluation errors when trying to access outputs other than `out`. - # The alternative would be to have `outputs = [ "out" ]` when`redistArch = "unsupported"`, but that would + # The alternative would be to have `outputs = [ "out" ]` when`redistSystem = "unsupported"`, but that would # require adding guards throughout the entirety of the CUDA package set to ensure `cudaSupport` is true -- # recall that OfBorg will evaluate packages marked as broken and that `cudaPackages` will be evaluated with # `cudaSupport = false`! additionalOutputs = - if redistArch == "unsupported" then possibleOutputs else builtins.filter hasOutput possibleOutputs; + if redistSystem == "unsupported" then + possibleOutputs + else + builtins.filter hasOutput possibleOutputs; # The out output is special -- it's the default output and we always include it. outputs = [ "out" ] ++ additionalOutputs; in @@ -155,14 +159,14 @@ in }; # src :: Optional Derivation - # If redistArch doesn't exist in redistribRelease, return null. + # If redistSystem doesn't exist in redistribRelease, return null. src = trivial.mapNullable ( { relative_path, sha256, ... }: fetchurl { url = "https://developer.download.nvidia.com/compute/${redistName}/redist/${relative_path}"; inherit sha256; } - ) (redistribRelease.${redistArch} or null); + ) (redistribRelease.${redistSystem} or null); postPatch = # Pkg-config's setup hook expects configuration files in $out/share/pkgconfig @@ -321,11 +325,13 @@ in description = "${redistribRelease.name}. By downloading and using the packages you accept the terms and conditions of the ${finalAttrs.meta.license.shortName}"; sourceProvenance = [ sourceTypes.binaryNativeCode ]; broken = lists.any trivial.id (attrsets.attrValues finalAttrs.brokenConditions); - platforms = trivial.pipe supportedRedistArchs [ - # Map each redist arch to the equivalent nix system or null if there is no equivalent. - (builtins.map flags.getNixSystem) - # Filter out unsupported systems - (builtins.filter (nixSystem: !(strings.hasPrefix "unsupported-" nixSystem))) + platforms = trivial.pipe supportedRedistSystems [ + # Map each redist system to the equivalent nix systems. + (lib.concatMap cudaLib.utils.getNixSystems) + # Take all the unique values. + lib.unique + # Sort the list. 
+ lib.naturalSort ]; badPlatforms = let diff --git a/pkgs/development/cuda-modules/generic-builders/multiplex.nix b/pkgs/development/cuda-modules/generic-builders/multiplex.nix index 816a375e620f..0a6d101eaf9d 100644 --- a/pkgs/development/cuda-modules/generic-builders/multiplex.nix +++ b/pkgs/development/cuda-modules/generic-builders/multiplex.nix @@ -1,11 +1,9 @@ { - # callPackage-provided arguments lib, + cudaLib, cudaMajorMinorVersion, - flags, + redistSystem, stdenv, - # Expected to be passed by the caller - mkVersionedPackageName, # Builder-specific arguments # Short package name (e.g., "cuda_cccl") # pname : String @@ -26,7 +24,7 @@ # The featureRelease is used to populate meta.platforms (by way of looking at the attribute names), determine the # outputs of the package, and provide additional package-specific constraints (e.g., min/max supported CUDA versions, # required versions of other packages, etc.). - # shimFn :: {package, redistArch} -> AttrSet + # shimFn :: {package, redistSystem} -> AttrSet shimsFn ? (throw "shimsFn must be provided"), }: let @@ -41,10 +39,6 @@ let # - Releases: ../modules/${pname}/releases/releases.nix # - Package: ../modules/${pname}/releases/package.nix - # redistArch :: String - # Value is `"unsupported"` if the platform is not supported. - redistArch = flags.getRedistArch stdenv.hostPlatform.system; - # Check whether a package supports our CUDA version. # satisfiesCudaVersion :: Package -> Bool satisfiesCudaVersion = @@ -53,7 +47,7 @@ let && lib.versionAtLeast package.maxCudaVersion cudaMajorMinorVersion; # FIXME: do this at the module system level - propagatePlatforms = lib.mapAttrs (redistArch: lib.map (p: { inherit redistArch; } // p)); + propagatePlatforms = lib.mapAttrs (redistSystem: lib.map (p: { inherit redistSystem; } // p)); # Releases for all platforms and all CUDA versions. allReleases = propagatePlatforms evaluatedModules.config.${pname}.releases; @@ -65,12 +59,13 @@ let allPackages = lib.concatLists (lib.attrValues allReleases'); packageOlder = p1: p2: lib.versionOlder p1.version p2.version; - packageSupportedPlatform = p: p.redistArch == redistArch; + packageSupportedPlatform = p: p.redistSystem == redistSystem; # Compute versioned attribute name to be used in this package set # Patch version changes should not break the build, so we only use major and minor # computeName :: Package -> String - computeName = package: mkVersionedPackageName pname package.version; + computeName = + { version, ... }: cudaLib.utils.mkVersionedName pname (lib.versions.majorMinor version); # The newest package for each major-minor version, with newest first. # newestPackages :: List Package @@ -113,7 +108,7 @@ let buildPackage = package: let - shims = final.callPackage shimsFn { inherit package redistArch; }; + shims = final.callPackage shimsFn { inherit package redistSystem; }; name = computeName package; drv = final.callPackage ./manifest.nix { inherit pname redistName; diff --git a/pkgs/development/cuda-modules/gpus.nix b/pkgs/development/cuda-modules/gpus.nix deleted file mode 100644 index a5466a2ef15f..000000000000 --- a/pkgs/development/cuda-modules/gpus.nix +++ /dev/null @@ -1,244 +0,0 @@ -# Type aliases -# -# Gpu = { -# archName: String -# - The name of the microarchitecture. -# computeCapability: String -# - The compute capability of the GPU. -# isJetson: Boolean -# - Whether a GPU is part of NVIDIA's line of Jetson embedded computers. This field is -# notable because it tells us what architecture to build for (as Jetson devices are -# aarch64). 
-# More on Jetson devices here: -# https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/ -# NOTE: These architectures are only built upon request. -# minCudaVersion: String -# - The minimum (inclusive) CUDA version that supports this GPU. -# dontDefaultAfter: null | String -# - The CUDA version after which to exclude this GPU from the list of default capabilities -# we build. null means we always include this GPU in the default capabilities if it is -# supported. -# maxCudaVersion: null | String -# - The maximum (exclusive) CUDA version that supports this GPU. null means there is no -# maximum. -# } -# -# Many thanks to Arnon Shimoni for maintaining a list of these architectures and capabilities. -# Without your work, this would have been much more difficult. -# https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ -# -# https://en.wikipedia.org/wiki/CUDA#GPUs_supported - -[ - { - # Tesla K40 - archName = "Kepler"; - computeCapability = "3.5"; - isJetson = false; - minCudaVersion = "10.0"; - dontDefaultAfter = "11.0"; - maxCudaVersion = "11.8"; - } - { - # Tesla K80 - archName = "Kepler"; - computeCapability = "3.7"; - isJetson = false; - minCudaVersion = "10.0"; - dontDefaultAfter = "11.0"; - maxCudaVersion = "11.8"; - } - { - # Tesla/Quadro M series - archName = "Maxwell"; - computeCapability = "5.0"; - isJetson = false; - minCudaVersion = "10.0"; - dontDefaultAfter = "11.0"; - maxCudaVersion = null; - } - { - # Quadro M6000, GeForce 900, GTX-970, GTX-980, GTX Titan X - archName = "Maxwell"; - computeCapability = "5.2"; - isJetson = false; - minCudaVersion = "10.0"; - dontDefaultAfter = "11.0"; - maxCudaVersion = null; - } - { - # Tegra (Jetson) TX1 / Tegra X1, Drive CX, Drive PX, Jetson Nano - archName = "Maxwell"; - computeCapability = "5.3"; - isJetson = true; - minCudaVersion = "10.0"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # Quadro GP100, Tesla P100, DGX-1 (Generic Pascal) - archName = "Pascal"; - computeCapability = "6.0"; - isJetson = false; - minCudaVersion = "10.0"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # GTX 1080, GTX 1070, GTX 1060, GTX 1050, GTX 1030 (GP108), GT 1010 (GP108) Titan Xp, Tesla - # P40, Tesla P4, Discrete GPU on the NVIDIA Drive PX2 - archName = "Pascal"; - computeCapability = "6.1"; - isJetson = false; - minCudaVersion = "10.0"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # Integrated GPU on the NVIDIA Drive PX2, Tegra (Jetson) TX2 - archName = "Pascal"; - computeCapability = "6.2"; - isJetson = true; - minCudaVersion = "10.0"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # DGX-1 with Volta, Tesla V100, GTX 1180 (GV104), Titan V, Quadro GV100 - archName = "Volta"; - computeCapability = "7.0"; - isJetson = false; - minCudaVersion = "10.0"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # Jetson AGX Xavier, Drive AGX Pegasus, Xavier NX - archName = "Volta"; - computeCapability = "7.2"; - isJetson = true; - minCudaVersion = "10.0"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # GTX/RTX Turing – GTX 1660 Ti, RTX 2060, RTX 2070, RTX 2080, Titan RTX, Quadro RTX 4000, - # Quadro RTX 5000, Quadro RTX 6000, Quadro RTX 8000, Quadro T1000/T2000, Tesla T4 - archName = "Turing"; - computeCapability = "7.5"; - isJetson = false; - minCudaVersion = "10.0"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # NVIDIA A100 (the name “Tesla” has been dropped – GA100), NVIDIA DGX-A100 - archName = "Ampere"; - 
computeCapability = "8.0"; - isJetson = false; - minCudaVersion = "11.2"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # Tesla GA10x cards, RTX Ampere – RTX 3080, GA102 – RTX 3090, RTX A2000, A3000, RTX A4000, - # A5000, A6000, NVIDIA A40, GA106 – RTX 3060, GA104 – RTX 3070, GA107 – RTX 3050, RTX A10, RTX - # A16, RTX A40, A2 Tensor Core GPU - archName = "Ampere"; - computeCapability = "8.6"; - isJetson = false; - minCudaVersion = "11.2"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # Jetson AGX Orin and Drive AGX Orin only - archName = "Ampere"; - computeCapability = "8.7"; - isJetson = true; - minCudaVersion = "11.5"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # NVIDIA GeForce RTX 4090, RTX 4080, RTX 6000, Tesla L40 - archName = "Ada"; - computeCapability = "8.9"; - isJetson = false; - minCudaVersion = "11.8"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # NVIDIA H100 (GH100) - archName = "Hopper"; - computeCapability = "9.0"; - isJetson = false; - minCudaVersion = "11.8"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # NVIDIA H100 (GH100) (Thor) - archName = "Hopper"; - computeCapability = "9.0a"; - isJetson = false; - minCudaVersion = "12.0"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # NVIDIA B100 - archName = "Blackwell"; - computeCapability = "10.0"; - isJetson = false; - minCudaVersion = "12.8"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # NVIDIA B100 Accelerated - archName = "Blackwell"; - computeCapability = "10.0a"; - isJetson = false; - minCudaVersion = "12.8"; - dontDefaultAfter = "12.0"; # disable to reduce size of OnnxRuntime and Torch CUDA binaries - maxCudaVersion = null; - } - { - # NVIDIA Blackwell - archName = "Blackwell"; - computeCapability = "10.1"; - isJetson = false; - minCudaVersion = "12.8"; - dontDefaultAfter = "12.0"; # disable to reduce size of OnnxRuntime and Torch CUDA binaries - maxCudaVersion = null; - } - { - # NVIDIA Blackwell Accelerated - archName = "Blackwell"; - computeCapability = "10.1a"; - isJetson = false; - minCudaVersion = "12.8"; - dontDefaultAfter = "12.0"; # disable to reduce size of OnnxRuntime and Torch CUDA binaries - maxCudaVersion = null; - } - { - # NVIDIA GeForce RTX 5090 (GB202), RTX 5080 (GB203), RTX 5070 (GB205) - archName = "Blackwell"; - computeCapability = "12.0"; - isJetson = false; - minCudaVersion = "12.8"; - dontDefaultAfter = null; - maxCudaVersion = null; - } - { - # NVIDIA Blackwell Accelerated - archName = "Blackwell"; - computeCapability = "12.0a"; - isJetson = false; - minCudaVersion = "12.8"; - dontDefaultAfter = "12.0"; # disable to reduce size of OnnxRuntime and Torch CUDA binaries - maxCudaVersion = null; - } -] diff --git a/pkgs/development/cuda-modules/nvcc-compatibilities.nix b/pkgs/development/cuda-modules/nvcc-compatibilities.nix deleted file mode 100644 index e4be18ea85ab..000000000000 --- a/pkgs/development/cuda-modules/nvcc-compatibilities.nix +++ /dev/null @@ -1,125 +0,0 @@ -# Taken from -# https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#host-compiler-support-policy -# -# NVCC performs a version check on the host compiler’s major version and so newer minor versions -# of the compilers listed below will be supported, but major versions falling outside the range -# will not be supported. -# -# NOTE: These constraints don't apply to Jetson, which uses something else. -# NOTE: NVIDIA can and will add support for newer compilers even during patch releases. 
-# E.g.: CUDA 12.2.1 maxxed out with support for Clang 15.0; 12.2.2 added support for Clang 16.0. -# NOTE: Because all platforms NVIDIA supports use GCC and Clang, we omit the architectures here. -# Type Aliases -# CudaVersion = String (two-part version number, e.g. "11.2") -# Platform = String (e.g. "x86_64-linux") -# CompilerCompatibilities = { -# clangMaxMajorVersion = String (e.g. "15") -# clangMinMajorVersion = String (e.g. "7") -# gccMaxMajorVersion = String (e.g. "11") -# gccMinMajorVersion = String (e.g. "6") -# } -let - # attrs :: AttrSet CudaVersion CompilerCompatibilities - attrs = { - # Our baseline - # https://docs.nvidia.com/cuda/archive/11.0/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features - "11.0" = { - clangMaxMajorVersion = "9"; - clangMinMajorVersion = "7"; - gccMaxMajorVersion = "9"; - gccMinMajorVersion = "6"; - }; - - # Added support for Clang 10 and GCC 10 - # https://docs.nvidia.com/cuda/archive/11.1.1/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features - "11.1" = attrs."11.0" // { - clangMaxMajorVersion = "10"; - gccMaxMajorVersion = "10"; - }; - - # Added support for Clang 11 - # https://docs.nvidia.com/cuda/archive/11.2.2/cuda-installation-guide-linux/index.html#system-requirements - "11.2" = attrs."11.1" // { - clangMaxMajorVersion = "11"; - }; - - # No changes from 11.2 to 11.3 - "11.3" = attrs."11.2"; - - # Added support for Clang 12 and GCC 11 - # https://docs.nvidia.com/cuda/archive/11.4.4/cuda-toolkit-release-notes/index.html#cuda-general-new-features - "11.4" = attrs."11.3" // { - clangMaxMajorVersion = "12"; - # NOTE: There is a bug in the version of GLIBC that GCC 11 uses which causes it to fail to compile some CUDA - # code. As such, we skip it for this release, and do the bump in 11.6 (skipping 11.5). - # https://forums.developer.nvidia.com/t/cuda-11-5-samples-throw-multiple-error-attribute-malloc-does-not-take-arguments/192750/15 - # gccMaxMajorVersion = "11"; - }; - - # No changes from 11.4 to 11.5 - "11.5" = attrs."11.4"; - - # No changes from 11.5 to 11.6 - # However, as mentioned above, we add GCC 11 this release. 
- "11.6" = attrs."11.5" // { - gccMaxMajorVersion = "11"; - }; - - # Added support for Clang 13 - # https://docs.nvidia.com/cuda/archive/11.7.1/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features - "11.7" = attrs."11.6" // { - clangMaxMajorVersion = "13"; - }; - - # Added support for Clang 14 - # https://docs.nvidia.com/cuda/archive/11.8.0/cuda-installation-guide-linux/index.html#system-requirements - "11.8" = attrs."11.7" // { - clangMaxMajorVersion = "14"; - }; - - # Added support for GCC 12 - # https://docs.nvidia.com/cuda/archive/12.0.1/cuda-installation-guide-linux/index.html#system-requirements - "12.0" = attrs."11.8" // { - gccMaxMajorVersion = "12"; - }; - - # Added support for Clang 15 - # https://docs.nvidia.com/cuda/archive/12.1.1/cuda-toolkit-release-notes/index.html#cuda-compilers-new-features - "12.1" = attrs."12.0" // { - clangMaxMajorVersion = "15"; - }; - - # Added support for Clang 16 - # https://docs.nvidia.com/cuda/archive/12.2.2/cuda-installation-guide-linux/index.html#host-compiler-support-policy - "12.2" = attrs."12.1" // { - clangMaxMajorVersion = "16"; - }; - - # No changes from 12.2 to 12.3 - "12.3" = attrs."12.2"; - - # Added support for Clang 17 and GCC 13 - # https://docs.nvidia.com/cuda/archive/12.4.0/cuda-installation-guide-linux/index.html#host-compiler-support-policy - "12.4" = attrs."12.3" // { - clangMaxMajorVersion = "17"; - gccMaxMajorVersion = "13"; - }; - - # No changes from 12.4 to 12.5 - "12.5" = attrs."12.4"; - - # Added support for Clang 18 - # https://docs.nvidia.com/cuda/archive/12.6.0/cuda-installation-guide-linux/index.html#host-compiler-support-policy - "12.6" = attrs."12.4" // { - clangMaxMajorVersion = "18"; - }; - - # Added support for Clang 19 and GCC 14 - # https://docs.nvidia.com/cuda/archive/12.8.0/cuda-installation-guide-linux/index.html#host-compiler-support-policy - "12.8" = attrs."12.6" // { - clangMaxMajorVersion = "19"; - gccMaxMajorVersion = "14"; - }; - }; -in -attrs diff --git a/pkgs/development/cuda-modules/tensorrt/shims.nix b/pkgs/development/cuda-modules/tensorrt/shims.nix index d347ef7e294c..b452a515404c 100644 --- a/pkgs/development/cuda-modules/tensorrt/shims.nix +++ b/pkgs/development/cuda-modules/tensorrt/shims.nix @@ -1,14 +1,14 @@ # Shims to mimic the shape of ../modules/generic/manifests/{feature,redistrib}/release.nix { package, - # redistArch :: String + # redistSystem :: String # String is `"unsupported"` if the given architecture is unsupported. - redistArch, + redistSystem, }: { featureRelease = { inherit (package) cudnnVersion minCudaVersion maxCudaVersion; - ${redistArch}.outputs = { + ${redistSystem}.outputs = { bin = true; lib = true; static = true; diff --git a/pkgs/development/cuda-modules/tests/flags.nix b/pkgs/development/cuda-modules/tests/flags.nix new file mode 100644 index 000000000000..d9ae72231c6c --- /dev/null +++ b/pkgs/development/cuda-modules/tests/flags.nix @@ -0,0 +1,78 @@ +{ + cudaLib, + cudaNamePrefix, + lib, + runCommand, +}: +let + inherit (builtins) deepSeq toJSON tryEval; + inherit (cudaLib.data) cudaCapabilityToInfo; + inherit (cudaLib.utils) formatCapabilities; + inherit (lib.asserts) assertMsg; +in +# When changing names or formats: pause, validate, and update the assert +assert assertMsg ( + cudaCapabilityToInfo ? "7.5" && cudaCapabilityToInfo ? 
"8.6" +) "The following test requires both 7.5 and 8.6 be known CUDA capabilities"; +assert + let + expected = { + cudaCapabilities = [ + "7.5" + "8.6" + ]; + cudaForwardCompat = true; + + # Sorted alphabetically + archNames = [ + "Ampere" + "Turing" + ]; + + realArches = [ + "sm_75" + "sm_86" + ]; + + virtualArches = [ + "compute_75" + "compute_86" + ]; + + arches = [ + "sm_75" + "sm_86" + "compute_86" + ]; + + gencode = [ + "-gencode=arch=compute_75,code=sm_75" + "-gencode=arch=compute_86,code=sm_86" + "-gencode=arch=compute_86,code=compute_86" + ]; + + gencodeString = "-gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_86,code=compute_86"; + + cmakeCudaArchitecturesString = "75;86"; + }; + actual = formatCapabilities { + inherit cudaCapabilityToInfo; + cudaCapabilities = [ + "7.5" + "8.6" + ]; + }; + actualWrapped = (tryEval (deepSeq actual actual)).value; + in + assertMsg (expected == actualWrapped) '' + Expected: ${toJSON expected} + Actual: ${toJSON actualWrapped} + ''; +runCommand "${cudaNamePrefix}-tests-flags" + { + __structuredAttrs = true; + strictDeps = true; + } + '' + touch "$out" + '' diff --git a/pkgs/top-level/cuda-packages.nix b/pkgs/top-level/cuda-packages.nix index 70aaf15362f0..60bc5c0dd953 100644 --- a/pkgs/top-level/cuda-packages.nix +++ b/pkgs/top-level/cuda-packages.nix @@ -22,6 +22,7 @@ # I've (@connorbaker) attempted to do that, though I'm unsure of how this will interact with overrides. { config, + cudaLib, cudaMajorMinorVersion, lib, newScope, @@ -37,31 +38,33 @@ let strings versions ; + # MUST be defined outside fix-point (cf. "NAMESET STRICTNESS" above) fixups = import ../development/cuda-modules/fixups { inherit lib; }; - gpus = import ../development/cuda-modules/gpus.nix; - nvccCompatibilities = import ../development/cuda-modules/nvcc-compatibilities.nix; - flags = import ../development/cuda-modules/flags.nix { - inherit - config - cudaMajorMinorVersion - gpus - lib - stdenv - ; - }; - mkVersionedPackageName = - name: version: name + "_" + strings.replaceStrings [ "." ] [ "_" ] (versions.majorMinor version); + # Since Jetson capabilities are never built by default, we can check if any of them were requested + # through final.config.cudaCapabilities and use that to determine if we should change some manifest versions. + # Copied from backendStdenv. + jetsonCudaCapabilities = lib.filter ( + cudaCapability: cudaLib.data.cudaCapabilityToInfo.${cudaCapability}.isJetson + ) cudaLib.data.allSortedCudaCapabilities; + hasJetsonCudaCapability = + lib.intersectLists jetsonCudaCapabilities (config.cudaCapabilities or [ ]) != [ ]; + redistSystem = cudaLib.utils.getRedistSystem hasJetsonCudaCapability stdenv.hostPlatform.system; passthruFunction = final: { + # NOTE: + # It is important that cudaLib (and fixups, which will be addressed later) are not part of the package set + # fixed-point. + # As described by @SomeoneSerge: + # > The layering should be: configuration -> (identifies/is part of) cudaPackages -> (is built using) cudaLib. + # > No arrows should point in the reverse directions. + # That is to say that cudaLib should only know about package sets and configurations, because it implements + # functionality for interpreting configurations, resolving them against data, and constructing package sets. 
inherit cudaMajorMinorVersion fixups - flags - gpus lib - nvccCompatibilities pkgs ; @@ -71,10 +74,6 @@ let cudaOlder = strings.versionOlder cudaMajorMinorVersion; cudaAtLeast = strings.versionAtLeast cudaMajorMinorVersion; - # NOTE: mkVersionedPackageName is an internal, implementation detail and should not be relied on by outside consumers. - # It may be removed in the future. - inherit mkVersionedPackageName; - # Maintain a reference to the final cudaPackages. # Without this, if we use `final.callPackage` and a package accepts `cudaPackages` as an # argument, it's provided with `cudaPackages` from the top-level scope, which is not what we @@ -85,6 +84,21 @@ let __attrsFailEvaluation = true; }; + flags = + cudaLib.utils.formatCapabilities { + inherit (final.backendStdenv) cudaCapabilities cudaForwardCompat; + inherit (cudaLib.data) cudaCapabilityToInfo; + } + # TODO(@connorbaker): Enable the corresponding warnings in `../development/cuda-modules/aliases.nix` after some + # time to allow users to migrate to cudaLib and backendStdenv. + // { + inherit (cudaLib.utils) dropDots; + cudaComputeCapabilityToName = + cudaCapability: cudaLib.data.cudaCapabilityToInfo.${cudaCapability}.archName; + dropDot = cudaLib.utils.dropDots; + isJetsonBuild = final.backendStdenv.hasJetsonCudaCapability; + }; + # Loose packages # Barring packages which share a home (e.g., cudatoolkit and cudatoolkit-legacy-runfile), new packages # should be added to ../development/cuda-modules/packages in "by-name" style, where they will be automatically @@ -131,7 +145,10 @@ let value = final.callPackage ../development/cuda-modules/tests/opencv-and-torch config; }; in - attrsets.listToAttrs (attrsets.mapCartesianProduct builder configs); + attrsets.listToAttrs (attrsets.mapCartesianProduct builder configs) + // { + flags = final.callPackage ../development/cuda-modules/tests/flags.nix { }; + }; }; composedExtension = fixedPoints.composeManyExtensions ( @@ -146,10 +163,10 @@ let (import ../development/cuda-modules/cuda/extension.nix { inherit cudaMajorMinorVersion lib; }) (import ../development/cuda-modules/generic-builders/multiplex.nix { inherit + cudaLib cudaMajorMinorVersion - flags lib - mkVersionedPackageName + redistSystem stdenv ; pname = "cudnn"; @@ -159,28 +176,25 @@ let }) (import ../development/cuda-modules/cutensor/extension.nix { inherit + cudaLib cudaMajorMinorVersion - flags lib - mkVersionedPackageName - stdenv + redistSystem ; }) (import ../development/cuda-modules/cusparselt/extension.nix { inherit - cudaMajorMinorVersion - flags + cudaLib lib - mkVersionedPackageName - stdenv + redistSystem ; }) (import ../development/cuda-modules/generic-builders/multiplex.nix { inherit + cudaLib cudaMajorMinorVersion - flags lib - mkVersionedPackageName + redistSystem stdenv ; pname = "tensorrt"; @@ -193,7 +207,9 @@ let }) (import ../development/cuda-modules/cuda-library-samples/extension.nix { inherit lib stdenv; }) ] - ++ lib.optionals config.allowAliases [ (import ../development/cuda-modules/aliases.nix) ] + ++ lib.optionals config.allowAliases [ + (import ../development/cuda-modules/aliases.nix { inherit lib; }) + ] ); cudaPackages = customisation.makeScope newScope ( diff --git a/pkgs/top-level/release-cuda.nix b/pkgs/top-level/release-cuda.nix index d2e515556bb7..68bec74c9530 100644 --- a/pkgs/top-level/release-cuda.nix +++ b/pkgs/top-level/release-cuda.nix @@ -14,20 +14,7 @@ let lib = import ../../lib; - ensureList = x: if builtins.isList x then x else [ x ]; - allowUnfreePredicate = - p: - builtins.all ( - license: 
- license.free - || builtins.elem license.shortName [ - "CUDA EULA" - "cuDNN EULA" - "cuSPARSELt EULA" - "cuTENSOR EULA" - "NVidia OptiX EULA" - ] - ) (ensureList p.meta.license); + cudaLib = import ../development/cuda-modules/lib; in { @@ -40,7 +27,7 @@ in # Attributes passed to nixpkgs. nixpkgsArgs ? { config = { - inherit allowUnfreePredicate; + allowUnfreePredicate = cudaLib.utils.allowUnfreeCudaPredicate; "${variant}Support" = true; inHydra = true; From 765529dfff5cb04cd8ebf9275c9bccc9473fcbb5 Mon Sep 17 00:00:00 2001 From: Connor Baker Date: Thu, 15 May 2025 21:00:40 +0000 Subject: [PATCH 179/220] cudaPackages.fixups -> pkgs.cudaFixups --- .../cuda-modules/fixups/default.nix | 5 +++-- .../cuda-modules/generic-builders/manifest.nix | 4 ++-- pkgs/top-level/all-packages.nix | 2 ++ pkgs/top-level/cuda-packages.nix | 18 +++++------------- 4 files changed, 12 insertions(+), 17 deletions(-) diff --git a/pkgs/development/cuda-modules/fixups/default.nix b/pkgs/development/cuda-modules/fixups/default.nix index 9166e413bb71..a5ec4a90e817 100644 --- a/pkgs/development/cuda-modules/fixups/default.nix +++ b/pkgs/development/cuda-modules/fixups/default.nix @@ -1,5 +1,6 @@ -{ lib }: - +let + lib = import ../../../../lib; +in lib.concatMapAttrs ( fileName: _type: let diff --git a/pkgs/development/cuda-modules/generic-builders/manifest.nix b/pkgs/development/cuda-modules/generic-builders/manifest.nix index f4eecc29f4dc..712db3bd5b99 100644 --- a/pkgs/development/cuda-modules/generic-builders/manifest.nix +++ b/pkgs/development/cuda-modules/generic-builders/manifest.nix @@ -5,9 +5,9 @@ autoPatchelfHook, backendStdenv, callPackage, + cudaFixups, cudaLib, fetchurl, - fixups, lib, markForCudatoolkitRootHook, flags, @@ -45,7 +45,7 @@ let # Last step before returning control to `callPackage` (adds the `.override` method) # we'll apply (`overrideAttrs`) necessary package-specific "fixup" functions. # Order is significant. - maybeFixup = fixups.${pname} or null; + maybeFixup = cudaFixups.${pname} or null; fixup = if maybeFixup != null then callPackage maybeFixup { } else { }; # Get the redist systems for which package provides distributables. diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix index 59d64e99c884..dafb9f56fa5d 100644 --- a/pkgs/top-level/all-packages.nix +++ b/pkgs/top-level/all-packages.nix @@ -2721,6 +2721,8 @@ with pkgs; cudaLib = import ../development/cuda-modules/lib; + cudaFixups = import ../development/cuda-modules/fixups; + cudaPackages_11_0 = callPackage ./cuda-packages.nix { cudaMajorMinorVersion = "11.0"; }; cudaPackages_11_1 = callPackage ./cuda-packages.nix { cudaMajorMinorVersion = "11.1"; }; cudaPackages_11_2 = callPackage ./cuda-packages.nix { cudaMajorMinorVersion = "11.2"; }; diff --git a/pkgs/top-level/cuda-packages.nix b/pkgs/top-level/cuda-packages.nix index 60bc5c0dd953..5870d383291e 100644 --- a/pkgs/top-level/cuda-packages.nix +++ b/pkgs/top-level/cuda-packages.nix @@ -26,7 +26,6 @@ cudaMajorMinorVersion, lib, newScope, - pkgs, stdenv, }: let @@ -39,9 +38,6 @@ let versions ; - # MUST be defined outside fix-point (cf. "NAMESET STRICTNESS" above) - fixups = import ../development/cuda-modules/fixups { inherit lib; }; - # Since Jetson capabilities are never built by default, we can check if any of them were requested # through final.config.cudaCapabilities and use that to determine if we should change some manifest versions. # Copied from backendStdenv. 
@@ -54,19 +50,15 @@ let passthruFunction = final: { # NOTE: - # It is important that cudaLib (and fixups, which will be addressed later) are not part of the package set - # fixed-point. - # As described by @SomeoneSerge: + # It is important that cudaLib and cudaFixups are not part of the package set fixed-point. As described by + # @SomeoneSerge: # > The layering should be: configuration -> (identifies/is part of) cudaPackages -> (is built using) cudaLib. # > No arrows should point in the reverse directions. # That is to say that cudaLib should only know about package sets and configurations, because it implements # functionality for interpreting configurations, resolving them against data, and constructing package sets. - inherit - cudaMajorMinorVersion - fixups - lib - pkgs - ; + # This decision is driven both by a separation of concerns and by "NAMESET STRICTNESS" (see above). + + inherit cudaMajorMinorVersion; cudaNamePrefix = "cuda${cudaMajorMinorVersion}"; From ead65813623f92f8630811b1b3616877a727b1d9 Mon Sep 17 00:00:00 2001 From: Connor Baker Date: Thu, 15 May 2025 20:49:17 +0000 Subject: [PATCH 180/220] tree-wide: cudaPackages.flags updates Signed-off-by: Connor Baker --- pkgs/applications/science/misc/colmap/default.nix | 2 +- pkgs/by-name/dl/dlib/package.nix | 2 +- pkgs/by-name/gp/gpu-burn/package.nix | 4 ++-- pkgs/by-name/mi/mistral-rs/package.nix | 2 +- pkgs/by-name/mo/moshi/package.nix | 2 +- pkgs/by-name/ti/tiny-cuda-nn/package.nix | 2 +- pkgs/development/libraries/science/math/magma/generic.nix | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkgs/applications/science/misc/colmap/default.nix b/pkgs/applications/science/misc/colmap/default.nix index c28c1b15b96e..39c5798ff096 100644 --- a/pkgs/applications/science/misc/colmap/default.nix +++ b/pkgs/applications/science/misc/colmap/default.nix @@ -46,7 +46,7 @@ stdenv'.mkDerivation rec { cmakeFlags = lib.optionals cudaSupport [ (lib.cmakeBool "CUDA_ENABLED" true) (lib.cmakeFeature "CMAKE_CUDA_ARCHITECTURES" ( - lib.strings.concatStringsSep ";" (map cudaPackages.flags.dropDot cudaCapabilities) + lib.strings.concatStringsSep ";" (map cudaPackages.flags.dropDots cudaCapabilities) )) ]; diff --git a/pkgs/by-name/dl/dlib/package.nix b/pkgs/by-name/dl/dlib/package.nix index 7817b88b25e5..09135b652d75 100644 --- a/pkgs/by-name/dl/dlib/package.nix +++ b/pkgs/by-name/dl/dlib/package.nix @@ -42,7 +42,7 @@ ] ++ lib.optionals cudaSupport [ (lib.cmakeFeature "DLIB_USE_CUDA_COMPUTE_CAPABILITIES" ( - builtins.concatStringsSep "," (with cudaPackages.flags; map dropDot cudaCapabilities) + builtins.concatStringsSep "," (with cudaPackages.flags; map dropDots cudaCapabilities) )) ]; diff --git a/pkgs/by-name/gp/gpu-burn/package.nix b/pkgs/by-name/gp/gpu-burn/package.nix index 2028427c96c1..8f95db66b43c 100644 --- a/pkgs/by-name/gp/gpu-burn/package.nix +++ b/pkgs/by-name/gp/gpu-burn/package.nix @@ -17,7 +17,7 @@ let cuda_nvcc libcublas ; - inherit (cudaPackages.flags) cudaCapabilities dropDot isJetsonBuild; + inherit (cudaPackages.flags) cudaCapabilities dropDots isJetsonBuild; in backendStdenv.mkDerivation { pname = "gpu-burn"; @@ -53,7 +53,7 @@ backendStdenv.mkDerivation { makeFlags = [ "CUDAPATH=${getBin cuda_nvcc}" - "COMPUTE=${last (map dropDot cudaCapabilities)}" + "COMPUTE=${last (map dropDots cudaCapabilities)}" "IS_JETSON=${boolToString isJetsonBuild}" ]; diff --git a/pkgs/by-name/mi/mistral-rs/package.nix b/pkgs/by-name/mi/mistral-rs/package.nix index 18d3eccf6e3f..a3c22e807014 100644 --- 
a/pkgs/by-name/mi/mistral-rs/package.nix +++ b/pkgs/by-name/mi/mistral-rs/package.nix @@ -60,7 +60,7 @@ let )) else cudaCapability; - cudaCapability' = lib.toInt (cudaPackages.flags.dropDot cudaCapabilityString); + cudaCapability' = lib.toInt (cudaPackages.flags.dropDots cudaCapabilityString); mklSupport = assert accelIsValid; diff --git a/pkgs/by-name/mo/moshi/package.nix b/pkgs/by-name/mo/moshi/package.nix index 94fd6af59326..f0e2b3db2235 100644 --- a/pkgs/by-name/mo/moshi/package.nix +++ b/pkgs/by-name/mo/moshi/package.nix @@ -38,7 +38,7 @@ let )) else cudaCapability; - cudaCapability' = lib.toInt (cudaPackages.flags.dropDot cudaCapabilityString); + cudaCapability' = lib.toInt (cudaPackages.flags.dropDots cudaCapabilityString); in rustPlatform.buildRustPackage (finalAttrs: { pname = "moshi"; diff --git a/pkgs/by-name/ti/tiny-cuda-nn/package.nix b/pkgs/by-name/ti/tiny-cuda-nn/package.nix index 924d691e52bd..e2c2f75d13da 100644 --- a/pkgs/by-name/ti/tiny-cuda-nn/package.nix +++ b/pkgs/by-name/ti/tiny-cuda-nn/package.nix @@ -43,7 +43,7 @@ let cudaCapabilities = lists.subtractLists unsupportedCudaCapabilities flags.cudaCapabilities; - cudaArchitecturesString = strings.concatMapStringsSep ";" flags.dropDot cudaCapabilities; + cudaArchitecturesString = strings.concatMapStringsSep ";" flags.dropDots cudaCapabilities; in stdenv.mkDerivation (finalAttrs: { pname = "tiny-cuda-nn"; diff --git a/pkgs/development/libraries/science/math/magma/generic.nix b/pkgs/development/libraries/science/math/magma/generic.nix index 53407ebe7bcb..c7938b98368a 100644 --- a/pkgs/development/libraries/science/math/magma/generic.nix +++ b/pkgs/development/libraries/science/math/magma/generic.nix @@ -94,7 +94,7 @@ let minArch = let # E.g. [ "80" "86" "90" ] - cudaArchitectures = (builtins.map flags.dropDot flags.cudaCapabilities); + cudaArchitectures = (builtins.map flags.dropDots flags.cudaCapabilities); minArch' = builtins.head (builtins.sort strings.versionOlder cudaArchitectures); in # "75" -> "750" Cf. https://github.com/icl-utk-edu/magma/blob/v2.9.0/CMakeLists.txt#L200-L201 From 8fcff2390e3224e970291975cedcbd23f743c6da Mon Sep 17 00:00:00 2001 From: Connor Baker Date: Sat, 10 May 2025 02:51:30 +0000 Subject: [PATCH 181/220] cudaPackages: doc fixup Signed-off-by: Connor Baker --- pkgs/development/cuda-modules/README.md | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/pkgs/development/cuda-modules/README.md b/pkgs/development/cuda-modules/README.md index 3e6168e52be8..4d4e14eb5af8 100644 --- a/pkgs/development/cuda-modules/README.md +++ b/pkgs/development/cuda-modules/README.md @@ -8,15 +8,6 @@ The files in this directory are added (in some way) to the `cudaPackages` package set by [cuda-packages.nix](../../top-level/cuda-packages.nix). -## Top-level files - -Top-level nix files are included in the initial creation of the `cudaPackages` -scope. These are typically required for the creation of the finalized -`cudaPackages` scope: - -- `backend-stdenv.nix`: Standard environment for CUDA packages. -- `flags.nix`: Flags set, or consumed by, NVCC in order to build packages. - ## Top-level directories - `cuda`: CUDA redistributables! Provides extension to `cudaPackages` scope. @@ -38,6 +29,8 @@ scope. These are typically required for the creation of the finalized short, the Multiplex builder adds multiple versions of a single package to single instance of the CUDA Packages package set. It is used primarily for packages like `cudnn` and `cutensor`. 
+- `lib`: A library of functions and data used by and for the CUDA package set. + This library is exposed at the top-level as `pkgs.cudaLib`. - `modules`: Nixpkgs modules to check the shape and content of CUDA redistributable and feature manifests. These modules additionally use shims provided by some CUDA packages to allow them to re-use the @@ -45,9 +38,8 @@ scope. These are typically required for the creation of the finalized own. `cudnn` and `tensorrt` are examples of packages which provide such shims. These modules are further described in the [Modules](./modules/README.md) documentation. -- `nccl`: NVIDIA NCCL library. -- `nccl-tests`: NVIDIA NCCL tests. -- `saxpy`: Example CMake project that uses CUDA. +- `packages`: Contains packages which exist in every instance of the CUDA + package set. These packages are built in a `by-name` fashion. - `setup-hooks`: Nixpkgs setup hooks for CUDA. - `tensorrt`: NVIDIA TensorRT library. From 688e14d21a38135270544bfdfcc793d25dea2802 Mon Sep 17 00:00:00 2001 From: Connor Baker Date: Tue, 20 May 2025 09:32:03 -0700 Subject: [PATCH 182/220] _cuda: introduce to organize CUDA package set backbone Signed-off-by: Connor Baker --- doc/languages-frameworks/cuda.section.md | 4 +- .../{lib/data => _cuda/db/bootstrap}/cuda.nix | 60 +---------------- .../data => _cuda/db/bootstrap}/default.nix | 8 +-- .../{lib/data => _cuda/db/bootstrap}/nvcc.nix | 0 .../data => _cuda/db/bootstrap}/redist.nix | 0 .../cuda-modules/_cuda/db/default.nix | 65 +++++++++++++++++++ .../cuda-modules/_cuda/default.nix | 30 +++++++++ .../{ => _cuda}/fixups/cuda_compat.nix | 0 .../{ => _cuda}/fixups/cuda_cudart.nix | 0 .../{ => _cuda}/fixups/cuda_demo_suite.nix | 0 .../{ => _cuda}/fixups/cuda_gdb.nix | 0 .../{ => _cuda}/fixups/cuda_nvcc.nix | 0 .../{ => _cuda}/fixups/cuda_nvprof.nix | 0 .../{ => _cuda}/fixups/cuda_sanitizer_api.nix | 0 .../cuda-modules/{ => _cuda}/fixups/cudnn.nix | 0 .../{ => _cuda}/fixups/default.nix | 4 +- .../{ => _cuda}/fixups/driver_assistant.nix | 0 .../{ => _cuda}/fixups/fabricmanager.nix | 0 .../cuda-modules/{ => _cuda}/fixups/imex.nix | 0 .../{ => _cuda}/fixups/libcufile.nix | 0 .../{ => _cuda}/fixups/libcusolver.nix | 0 .../{ => _cuda}/fixups/libcusparse.nix | 0 .../{ => _cuda}/fixups/libcusparse_lt.nix | 0 .../{ => _cuda}/fixups/libcutensor.nix | 0 .../{ => _cuda}/fixups/nsight_compute.nix | 0 .../{ => _cuda}/fixups/nsight_systems.nix | 0 .../{ => _cuda}/fixups/nvidia_driver.nix | 0 .../{ => _cuda}/fixups/tensorrt.nix | 4 +- .../{lib/utils => _cuda/lib}/assertions.nix | 10 +-- .../{lib/utils => _cuda/lib}/cuda.nix | 2 +- .../{lib/utils => _cuda/lib}/default.nix | 15 +++-- .../{lib/utils => _cuda/lib}/meta.nix | 6 +- .../{lib/utils => _cuda/lib}/redist.nix | 10 +-- .../{lib/utils => _cuda/lib}/strings.nix | 39 ++++++----- .../{lib/utils => _cuda/lib}/versions.nix | 11 ++-- .../cuda-modules/cusparselt/extension.nix | 2 +- .../cuda-modules/cutensor/extension.nix | 2 +- .../generic-builders/manifest.nix | 11 ++-- .../generic-builders/multiplex.nix | 3 +- pkgs/development/cuda-modules/lib/default.nix | 13 ---- .../cuda-modules/packages/backendStdenv.nix | 6 +- pkgs/development/cuda-modules/tests/flags.nix | 5 +- pkgs/top-level/all-packages.nix | 5 +- pkgs/top-level/cuda-packages.nix | 23 ++++--- pkgs/top-level/release-cuda.nix | 4 +- 45 files changed, 186 insertions(+), 156 deletions(-) rename pkgs/development/cuda-modules/{lib/data => _cuda/db/bootstrap}/cuda.nix (88%) rename pkgs/development/cuda-modules/{lib/data => _cuda/db/bootstrap}/default.nix (74%) rename 
pkgs/development/cuda-modules/{lib/data => _cuda/db/bootstrap}/nvcc.nix (100%) rename pkgs/development/cuda-modules/{lib/data => _cuda/db/bootstrap}/redist.nix (100%) create mode 100644 pkgs/development/cuda-modules/_cuda/db/default.nix create mode 100644 pkgs/development/cuda-modules/_cuda/default.nix rename pkgs/development/cuda-modules/{ => _cuda}/fixups/cuda_compat.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/cuda_cudart.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/cuda_demo_suite.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/cuda_gdb.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/cuda_nvcc.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/cuda_nvprof.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/cuda_sanitizer_api.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/cudnn.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/default.nix (89%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/driver_assistant.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/fabricmanager.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/imex.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/libcufile.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/libcusolver.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/libcusparse.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/libcusparse_lt.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/libcutensor.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/nsight_compute.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/nsight_systems.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/nvidia_driver.nix (100%) rename pkgs/development/cuda-modules/{ => _cuda}/fixups/tensorrt.nix (98%) rename pkgs/development/cuda-modules/{lib/utils => _cuda/lib}/assertions.nix (91%) rename pkgs/development/cuda-modules/{lib/utils => _cuda/lib}/cuda.nix (98%) rename pkgs/development/cuda-modules/{lib/utils => _cuda/lib}/default.nix (73%) rename pkgs/development/cuda-modules/{lib/utils => _cuda/lib}/meta.nix (90%) rename pkgs/development/cuda-modules/{lib/utils => _cuda/lib}/redist.nix (95%) rename pkgs/development/cuda-modules/{lib/utils => _cuda/lib}/strings.nix (84%) rename pkgs/development/cuda-modules/{lib/utils => _cuda/lib}/versions.nix (84%) delete mode 100644 pkgs/development/cuda-modules/lib/default.nix diff --git a/doc/languages-frameworks/cuda.section.md b/doc/languages-frameworks/cuda.section.md index 7e489acdbd64..535ce02efbc6 100644 --- a/doc/languages-frameworks/cuda.section.md +++ b/doc/languages-frameworks/cuda.section.md @@ -115,8 +115,8 @@ All new projects should use the CUDA redistributables available in [`cudaPackage ### Updating supported compilers and GPUs {#updating-supported-compilers-and-gpus} -1. Update `nvccCompatibilities` in `pkgs/development/cuda-modules/lib/data/nvcc.nix` to include the newest release of NVCC, as well as any newly supported host compilers. -2. Update `cudaCapabilityToInfo` in `pkgs/development/cuda-modules/lib/data/cuda.nix` to include any new GPUs supported by the new release of CUDA. +1. Update `nvccCompatibilities` in `pkgs/development/cuda-modules/_cuda/data/nvcc.nix` to include the newest release of NVCC, as well as any newly supported host compilers. +2. 
Update `cudaCapabilityToInfo` in `pkgs/development/cuda-modules/_cuda/data/cuda.nix` to include any new GPUs supported by the new release of CUDA. ### Updating the CUDA Toolkit runfile installer {#updating-the-cuda-toolkit} diff --git a/pkgs/development/cuda-modules/lib/data/cuda.nix b/pkgs/development/cuda-modules/_cuda/db/bootstrap/cuda.nix similarity index 88% rename from pkgs/development/cuda-modules/lib/data/cuda.nix rename to pkgs/development/cuda-modules/_cuda/db/bootstrap/cuda.nix index 0b97645c420d..e852bc85b639 100644 --- a/pkgs/development/cuda-modules/lib/data/cuda.nix +++ b/pkgs/development/cuda-modules/_cuda/db/bootstrap/cuda.nix @@ -1,63 +1,5 @@ -{ cudaLib, lib }: +{ lib }: { - /** - All CUDA capabilities, sorted by version. - - NOTE: Since the capabilities are sorted by version and architecture/family-specific features are - appended to the minor version component, the sorted list groups capabilities by baseline feature - set. - - # Type - - ``` - allSortedCudaCapabilities :: [CudaCapability] - ``` - - # Example - - ``` - allSortedCudaCapabilities = [ - "5.0" - "5.2" - "6.0" - "6.1" - "7.0" - "7.2" - "7.5" - "8.0" - "8.6" - "8.7" - "8.9" - "9.0" - "9.0a" - "10.0" - "10.0a" - "10.0f" - "10.1" - "10.1a" - "10.1f" - "10.3" - "10.3a" - "10.3f" - ]; - ``` - */ - allSortedCudaCapabilities = lib.sort lib.versionOlder ( - lib.attrNames cudaLib.data.cudaCapabilityToInfo - ); - - /** - Mapping of CUDA micro-architecture name to capabilities belonging to that micro-architecture. - - # Type - - ``` - cudaArchNameToCapabilities :: AttrSet NonEmptyStr (NonEmptyListOf CudaCapability) - ``` - */ - cudaArchNameToCapabilities = lib.groupBy ( - cudaCapability: cudaLib.data.cudaCapabilityToInfo.${cudaCapability}.archName - ) cudaLib.data.allSortedCudaCapabilities; /** Attribute set of supported CUDA capability mapped to information about that capability. diff --git a/pkgs/development/cuda-modules/lib/data/default.nix b/pkgs/development/cuda-modules/_cuda/db/bootstrap/default.nix similarity index 74% rename from pkgs/development/cuda-modules/lib/data/default.nix rename to pkgs/development/cuda-modules/_cuda/db/bootstrap/default.nix index 60924cbb4f9e..8cfea1406934 100644 --- a/pkgs/development/cuda-modules/lib/data/default.nix +++ b/pkgs/development/cuda-modules/_cuda/db/bootstrap/default.nix @@ -1,9 +1,7 @@ -{ cudaLib, lib }: +{ lib }: { # See ./cuda.nix for documentation. 
- inherit (import ./cuda.nix { inherit cudaLib lib; }) - allSortedCudaCapabilities - cudaArchNameToCapabilities + inherit (import ./cuda.nix { inherit lib; }) cudaCapabilityToInfo ; @@ -28,5 +26,5 @@ cudaPackagesPath :: Path ``` */ - cudaPackagesPath = ./..; + cudaPackagesPath = ./../../..; } diff --git a/pkgs/development/cuda-modules/lib/data/nvcc.nix b/pkgs/development/cuda-modules/_cuda/db/bootstrap/nvcc.nix similarity index 100% rename from pkgs/development/cuda-modules/lib/data/nvcc.nix rename to pkgs/development/cuda-modules/_cuda/db/bootstrap/nvcc.nix diff --git a/pkgs/development/cuda-modules/lib/data/redist.nix b/pkgs/development/cuda-modules/_cuda/db/bootstrap/redist.nix similarity index 100% rename from pkgs/development/cuda-modules/lib/data/redist.nix rename to pkgs/development/cuda-modules/_cuda/db/bootstrap/redist.nix diff --git a/pkgs/development/cuda-modules/_cuda/db/default.nix b/pkgs/development/cuda-modules/_cuda/db/default.nix new file mode 100644 index 000000000000..e6d404102aab --- /dev/null +++ b/pkgs/development/cuda-modules/_cuda/db/default.nix @@ -0,0 +1,65 @@ +{ + lib, + bootstrapData, + db, +}: + +bootstrapData +// { + /** + All CUDA capabilities, sorted by version. + + NOTE: Since the capabilities are sorted by version and architecture/family-specific features are + appended to the minor version component, the sorted list groups capabilities by baseline feature + set. + + # Type + + ``` + allSortedCudaCapabilities :: [CudaCapability] + ``` + + # Example + + ``` + allSortedCudaCapabilities = [ + "5.0" + "5.2" + "6.0" + "6.1" + "7.0" + "7.2" + "7.5" + "8.0" + "8.6" + "8.7" + "8.9" + "9.0" + "9.0a" + "10.0" + "10.0a" + "10.0f" + "10.1" + "10.1a" + "10.1f" + "10.3" + "10.3a" + "10.3f" + ]; + ``` + */ + allSortedCudaCapabilities = lib.sort lib.versionOlder (lib.attrNames db.cudaCapabilityToInfo); + + /** + Mapping of CUDA micro-architecture name to capabilities belonging to that micro-architecture. + + # Type + + ``` + cudaArchNameToCapabilities :: AttrSet NonEmptyStr (NonEmptyListOf CudaCapability) + ``` + */ + cudaArchNameToCapabilities = lib.groupBy ( + cudaCapability: db.cudaCapabilityToInfo.${cudaCapability}.archName + ) db.allSortedCudaCapabilities; +} diff --git a/pkgs/development/cuda-modules/_cuda/default.nix b/pkgs/development/cuda-modules/_cuda/default.nix new file mode 100644 index 000000000000..4d0c3c4b5d6a --- /dev/null +++ b/pkgs/development/cuda-modules/_cuda/default.nix @@ -0,0 +1,30 @@ +# The _cuda attribute set is a fixed-point which contains the static functionality required to construct CUDA package +# sets. For example, `_cuda.cudaData` includes information about NVIDIA's redistributables (such as the names NVIDIA +# uses for different systems), `_cuda.cudaLib` contains utility functions like `formatCapabilities` (which generate +# common arguments passed to NVCC and `cmakeFlags`), and `_cuda.cudaFixups` contains `callPackage`-able functions +# which are provided to the corresponding package's `overrideAttrs` attribute to provide package-specific fixups +# out of scope of the generic redistributable builder. +# +# Since this attribute set is used to construct the CUDA package sets, it must exist outside the fixed point of the +# package sets. Make these attributes available directly in the package set construction could cause confusion if +# users override the attribute set with the expection that changes will be reflected in the enclosing CUDA package +# set. To avoid this, we declare `_cuda` and inherit its members here, at top-level. 
(This also allows us to benefit +# from import caching, as it should be evaluated once per system, rather than per-system and CUDA package set.) + +let + lib = import ../../../../lib; +in +lib.fixedPoints.makeExtensible (final: { + bootstrapData = import ./db/bootstrap { + inherit lib; + }; + db = import ./db { + inherit (final) bootstrapData db; + inherit lib; + }; + fixups = import ./fixups { inherit lib; }; + lib = import ./lib { + _cuda = final; + inherit lib; + }; +}) diff --git a/pkgs/development/cuda-modules/fixups/cuda_compat.nix b/pkgs/development/cuda-modules/_cuda/fixups/cuda_compat.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/cuda_compat.nix rename to pkgs/development/cuda-modules/_cuda/fixups/cuda_compat.nix diff --git a/pkgs/development/cuda-modules/fixups/cuda_cudart.nix b/pkgs/development/cuda-modules/_cuda/fixups/cuda_cudart.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/cuda_cudart.nix rename to pkgs/development/cuda-modules/_cuda/fixups/cuda_cudart.nix diff --git a/pkgs/development/cuda-modules/fixups/cuda_demo_suite.nix b/pkgs/development/cuda-modules/_cuda/fixups/cuda_demo_suite.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/cuda_demo_suite.nix rename to pkgs/development/cuda-modules/_cuda/fixups/cuda_demo_suite.nix diff --git a/pkgs/development/cuda-modules/fixups/cuda_gdb.nix b/pkgs/development/cuda-modules/_cuda/fixups/cuda_gdb.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/cuda_gdb.nix rename to pkgs/development/cuda-modules/_cuda/fixups/cuda_gdb.nix diff --git a/pkgs/development/cuda-modules/fixups/cuda_nvcc.nix b/pkgs/development/cuda-modules/_cuda/fixups/cuda_nvcc.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/cuda_nvcc.nix rename to pkgs/development/cuda-modules/_cuda/fixups/cuda_nvcc.nix diff --git a/pkgs/development/cuda-modules/fixups/cuda_nvprof.nix b/pkgs/development/cuda-modules/_cuda/fixups/cuda_nvprof.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/cuda_nvprof.nix rename to pkgs/development/cuda-modules/_cuda/fixups/cuda_nvprof.nix diff --git a/pkgs/development/cuda-modules/fixups/cuda_sanitizer_api.nix b/pkgs/development/cuda-modules/_cuda/fixups/cuda_sanitizer_api.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/cuda_sanitizer_api.nix rename to pkgs/development/cuda-modules/_cuda/fixups/cuda_sanitizer_api.nix diff --git a/pkgs/development/cuda-modules/fixups/cudnn.nix b/pkgs/development/cuda-modules/_cuda/fixups/cudnn.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/cudnn.nix rename to pkgs/development/cuda-modules/_cuda/fixups/cudnn.nix diff --git a/pkgs/development/cuda-modules/fixups/default.nix b/pkgs/development/cuda-modules/_cuda/fixups/default.nix similarity index 89% rename from pkgs/development/cuda-modules/fixups/default.nix rename to pkgs/development/cuda-modules/_cuda/fixups/default.nix index a5ec4a90e817..0c9874672ce4 100644 --- a/pkgs/development/cuda-modules/fixups/default.nix +++ b/pkgs/development/cuda-modules/_cuda/fixups/default.nix @@ -1,6 +1,4 @@ -let - lib = import ../../../../lib; -in +{ lib }: lib.concatMapAttrs ( fileName: _type: let diff --git a/pkgs/development/cuda-modules/fixups/driver_assistant.nix b/pkgs/development/cuda-modules/_cuda/fixups/driver_assistant.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/driver_assistant.nix rename to 
pkgs/development/cuda-modules/_cuda/fixups/driver_assistant.nix diff --git a/pkgs/development/cuda-modules/fixups/fabricmanager.nix b/pkgs/development/cuda-modules/_cuda/fixups/fabricmanager.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/fabricmanager.nix rename to pkgs/development/cuda-modules/_cuda/fixups/fabricmanager.nix diff --git a/pkgs/development/cuda-modules/fixups/imex.nix b/pkgs/development/cuda-modules/_cuda/fixups/imex.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/imex.nix rename to pkgs/development/cuda-modules/_cuda/fixups/imex.nix diff --git a/pkgs/development/cuda-modules/fixups/libcufile.nix b/pkgs/development/cuda-modules/_cuda/fixups/libcufile.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/libcufile.nix rename to pkgs/development/cuda-modules/_cuda/fixups/libcufile.nix diff --git a/pkgs/development/cuda-modules/fixups/libcusolver.nix b/pkgs/development/cuda-modules/_cuda/fixups/libcusolver.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/libcusolver.nix rename to pkgs/development/cuda-modules/_cuda/fixups/libcusolver.nix diff --git a/pkgs/development/cuda-modules/fixups/libcusparse.nix b/pkgs/development/cuda-modules/_cuda/fixups/libcusparse.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/libcusparse.nix rename to pkgs/development/cuda-modules/_cuda/fixups/libcusparse.nix diff --git a/pkgs/development/cuda-modules/fixups/libcusparse_lt.nix b/pkgs/development/cuda-modules/_cuda/fixups/libcusparse_lt.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/libcusparse_lt.nix rename to pkgs/development/cuda-modules/_cuda/fixups/libcusparse_lt.nix diff --git a/pkgs/development/cuda-modules/fixups/libcutensor.nix b/pkgs/development/cuda-modules/_cuda/fixups/libcutensor.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/libcutensor.nix rename to pkgs/development/cuda-modules/_cuda/fixups/libcutensor.nix diff --git a/pkgs/development/cuda-modules/fixups/nsight_compute.nix b/pkgs/development/cuda-modules/_cuda/fixups/nsight_compute.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/nsight_compute.nix rename to pkgs/development/cuda-modules/_cuda/fixups/nsight_compute.nix diff --git a/pkgs/development/cuda-modules/fixups/nsight_systems.nix b/pkgs/development/cuda-modules/_cuda/fixups/nsight_systems.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/nsight_systems.nix rename to pkgs/development/cuda-modules/_cuda/fixups/nsight_systems.nix diff --git a/pkgs/development/cuda-modules/fixups/nvidia_driver.nix b/pkgs/development/cuda-modules/_cuda/fixups/nvidia_driver.nix similarity index 100% rename from pkgs/development/cuda-modules/fixups/nvidia_driver.nix rename to pkgs/development/cuda-modules/_cuda/fixups/nvidia_driver.nix diff --git a/pkgs/development/cuda-modules/fixups/tensorrt.nix b/pkgs/development/cuda-modules/_cuda/fixups/tensorrt.nix similarity index 98% rename from pkgs/development/cuda-modules/fixups/tensorrt.nix rename to pkgs/development/cuda-modules/_cuda/fixups/tensorrt.nix index c55844eac0d9..2b73e8c2d4be 100644 --- a/pkgs/development/cuda-modules/fixups/tensorrt.nix +++ b/pkgs/development/cuda-modules/_cuda/fixups/tensorrt.nix @@ -1,5 +1,5 @@ { - cudaLib, + _cuda, cudaOlder, cudaPackages, cudaMajorMinorVersion, @@ -103,7 +103,7 @@ finalAttrs: prevAttrs: { # unless it is not available, in which case the default cudnn derivation will be 
used. cudnn = let - desiredName = cudaLib.utils.mkVersionedName "cudnn" ( + desiredName = _cuda.lib.mkVersionedName "cudnn" ( lib.versions.majorMinor finalAttrs.passthru.featureRelease.cudnnVersion ); in diff --git a/pkgs/development/cuda-modules/lib/utils/assertions.nix b/pkgs/development/cuda-modules/_cuda/lib/assertions.nix similarity index 91% rename from pkgs/development/cuda-modules/lib/utils/assertions.nix rename to pkgs/development/cuda-modules/_cuda/lib/assertions.nix index 5fc940889b7f..f4413b28e4f3 100644 --- a/pkgs/development/cuda-modules/lib/utils/assertions.nix +++ b/pkgs/development/cuda-modules/_cuda/lib/assertions.nix @@ -1,4 +1,4 @@ -{ cudaLib, lib }: +{ _cuda, lib }: { /** Evaluate assertions and add error context to return value. @@ -16,7 +16,7 @@ _evaluateAssertions = assertions: let - failedAssertionsString = cudaLib.utils._mkFailedAssertionsString assertions; + failedAssertionsString = _cuda.lib._mkFailedAssertionsString assertions; in if failedAssertionsString == "" then true @@ -45,7 +45,7 @@ # Examples :::{.example} - ## `cudaLib.utils._mkFailedAssertionsString` usage examples + ## `_cuda.lib._mkFailedAssertionsString` usage examples ```nix _mkFailedAssertionsString [ @@ -103,7 +103,7 @@ # Examples :::{.example} - ## `cudaLib.utils._mkMissingPackagesAssertions` usage examples + ## `_cuda.lib._mkMissingPackagesAssertions` usage examples ```nix { @@ -114,7 +114,7 @@ }: let inherit (lib.attrsets) recursiveUpdate; - inherit (cudaLib.utils) _mkMissingPackagesAssertions; + inherit (_cuda.lib) _mkMissingPackagesAssertions; in prevAttrs: { passthru = prevAttrs.passthru or { } // { diff --git a/pkgs/development/cuda-modules/lib/utils/cuda.nix b/pkgs/development/cuda-modules/_cuda/lib/cuda.nix similarity index 98% rename from pkgs/development/cuda-modules/lib/utils/cuda.nix rename to pkgs/development/cuda-modules/_cuda/lib/cuda.nix index e372fda0d1b5..16f99e57709a 100644 --- a/pkgs/development/cuda-modules/lib/utils/cuda.nix +++ b/pkgs/development/cuda-modules/_cuda/lib/cuda.nix @@ -92,7 +92,7 @@ # Examples :::{.example} - ## `cudaLib.utils._mkCudaVariant` usage examples + ## `_cuda.lib._mkCudaVariant` usage examples ```nix _mkCudaVariant "11.0" diff --git a/pkgs/development/cuda-modules/lib/utils/default.nix b/pkgs/development/cuda-modules/_cuda/lib/default.nix similarity index 73% rename from pkgs/development/cuda-modules/lib/utils/default.nix rename to pkgs/development/cuda-modules/_cuda/lib/default.nix index ee9ace015fae..b2fe9838de61 100644 --- a/pkgs/development/cuda-modules/lib/utils/default.nix +++ b/pkgs/development/cuda-modules/_cuda/lib/default.nix @@ -1,7 +1,10 @@ -{ cudaLib, lib }: +{ + _cuda, + lib, +}: { # See ./assertions.nix for documentation. - inherit (import ./assertions.nix { inherit cudaLib lib; }) + inherit (import ./assertions.nix { inherit _cuda lib; }) _evaluateAssertions _mkFailedAssertionsString _mkMissingPackagesAssertions @@ -16,13 +19,13 @@ ; # See ./meta.nix for documentation. - inherit (import ./meta.nix { inherit cudaLib lib; }) + inherit (import ./meta.nix { inherit _cuda lib; }) _mkMetaBadPlatforms _mkMetaBroken ; # See ./redist.nix for documentation. - inherit (import ./redist.nix { inherit cudaLib lib; }) + inherit (import ./redist.nix { inherit _cuda lib; }) _redistSystemIsSupported getNixSystems getRedistSystem @@ -30,7 +33,7 @@ ; # See ./strings.nix for documentation. 
- inherit (import ./strings.nix { inherit cudaLib lib; }) + inherit (import ./strings.nix { inherit _cuda lib; }) dotsToUnderscores dropDots formatCapabilities @@ -42,7 +45,7 @@ ; # See ./versions.nix for documentation. - inherit (import ./versions.nix { inherit cudaLib lib; }) + inherit (import ./versions.nix { inherit _cuda lib; }) majorMinorPatch trimComponents ; diff --git a/pkgs/development/cuda-modules/lib/utils/meta.nix b/pkgs/development/cuda-modules/_cuda/lib/meta.nix similarity index 90% rename from pkgs/development/cuda-modules/lib/utils/meta.nix rename to pkgs/development/cuda-modules/_cuda/lib/meta.nix index 327d4ae97d95..72f71973b0cd 100644 --- a/pkgs/development/cuda-modules/lib/utils/meta.nix +++ b/pkgs/development/cuda-modules/_cuda/lib/meta.nix @@ -1,4 +1,4 @@ -{ cudaLib, lib }: +{ _cuda, lib }: { /** Returns a list of bad platforms for a given package if assertsions in `finalAttrs.passthru.platformAssertions` @@ -18,7 +18,7 @@ _mkMetaBadPlatforms = warn: finalAttrs: let - failedAssertionsString = cudaLib.utils._mkFailedAssertionsString finalAttrs.passthru.platformAssertions; + failedAssertionsString = _cuda.lib._mkFailedAssertionsString finalAttrs.passthru.platformAssertions; hasFailedAssertions = failedAssertionsString != ""; finalStdenv = finalAttrs.finalPackage.stdenv; in @@ -62,7 +62,7 @@ _mkMetaBroken = warn: finalAttrs: let - failedAssertionsString = cudaLib.utils._mkFailedAssertionsString finalAttrs.passthru.brokenAssertions; + failedAssertionsString = _cuda.lib._mkFailedAssertionsString finalAttrs.passthru.brokenAssertions; hasFailedAssertions = failedAssertionsString != ""; in lib.warnIf (warn && hasFailedAssertions) diff --git a/pkgs/development/cuda-modules/lib/utils/redist.nix b/pkgs/development/cuda-modules/_cuda/lib/redist.nix similarity index 95% rename from pkgs/development/cuda-modules/lib/utils/redist.nix rename to pkgs/development/cuda-modules/_cuda/lib/redist.nix index b9cd2a3b1e0d..a7053dc582c4 100644 --- a/pkgs/development/cuda-modules/lib/utils/redist.nix +++ b/pkgs/development/cuda-modules/_cuda/lib/redist.nix @@ -1,4 +1,4 @@ -{ cudaLib, lib }: +{ _cuda, lib }: { /** Returns a boolean indicating whether the provided redist system is supported by any of the provided redist systems. 
@@ -27,7 +27,7 @@ # Examples :::{.example} - ## `cudaLib.utils._redistSystemIsSupported` usage examples + ## `cudaLib._redistSystemIsSupported` usage examples ```nix _redistSystemIsSupported "linux-x86_64" [ "linux-x86_64" ] @@ -81,7 +81,7 @@ # Examples :::{.example} - ## `cudaLib.utils.getNixSystems` usage examples + ## `cudaLib.getNixSystems` usage examples ```nix getNixSystems "linux-sbsa" @@ -137,7 +137,7 @@ # Examples :::{.example} - ## `cudaLib.utils.getRedistSystem` usage examples + ## `cudaLib.getRedistSystem` usage examples ```nix getRedistSystem true "aarch64-linux" @@ -181,7 +181,7 @@ mkRedistUrl = redistName: relativePath: lib.concatStringsSep "/" ( - [ cudaLib.data.redistUrlPrefix ] + [ _cuda.db.redistUrlPrefix ] ++ ( if redistName != "tensorrt" then [ diff --git a/pkgs/development/cuda-modules/lib/utils/strings.nix b/pkgs/development/cuda-modules/_cuda/lib/strings.nix similarity index 84% rename from pkgs/development/cuda-modules/lib/utils/strings.nix rename to pkgs/development/cuda-modules/_cuda/lib/strings.nix index 47a71b395f4f..f25157b4da4c 100644 --- a/pkgs/development/cuda-modules/lib/utils/strings.nix +++ b/pkgs/development/cuda-modules/_cuda/lib/strings.nix @@ -1,4 +1,7 @@ -{ cudaLib, lib }: +{ _cuda, lib }: +let + cudaLib = _cuda.lib; +in { /** Replaces dots in a string with underscores. @@ -18,7 +21,7 @@ # Examples :::{.example} - ## `cudaLib.utils.dotsToUnderscores` usage examples + ## `cudaLib.dotsToUnderscores` usage examples ```nix dotsToUnderscores "1.2.3" @@ -46,7 +49,7 @@ # Examples :::{.example} - ## `cudaLib.utils.dropDots` usage examples + ## `cudaLib.dropDots` usage examples ```nix dropDots "1.2.3" @@ -110,7 +113,7 @@ realArches :: List String ``` */ - realArches = lib.map cudaLib.utils.mkRealArchitecture cudaCapabilities; + realArches = lib.map cudaLib.mkRealArchitecture cudaCapabilities; /** The virtual architectures for the given CUDA capabilities. @@ -124,7 +127,7 @@ virtualArches :: List String ``` */ - virtualArches = lib.map cudaLib.utils.mkVirtualArchitecture cudaCapabilities; + virtualArches = lib.map cudaLib.mkVirtualArchitecture cudaCapabilities; /** The gencode flags for the given CUDA capabilities. @@ -137,8 +140,8 @@ */ gencode = let - base = lib.map (cudaLib.utils.mkGencodeFlag "sm") cudaCapabilities; - forward = cudaLib.utils.mkGencodeFlag "compute" (lib.last cudaCapabilities); + base = lib.map (cudaLib.mkGencodeFlag "sm") cudaCapabilities; + forward = cudaLib.mkGencodeFlag "compute" (lib.last cudaCapabilities); in base ++ lib.optionals cudaForwardCompat [ forward ]; in @@ -190,7 +193,7 @@ cmakeCudaArchitecturesString :: String ``` */ - cmakeCudaArchitecturesString = cudaLib.utils.mkCmakeCudaArchitecturesString cudaCapabilities; + cmakeCudaArchitecturesString = cudaLib.mkCmakeCudaArchitecturesString cudaCapabilities; /** The gencode string for the given CUDA capabilities. @@ -222,7 +225,7 @@ # Examples :::{.example} - ## `cudaLib.utils.mkCmakeCudaArchitecturesString` usage examples + ## `cudaLib.mkCmakeCudaArchitecturesString` usage examples ```nix mkCmakeCudaArchitecturesString [ "8.9" "10.0a" ] @@ -230,7 +233,7 @@ ``` ::: */ - mkCmakeCudaArchitecturesString = lib.concatMapStringsSep ";" cudaLib.utils.dropDots; + mkCmakeCudaArchitecturesString = lib.concatMapStringsSep ";" cudaLib.dropDots; /** Produces a gencode flag from a CUDA capability. 
@@ -254,7 +257,7 @@ # Examples :::{.example} - ## `cudaLib.utils.mkGencodeFlag` usage examples + ## `cudaLib.mkGencodeFlag` usage examples ```nix mkGencodeFlag "sm" "8.9" @@ -270,7 +273,7 @@ mkGencodeFlag = archPrefix: cudaCapability: let - cap = cudaLib.utils.dropDots cudaCapability; + cap = cudaLib.dropDots cudaCapability; in "-gencode=arch=compute_${cap},code=${archPrefix}_${cap}"; @@ -292,7 +295,7 @@ # Examples :::{.example} - ## `cudaLib.utils.mkRealArchitecture` usage examples + ## `cudaLib.mkRealArchitecture` usage examples ```nix mkRealArchitecture "8.9" @@ -305,7 +308,7 @@ ``` ::: */ - mkRealArchitecture = cudaCapability: "sm_" + cudaLib.utils.dropDots cudaCapability; + mkRealArchitecture = cudaCapability: "sm_" + cudaLib.dropDots cudaCapability; /** Create a versioned attribute name from a version by replacing dots with underscores. @@ -329,7 +332,7 @@ # Examples :::{.example} - ## `cudaLib.utils.mkVersionedName` usage examples + ## `cudaLib.mkVersionedName` usage examples ```nix mkVersionedName "hello" "1.2.3" @@ -342,7 +345,7 @@ ``` ::: */ - mkVersionedName = name: version: "${name}_${cudaLib.utils.dotsToUnderscores version}"; + mkVersionedName = name: version: "${name}_${cudaLib.dotsToUnderscores version}"; /** Produces a virtual architecture string from a CUDA capability. @@ -362,7 +365,7 @@ # Examples :::{.example} - ## `cudaLib.utils.mkVirtualArchitecture` usage examples + ## `cudaLib.mkVirtualArchitecture` usage examples ```nix mkVirtualArchitecture "8.9" @@ -375,5 +378,5 @@ ``` ::: */ - mkVirtualArchitecture = cudaCapability: "compute_" + cudaLib.utils.dropDots cudaCapability; + mkVirtualArchitecture = cudaCapability: "compute_" + cudaLib.dropDots cudaCapability; } diff --git a/pkgs/development/cuda-modules/lib/utils/versions.nix b/pkgs/development/cuda-modules/_cuda/lib/versions.nix similarity index 84% rename from pkgs/development/cuda-modules/lib/utils/versions.nix rename to pkgs/development/cuda-modules/_cuda/lib/versions.nix index 976ad65b2b65..e231f795f13f 100644 --- a/pkgs/development/cuda-modules/lib/utils/versions.nix +++ b/pkgs/development/cuda-modules/_cuda/lib/versions.nix @@ -1,4 +1,7 @@ -{ cudaLib, lib }: +{ _cuda, lib }: +let + cudaLib = _cuda.lib; +in { /** Extracts the major, minor, and patch version from a string. @@ -18,7 +21,7 @@ # Examples :::{.example} - ## `cudaLib.utils.majorMinorPatch` usage examples + ## `_cuda.lib.majorMinorPatch` usage examples ```nix majorMinorPatch "11.0.3.4" @@ -26,7 +29,7 @@ ``` ::: */ - majorMinorPatch = cudaLib.utils.trimComponents 3; + majorMinorPatch = cudaLib.trimComponents 3; /** Get a version string with no more than than the specified number of components. @@ -48,7 +51,7 @@ # Examples :::{.example} - ## `cudaLib.utils.trimComponents` usage examples + ## `_cuda.lib.trimComponents` usage examples ```nix trimComponents 1 "1.2.3.4" diff --git a/pkgs/development/cuda-modules/cusparselt/extension.nix b/pkgs/development/cuda-modules/cusparselt/extension.nix index 56308973c341..f53405e3d099 100644 --- a/pkgs/development/cuda-modules/cusparselt/extension.nix +++ b/pkgs/development/cuda-modules/cusparselt/extension.nix @@ -69,7 +69,7 @@ let # Patch version changes should not break the build, so we only use major and minor # computeName :: RedistribRelease -> String computeName = - { version, ... }: cudaLib.utils.mkVersionedName redistName (lib.versions.majorMinor version); + { version, ... 
}: cudaLib.mkVersionedName redistName (lib.versions.majorMinor version); in final: _: let diff --git a/pkgs/development/cuda-modules/cutensor/extension.nix b/pkgs/development/cuda-modules/cutensor/extension.nix index 5f59cfb15bb6..57de518aa7ac 100644 --- a/pkgs/development/cuda-modules/cutensor/extension.nix +++ b/pkgs/development/cuda-modules/cutensor/extension.nix @@ -108,7 +108,7 @@ let # Patch version changes should not break the build, so we only use major and minor # computeName :: RedistribRelease -> String computeName = - { version, ... }: cudaLib.utils.mkVersionedName redistName (lib.versions.majorMinor version); + { version, ... }: cudaLib.mkVersionedName redistName (lib.versions.majorMinor version); in final: _: let diff --git a/pkgs/development/cuda-modules/generic-builders/manifest.nix b/pkgs/development/cuda-modules/generic-builders/manifest.nix index 712db3bd5b99..36872cf9682f 100644 --- a/pkgs/development/cuda-modules/generic-builders/manifest.nix +++ b/pkgs/development/cuda-modules/generic-builders/manifest.nix @@ -5,8 +5,7 @@ autoPatchelfHook, backendStdenv, callPackage, - cudaFixups, - cudaLib, + _cuda, fetchurl, lib, markForCudatoolkitRootHook, @@ -45,7 +44,7 @@ let # Last step before returning control to `callPackage` (adds the `.override` method) # we'll apply (`overrideAttrs`) necessary package-specific "fixup" functions. # Order is significant. - maybeFixup = cudaFixups.${pname} or null; + maybeFixup = _cuda.fixups.${pname} or null; fixup = if maybeFixup != null then callPackage maybeFixup { } else { }; # Get the redist systems for which package provides distributables. @@ -54,9 +53,9 @@ let # redistSystem :: String # The redistSystem is the name of the system for which the redistributable is built. # It is `"unsupported"` if the redistributable is not supported on the target system. - redistSystem = cudaLib.utils.getRedistSystem backendStdenv.hasJetsonCudaCapability hostPlatform.system; + redistSystem = _cuda.lib.getRedistSystem backendStdenv.hasJetsonCudaCapability hostPlatform.system; - sourceMatchesHost = lib.elem hostPlatform.system (cudaLib.utils.getNixSystems redistSystem); + sourceMatchesHost = lib.elem hostPlatform.system (_cuda.lib.getNixSystems redistSystem); in (backendStdenv.mkDerivation (finalAttrs: { # NOTE: Even though there's no actual buildPhase going on here, the derivations of the @@ -327,7 +326,7 @@ in broken = lists.any trivial.id (attrsets.attrValues finalAttrs.brokenConditions); platforms = trivial.pipe supportedRedistSystems [ # Map each redist system to the equivalent nix systems. - (lib.concatMap cudaLib.utils.getNixSystems) + (lib.concatMap _cuda.lib.getNixSystems) # Take all the unique values. lib.unique # Sort the list. diff --git a/pkgs/development/cuda-modules/generic-builders/multiplex.nix b/pkgs/development/cuda-modules/generic-builders/multiplex.nix index 0a6d101eaf9d..3085ae3e4610 100644 --- a/pkgs/development/cuda-modules/generic-builders/multiplex.nix +++ b/pkgs/development/cuda-modules/generic-builders/multiplex.nix @@ -64,8 +64,7 @@ let # Compute versioned attribute name to be used in this package set # Patch version changes should not break the build, so we only use major and minor # computeName :: Package -> String - computeName = - { version, ... }: cudaLib.utils.mkVersionedName pname (lib.versions.majorMinor version); + computeName = { version, ... }: cudaLib.mkVersionedName pname (lib.versions.majorMinor version); # The newest package for each major-minor version, with newest first. 
# newestPackages :: List Package diff --git a/pkgs/development/cuda-modules/lib/default.nix b/pkgs/development/cuda-modules/lib/default.nix deleted file mode 100644 index 6918d9ca5053..000000000000 --- a/pkgs/development/cuda-modules/lib/default.nix +++ /dev/null @@ -1,13 +0,0 @@ -let - lib = import ../../../../lib; -in -lib.fixedPoints.makeExtensible (final: { - data = import ./data { - inherit lib; - cudaLib = final; - }; - utils = import ./utils { - inherit lib; - cudaLib = final; - }; -}) diff --git a/pkgs/development/cuda-modules/packages/backendStdenv.nix b/pkgs/development/cuda-modules/packages/backendStdenv.nix index ac9a8ebaf44f..7122ad2da319 100644 --- a/pkgs/development/cuda-modules/packages/backendStdenv.nix +++ b/pkgs/development/cuda-modules/packages/backendStdenv.nix @@ -7,7 +7,7 @@ # Cf. https://github.com/NixOS/nixpkgs/pull/218265 for context { config, - cudaLib, + _cuda, cudaMajorMinorVersion, lib, pkgs, @@ -16,8 +16,8 @@ }: let inherit (builtins) toJSON; - inherit (cudaLib.data) allSortedCudaCapabilities cudaCapabilityToInfo nvccCompatibilities; - inherit (cudaLib.utils) + inherit (_cuda.db) allSortedCudaCapabilities cudaCapabilityToInfo nvccCompatibilities; + inherit (_cuda.lib) _cudaCapabilityIsDefault _cudaCapabilityIsSupported _evaluateAssertions diff --git a/pkgs/development/cuda-modules/tests/flags.nix b/pkgs/development/cuda-modules/tests/flags.nix index d9ae72231c6c..314f69d1b90a 100644 --- a/pkgs/development/cuda-modules/tests/flags.nix +++ b/pkgs/development/cuda-modules/tests/flags.nix @@ -1,4 +1,5 @@ { + cudaData, cudaLib, cudaNamePrefix, lib, @@ -6,8 +7,8 @@ }: let inherit (builtins) deepSeq toJSON tryEval; - inherit (cudaLib.data) cudaCapabilityToInfo; - inherit (cudaLib.utils) formatCapabilities; + inherit (cudaData) cudaCapabilityToInfo; + inherit (cudaLib) formatCapabilities; inherit (lib.asserts) assertMsg; in # When changing names or formats: pause, validate, and update the assert diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix index dafb9f56fa5d..7c5afa23f43c 100644 --- a/pkgs/top-level/all-packages.nix +++ b/pkgs/top-level/all-packages.nix @@ -2719,9 +2719,8 @@ with pkgs; cron = isc-cron; - cudaLib = import ../development/cuda-modules/lib; - - cudaFixups = import ../development/cuda-modules/fixups; + # Top-level fix-point used in `cudaPackages`' internals + _cuda = import ../development/cuda-modules/_cuda; cudaPackages_11_0 = callPackage ./cuda-packages.nix { cudaMajorMinorVersion = "11.0"; }; cudaPackages_11_1 = callPackage ./cuda-packages.nix { cudaMajorMinorVersion = "11.1"; }; diff --git a/pkgs/top-level/cuda-packages.nix b/pkgs/top-level/cuda-packages.nix index 5870d383291e..53dd4b6029ea 100644 --- a/pkgs/top-level/cuda-packages.nix +++ b/pkgs/top-level/cuda-packages.nix @@ -22,7 +22,7 @@ # I've (@connorbaker) attempted to do that, though I'm unsure of how this will interact with overrides. { config, - cudaLib, + _cuda, cudaMajorMinorVersion, lib, newScope, @@ -38,25 +38,28 @@ let versions ; + cudaLib = _cuda.lib; + # Since Jetson capabilities are never built by default, we can check if any of them were requested # through final.config.cudaCapabilities and use that to determine if we should change some manifest versions. # Copied from backendStdenv. 
jetsonCudaCapabilities = lib.filter ( - cudaCapability: cudaLib.data.cudaCapabilityToInfo.${cudaCapability}.isJetson - ) cudaLib.data.allSortedCudaCapabilities; + cudaCapability: _cuda.db.cudaCapabilityToInfo.${cudaCapability}.isJetson + ) _cuda.db.allSortedCudaCapabilities; hasJetsonCudaCapability = lib.intersectLists jetsonCudaCapabilities (config.cudaCapabilities or [ ]) != [ ]; - redistSystem = cudaLib.utils.getRedistSystem hasJetsonCudaCapability stdenv.hostPlatform.system; + redistSystem = _cuda.lib.getRedistSystem hasJetsonCudaCapability stdenv.hostPlatform.system; passthruFunction = final: { # NOTE: - # It is important that cudaLib and cudaFixups are not part of the package set fixed-point. As described by + # It is important that _cuda is not part of the package set fixed-point. As described by # @SomeoneSerge: # > The layering should be: configuration -> (identifies/is part of) cudaPackages -> (is built using) cudaLib. # > No arrows should point in the reverse directions. # That is to say that cudaLib should only know about package sets and configurations, because it implements # functionality for interpreting configurations, resolving them against data, and constructing package sets. # This decision is driven both by a separation of concerns and by "NAMESET STRICTNESS" (see above). + # Also see the comment in `pkgs/top-level/all-packages.nix` about the `_cuda` attribute. inherit cudaMajorMinorVersion; @@ -77,17 +80,17 @@ let }; flags = - cudaLib.utils.formatCapabilities { + cudaLib.formatCapabilities { inherit (final.backendStdenv) cudaCapabilities cudaForwardCompat; - inherit (cudaLib.data) cudaCapabilityToInfo; + inherit (_cuda.db) cudaCapabilityToInfo; } # TODO(@connorbaker): Enable the corresponding warnings in `../development/cuda-modules/aliases.nix` after some # time to allow users to migrate to cudaLib and backendStdenv. // { - inherit (cudaLib.utils) dropDots; + inherit (cudaLib) dropDots; cudaComputeCapabilityToName = - cudaCapability: cudaLib.data.cudaCapabilityToInfo.${cudaCapability}.archName; - dropDot = cudaLib.utils.dropDots; + cudaCapability: _cuda.db.cudaCapabilityToInfo.${cudaCapability}.archName; + dropDot = cudaLib.dropDots; isJetsonBuild = final.backendStdenv.hasJetsonCudaCapability; }; diff --git a/pkgs/top-level/release-cuda.nix b/pkgs/top-level/release-cuda.nix index 68bec74c9530..76e813c51427 100644 --- a/pkgs/top-level/release-cuda.nix +++ b/pkgs/top-level/release-cuda.nix @@ -14,7 +14,7 @@ let lib = import ../../lib; - cudaLib = import ../development/cuda-modules/lib; + inherit (import ../development/cuda-modules/_cuda) cudaLib; in { @@ -27,7 +27,7 @@ in # Attributes passed to nixpkgs. nixpkgsArgs ? { config = { - allowUnfreePredicate = cudaLib.utils.allowUnfreeCudaPredicate; + allowUnfreePredicate = cudaLib.allowUnfreeCudaPredicate; "${variant}Support" = true; inHydra = true; From fb1e54cf601f4c4c760950f6db517324f9a07a2b Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 15:06:36 +0000 Subject: [PATCH 183/220] candy-icons: 0-unstable-2025-05-08 -> 0-unstable-2025-05-24 --- pkgs/by-name/ca/candy-icons/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ca/candy-icons/package.nix b/pkgs/by-name/ca/candy-icons/package.nix index 08be228942e6..0dc9fedfe78c 100644 --- a/pkgs/by-name/ca/candy-icons/package.nix +++ b/pkgs/by-name/ca/candy-icons/package.nix @@ -8,13 +8,13 @@ stdenvNoCC.mkDerivation { pname = "candy-icons"; - version = "0-unstable-2025-05-08"; + version = "0-unstable-2025-05-24"; src = fetchFromGitHub { owner = "EliverLara"; repo = "candy-icons"; - rev = "47abaeba8de75a1805b10f4fc8d698c6e8c614c9"; - hash = "sha256-COC926EPA7w1eUnXq40iFuANUF3Rch83e/BekBsHNFo="; + rev = "b099e7f437da41f65ffb710d801471e2f813f1b2"; + hash = "sha256-gy58GQMoYOFXbvXoKELUydg/X/B8BJ6hIbcOl5com1E="; }; nativeBuildInputs = [ gtk3 ]; From 5e4eddb2f276552a6049499d87e8897a0bd127e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sandro=20J=C3=A4ckel?= Date: Tue, 27 May 2025 17:18:43 +0200 Subject: [PATCH 184/220] vaultwarden: 1.33.2 -> 1.34.1 Diff: https://github.com/dani-garcia/vaultwarden/compare/1.33.2...1.34.1 Changelog: https://github.com/dani-garcia/vaultwarden/releases/tag/1.34.1 --- pkgs/by-name/va/vaultwarden/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/va/vaultwarden/package.nix b/pkgs/by-name/va/vaultwarden/package.nix index 0eba9e6e26c6..077791d40554 100644 --- a/pkgs/by-name/va/vaultwarden/package.nix +++ b/pkgs/by-name/va/vaultwarden/package.nix @@ -19,17 +19,17 @@ in rustPlatform.buildRustPackage rec { pname = "vaultwarden"; - version = "1.33.2"; + version = "1.34.1"; src = fetchFromGitHub { owner = "dani-garcia"; repo = "vaultwarden"; rev = version; - hash = "sha256-Lu3/qVTi5Eedcm+3XlHAAJ1nPHm9hW4HZncQKmzDdoo="; + hash = "sha256-SVEQX+uAYb4/qFQZRm2khOi8ti76v3F5lRnUgoHk8wA="; }; useFetchCargoVendor = true; - cargoHash = "sha256-T/ehLSPJmEuQYhotK12iqXQSe5Ke8+dkr9PVDe4Kmis="; + cargoHash = "sha256-Or259iQP89Ptf/XHpkHD08VDyCk/nQcFlyoKRUUQKt0="; # used for "Server Installed" version in admin panel env.VW_VERSION = version; From 485e4e08cd9f7959430d7544b4cfcda46f982a87 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 15:34:15 +0000 Subject: [PATCH 185/220] containerd: 2.1.0 -> 2.1.1 --- pkgs/by-name/co/containerd/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/co/containerd/package.nix b/pkgs/by-name/co/containerd/package.nix index 3d2016a256cd..555f0bc2fe80 100644 --- a/pkgs/by-name/co/containerd/package.nix +++ b/pkgs/by-name/co/containerd/package.nix @@ -16,7 +16,7 @@ buildGoModule rec { pname = "containerd"; - version = "2.1.0"; + version = "2.1.1"; outputs = [ "out" @@ -27,7 +27,7 @@ buildGoModule rec { owner = "containerd"; repo = "containerd"; tag = "v${version}"; - hash = "sha256-5Fd9LrpJUf5MEtfQaRM6zo5C8RUsOasR2NHCDj8vMBk="; + hash = "sha256-ZqQX+bogzAsMvqYNKyWvHF2jdPOIhNQDizKEDbcbmOg="; }; postPatch = "patchShebangs ."; From 4fb8ed7dfef13afa8efce195c5fd4460db5f8787 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alejandro=20S=C3=A1nchez=20Medina?= Date: Tue, 27 May 2025 00:29:06 +0200 Subject: [PATCH 186/220] doc/nrd: add example of multipage rendering --- pkgs/by-name/ni/nixos-render-docs/README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkgs/by-name/ni/nixos-render-docs/README.md b/pkgs/by-name/ni/nixos-render-docs/README.md index 5ef0faebcd79..a27e9fdbc9eb 100644 --- a/pkgs/by-name/ni/nixos-render-docs/README.md +++ b/pkgs/by-name/ni/nixos-render-docs/README.md @@ -75,3 +75,17 @@ In case this identifier is renamed, the mapping would change into: ] } ``` + +## Rendering multiple pages + +The `include` directive accepts an argument `into-file` to specify the file into which the imported markdown should be rendered to. We can use this argument to set up multipage rendering of the manuals. + +For example + +~~~ +```{=include=} appendix html:into-file=//release-notes.html +release-notes/release-notes.md +``` +~~~ + +will render the release notes into a `release-notes.html` file, instead of making it a section within the default `index.html`. From 6bbaca3b7217134f7a194ad8f7b55389c926f91e Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 15:50:17 +0000 Subject: [PATCH 187/220] python3Packages.oelint-data: 1.0.14 -> 1.0.15 --- pkgs/development/python-modules/oelint-data/default.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/development/python-modules/oelint-data/default.nix b/pkgs/development/python-modules/oelint-data/default.nix index 580bae09ecf8..01c53b7eda78 100644 --- a/pkgs/development/python-modules/oelint-data/default.nix +++ b/pkgs/development/python-modules/oelint-data/default.nix @@ -8,14 +8,14 @@ buildPythonPackage rec { pname = "oelint-data"; - version = "1.0.14"; + version = "1.0.15"; pyproject = true; src = fetchFromGitHub { owner = "priv-kweihmann"; repo = "oelint-data"; tag = version; - hash = "sha256-4EfvRj9vtSZRtsypCshg2T7raV3a0bozZVu1IdRdtdg="; + hash = "sha256-OZtBJWjAYuwXt1ehgt6bIZPeVByXCjOcctPJGLrsxiU="; }; build-system = [ @@ -34,7 +34,7 @@ buildPythonPackage rec { meta = { description = "Data for oelint-adv"; homepage = "https://github.com/priv-kweihmann/oelint-data"; - changelog = "https://github.com/priv-kweihmann/oelint-data/releases/tag/${version}"; + changelog = "https://github.com/priv-kweihmann/oelint-data/releases/tag/${src.tag}"; license = lib.licenses.bsd2; maintainers = with lib.maintainers; [ GaetanLepage ]; }; From cf6bb7f51b70939ebc5ed26426817fa4521cca14 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 16:02:22 +0000 Subject: [PATCH 188/220] anyrun: 0-unstable-2025-04-29 -> 0-unstable-2025-05-19 --- pkgs/by-name/an/anyrun/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/an/anyrun/package.nix b/pkgs/by-name/an/anyrun/package.nix index 232cb1c23748..76061af6b4ab 100644 --- a/pkgs/by-name/an/anyrun/package.nix +++ b/pkgs/by-name/an/anyrun/package.nix @@ -17,13 +17,13 @@ rustPlatform.buildRustPackage { pname = "anyrun"; - version = "0-unstable-2025-04-29"; + version = "0-unstable-2025-05-19"; src = fetchFromGitHub { owner = "kirottu"; repo = "anyrun"; - rev = "005333a60c03cf58e0a59b03e76989441276e88b"; - hash = "sha256-0zJs4J4w1jG83hByNJ+WxANHW7sLzMdvA408LDCCnTY="; + rev = "54b462b87129cf059a348fc3a6cc170b9714e0e7"; + hash = "sha256-7VcdMOgQ/PRLr0bnJwNWZX7asrWbRJlLFw21xffm6g8="; }; useFetchCargoVendor = true; From deea624844d6c117606bc751fdc342a91686af77 Mon Sep 17 00:00:00 2001 From: Defelo Date: Tue, 27 May 2025 15:58:39 +0000 Subject: [PATCH 189/220] glitchtip: 5.0.1 -> 5.0.4 --- pkgs/by-name/gl/glitchtip/frontend.nix | 6 +++--- pkgs/by-name/gl/glitchtip/package.nix | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkgs/by-name/gl/glitchtip/frontend.nix b/pkgs/by-name/gl/glitchtip/frontend.nix index 2f075b546bde..6b536f6b6425 100644 --- a/pkgs/by-name/gl/glitchtip/frontend.nix +++ b/pkgs/by-name/gl/glitchtip/frontend.nix @@ -9,18 +9,18 @@ buildNpmPackage (finalAttrs: { pname = "glitchtip-frontend"; - version = "5.0.1"; + version = "5.0.4"; src = fetchFromGitLab { owner = "glitchtip"; repo = "glitchtip-frontend"; tag = "v${finalAttrs.version}"; - hash = "sha256-mqwPCp7C5n2fOE8kgUnW3SYuuaY8ZkJtuhYXP4HevnM="; + hash = "sha256-2XZCIIWQAM2Nk8/JTs5MzUJJOvJS+wrsa2m/XiC9FHM="; }; npmDeps = fetchNpmDeps { inherit (finalAttrs) src; - hash = "sha256-Jzwarti+WwKecWn3fPcF9LV+mbU22rgiTP7mslyoqRk="; + hash = "sha256-iJFEeUaPP6ZnntoZ2X0TyR6f923zPuzzZNW/zkd8M7E="; }; postPatch = '' diff --git a/pkgs/by-name/gl/glitchtip/package.nix b/pkgs/by-name/gl/glitchtip/package.nix index b39900869faa..8de4566399f5 100644 --- a/pkgs/by-name/gl/glitchtip/package.nix +++ b/pkgs/by-name/gl/glitchtip/package.nix @@ -102,14 +102,14 @@ in stdenv.mkDerivation (finalAttrs: { pname = "glitchtip"; - version = "5.0.1"; + version = "5.0.4"; pyproject = true; src = fetchFromGitLab { owner = "glitchtip"; repo = "glitchtip-backend"; tag = "v${finalAttrs.version}"; - hash = "sha256-vfsuJn6lpaesK40nqCdJMCDiaaqS1EdZdvgmy9jPuo8="; + hash = "sha256-ihefyunZc191w9cn7iSqblNA4V4hELi9jwxfFrjPvu0="; }; propagatedBuildInputs = pythonPackages; From 9fccd46b6ab5fdbee160e58eae3e1bd18226226e Mon Sep 17 00:00:00 2001 From: Defelo Date: Tue, 27 May 2025 15:59:54 +0000 Subject: [PATCH 190/220] clorinde: 0.15.0 -> 0.15.1 --- pkgs/by-name/cl/clorinde/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/cl/clorinde/package.nix b/pkgs/by-name/cl/clorinde/package.nix index c6a4052d8999..981fd2cf1b14 100644 --- a/pkgs/by-name/cl/clorinde/package.nix +++ b/pkgs/by-name/cl/clorinde/package.nix @@ -8,17 +8,17 @@ rustPlatform.buildRustPackage (finalAttrs: { pname = "clorinde"; - version = "0.15.0"; + version = "0.15.1"; src = fetchFromGitHub { owner = "halcyonnouveau"; repo = "clorinde"; tag = "clorinde-v${finalAttrs.version}"; - hash = "sha256-PUAySbgmbulSlkabABiSFeDDa+o0tQ2uQtiQGSqO1/w="; + hash = "sha256-Ynz1pdgckQzMLuUJUGSzNRNwWZKrEZuYgrrT/BxAxzc="; }; useFetchCargoVendor = true; - cargoHash = 
"sha256-zSeAD3MBflO+lhuLxH57YhR6wxsqZn62XQ0dgImdNLE="; + cargoHash = "sha256-g3pWvoTq1DlKlIDJq79IJrvDiLR0HZRPIt4K1YUPsvM="; cargoBuildFlags = [ "--package=clorinde" ]; From a7485af7d504540d9c82ef3e9ac1da1c8f75280a Mon Sep 17 00:00:00 2001 From: Jan Tojnar Date: Sat, 17 May 2025 11:24:31 +0200 Subject: [PATCH 191/220] nixos/telepathy: Remove GNOME remnants GNOME Shell 46 dropped the telepathy support so we no longer need to add the typelib to session path. https://gitlab.gnome.org/GNOME/gnome-shell/-/commit/c5ec3e45e4562246ba65ac2ca19eadfdfee627ca Looking at Debian code search, no packages other than Polari should need the typelib from path anyway, and Polari already gets it from a wrapper: https://codesearch.debian.net/search?q=TelepathyGLib+-package%3Atelepathy-glib+-package%3Asugar+-path%3Avala&literal=0 Also unmaintain as it is no longer used by GNOME. The daemon components are needed by lomiri and polari: https://codesearch.debian.net/search?q=org.freedesktop.Telepathy.MissionControl5%7Corg.freedesktop.Telepathy.AccountManager%7C%5Cbmc-tool%5Cb%7Cmc-wait-for-name&literal=0 --- nixos/modules/services/desktops/telepathy.nix | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/nixos/modules/services/desktops/telepathy.nix b/nixos/modules/services/desktops/telepathy.nix index cfab2c3a413f..b60c7e0199c5 100644 --- a/nixos/modules/services/desktops/telepathy.nix +++ b/nixos/modules/services/desktops/telepathy.nix @@ -8,7 +8,7 @@ { meta = { - maintainers = lib.teams.gnome.members; + maintainers = [ ]; }; ###### interface @@ -37,11 +37,6 @@ environment.systemPackages = [ pkgs.telepathy-mission-control ]; services.dbus.packages = [ pkgs.telepathy-mission-control ]; - - # Enable runtime optional telepathy in gnome-shell - services.xserver.desktopManager.gnome.sessionPath = with pkgs; [ - telepathy-glib - ]; }; } From b117a78eb86fbe52a998ce796d61808bfb748f65 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 16:56:34 +0000 Subject: [PATCH 192/220] gat: 0.23.2 -> 0.24.0 --- pkgs/by-name/ga/gat/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ga/gat/package.nix b/pkgs/by-name/ga/gat/package.nix index c1796663160b..4375315e49b7 100644 --- a/pkgs/by-name/ga/gat/package.nix +++ b/pkgs/by-name/ga/gat/package.nix @@ -6,16 +6,16 @@ buildGoModule rec { pname = "gat"; - version = "0.23.2"; + version = "0.24.0"; src = fetchFromGitHub { owner = "koki-develop"; repo = "gat"; tag = "v${version}"; - hash = "sha256-vJREExCJ+JvPYxNeJWQ6A4LRB2viEisnXrRM6yDGOc4="; + hash = "sha256-2AIRFG4YmEr1ZQ6JjhmRmOc5/BfTbeBd4azy1xQQr3Q="; }; - vendorHash = "sha256-yGTzDlu9l1Vfnt9Za4Axh7nFWe5CmW2kqssa+51bA3w="; + vendorHash = "sha256-9LHTyIL0+aJAUJsn3m1SUrZYM9JLo70JY0zb1oVFJFo="; env.CGO_ENABLED = 0; From 0a39769102627e8742c3e423a4e414fdb4bbea11 Mon Sep 17 00:00:00 2001 From: nicoo Date: Tue, 27 May 2025 17:36:24 +0000 Subject: [PATCH 193/220] lib.maintainers.nicoo: update GH account name --- maintainers/maintainer-list.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/maintainer-list.nix b/maintainers/maintainer-list.nix index bc4380ec360c..c229d28adc3f 100644 --- a/maintainers/maintainer-list.nix +++ b/maintainers/maintainer-list.nix @@ -17609,7 +17609,7 @@ }; nicoo = { email = "nicoo@debian.org"; - github = "nbraud"; + github = "nicoonoclaste"; githubId = 1155801; name = "nicoo"; keys = [ { fingerprint = "E44E 9EA5 4B8E 256A FB73 49D3 EC9D 3708 72BC 7A8C"; } ]; From c839ac340b7890d9e617eeee7bd143d3d77ae0b6 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 17:48:49 +0000 Subject: [PATCH 194/220] teams-for-linux: 2.0.13 -> 2.0.14 --- pkgs/by-name/te/teams-for-linux/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/te/teams-for-linux/package.nix b/pkgs/by-name/te/teams-for-linux/package.nix index f4e1769e8caf..434226ffef39 100644 --- a/pkgs/by-name/te/teams-for-linux/package.nix +++ b/pkgs/by-name/te/teams-for-linux/package.nix @@ -16,16 +16,16 @@ buildNpmPackage rec { pname = "teams-for-linux"; - version = "2.0.13"; + version = "2.0.14"; src = fetchFromGitHub { owner = "IsmaelMartinez"; repo = "teams-for-linux"; tag = "v${version}"; - hash = "sha256-xkVJ8PrW2huf3oNVSYySQnzKPadsYFXnrMgi+mFXBQU="; + hash = "sha256-B7u5xJHss09QOP1AHoHZWIiHyd2RBcF/XU3BB9N2C60="; }; - npmDepsHash = "sha256-jDJH/lAbmUURMkdv49S1KeE4I/MOEzK0ZDWh1sbnzXY="; + npmDepsHash = "sha256-JJUkZlol09Hehxc26DMdEzdxy8Nxa16G8YdTZkHhi78="; nativeBuildInputs = [ makeWrapper From 279f4ffcc06ea833dcd834c8a6cf03c80746a2be Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 07:24:54 +0000 Subject: [PATCH 195/220] projectm-sdl-cpp: 0-unstable-2025-03-17 -> 0-unstable-2025-05-20 --- pkgs/by-name/pr/projectm-sdl-cpp/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/pr/projectm-sdl-cpp/package.nix b/pkgs/by-name/pr/projectm-sdl-cpp/package.nix index e7d9679e20cc..374ae12bcfdd 100644 --- a/pkgs/by-name/pr/projectm-sdl-cpp/package.nix +++ b/pkgs/by-name/pr/projectm-sdl-cpp/package.nix @@ -14,13 +14,13 @@ stdenv.mkDerivation { pname = "projectm-sdl-cpp"; - version = "0-unstable-2025-03-17"; + version = "0-unstable-2025-05-20"; src = fetchFromGitHub { owner = "projectM-visualizer"; repo = "frontend-sdl-cpp"; - rev = "080e48b303fe9d7eebaba6d266ddef0543bba0d3"; - hash = "sha256-5thnZhw22U2IVn6VNDEIw1fEaTnTwJv8pkgAdWdrHu8="; + rev = "85dfdda3dcb139c7e1eb47fd5e4723114e4fb86b"; + hash = "sha256-FHOncC6fhs1CPNMBGtA+QB+NHVn4QlF2Qn9NJb5j3p8="; fetchSubmodules = true; }; From d283c3b36ff4ed09cc1b275b604bddc82693f075 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Tue, 27 May 2025 19:59:28 +0200 Subject: [PATCH 196/220] maintainer/scripts/check-cherry-picks: propagate git errors instead of passing silently Bash will not propagate the exit code from a subshell within a herestring, so the script silently passes when git throws an error there. Re-arranging things a bit and an error will now be thrown. --- maintainers/scripts/check-cherry-picks.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/maintainers/scripts/check-cherry-picks.sh b/maintainers/scripts/check-cherry-picks.sh index e7ffe2bf4c73..c846fa108df2 100755 --- a/maintainers/scripts/check-cherry-picks.sh +++ b/maintainers/scripts/check-cherry-picks.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Find alleged cherry-picks -set -e +set -eo pipefail if [ $# != "2" ] ; then echo "usage: check-cherry-picks.sh base_rev head_rev" @@ -11,6 +11,10 @@ fi PICKABLE_BRANCHES=${PICKABLE_BRANCHES:-master staging release-??.?? staging-??.??} problem=0 +commits="$(git rev-list \ + -E -i --grep="cherry.*[0-9a-f]{40}" --reverse \ + "$1..$2")" + while read new_commit_sha ; do if [ -z "$new_commit_sha" ] ; then continue # skip empty lines @@ -88,10 +92,6 @@ while read new_commit_sha ; do echo "$original_commit_sha not found in any pickable branch" problem=1 -done <<< "$( - git rev-list \ - -E -i --grep="cherry.*[0-9a-f]{40}" --reverse \ - "$1..$2" -)" +done <<< "$commits" exit $problem From 642de212a6ec1d4e76f7cca957163bbeaef85e88 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Tue, 27 May 2025 20:00:24 +0200 Subject: [PATCH 197/220] maintainer/scripts/check-cherry-picks: fix calling from outside nixpkgs The CI job calls this as trusted/maintainers/..., i.e. with a working directory outside the checkout. The git commands inside the script assume to be inside the checkout, though, so let's force that. --- maintainers/scripts/check-cherry-picks.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/maintainers/scripts/check-cherry-picks.sh b/maintainers/scripts/check-cherry-picks.sh index c846fa108df2..0e02c709d8f5 100755 --- a/maintainers/scripts/check-cherry-picks.sh +++ b/maintainers/scripts/check-cherry-picks.sh @@ -8,6 +8,9 @@ if [ $# != "2" ] ; then exit 2 fi +# Make sure we are inside the nixpkgs repo, even when called from outside +cd "$(dirname "${BASH_SOURCE[0]}")" + PICKABLE_BRANCHES=${PICKABLE_BRANCHES:-master staging release-??.?? 
staging-??.??} problem=0 From f648164c03c55baf0a649585686c9e1dd4584487 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 18:57:12 +0000 Subject: [PATCH 198/220] simdutf: 7.1.0 -> 7.2.0 --- pkgs/by-name/si/simdutf/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/si/simdutf/package.nix b/pkgs/by-name/si/simdutf/package.nix index 9351d0c02835..50183972f8fe 100644 --- a/pkgs/by-name/si/simdutf/package.nix +++ b/pkgs/by-name/si/simdutf/package.nix @@ -8,13 +8,13 @@ stdenv.mkDerivation (finalAttrs: { pname = "simdutf"; - version = "7.1.0"; + version = "7.2.0"; src = fetchFromGitHub { owner = "simdutf"; repo = "simdutf"; rev = "v${finalAttrs.version}"; - hash = "sha256-2nDTARUH5gb/pP4WnCfSPzreAThMAuAuwRT7y2aumBA="; + hash = "sha256-ZfC0k1Z7nOeauHYTKjMt73qhoTn4c15dZWvICeuYhM8="; }; # Fix build on darwin From 6bd17384f1f2b44831a709cc8bdb838248d05a93 Mon Sep 17 00:00:00 2001 From: DashieTM Date: Sat, 10 May 2025 12:44:25 +0200 Subject: [PATCH 199/220] vimPlugins.gh-nvim: init at 2025-01-21 --- pkgs/applications/editors/vim/plugins/generated.nix | 13 +++++++++++++ pkgs/applications/editors/vim/plugins/overrides.nix | 4 ++++ .../editors/vim/plugins/vim-plugin-names | 1 + 3 files changed, 18 insertions(+) diff --git a/pkgs/applications/editors/vim/plugins/generated.nix b/pkgs/applications/editors/vim/plugins/generated.nix index fe343e2881de..89e6fa048caa 100644 --- a/pkgs/applications/editors/vim/plugins/generated.nix +++ b/pkgs/applications/editors/vim/plugins/generated.nix @@ -5282,6 +5282,19 @@ final: prev: { meta.hydraPlatforms = [ ]; }; + gh-nvim = buildVimPlugin { + pname = "gh.nvim"; + version = "0-unstable-2025-01-21"; + src = fetchFromGitHub { + owner = "ldelossa"; + repo = "gh.nvim"; + rev = "6f367b2ab8f9d4a0a23df2b703a3f91137618387"; + hash = "sha256-XI4FVjajin0NM+OaEN+O5vmalPpOB2RII+aOERSzjJA="; + }; + meta.description = "Fully featured GitHub integration for performing code reviews in Neovim"; + meta.homepage = "https://github.com/ldelossa/gh.nvim"; + }; + git-blame-nvim = buildVimPlugin { pname = "git-blame.nvim"; version = "2025-04-12"; diff --git a/pkgs/applications/editors/vim/plugins/overrides.nix b/pkgs/applications/editors/vim/plugins/overrides.nix index 486a6d1e56d7..2f69b8c65c30 100644 --- a/pkgs/applications/editors/vim/plugins/overrides.nix +++ b/pkgs/applications/editors/vim/plugins/overrides.nix @@ -1278,6 +1278,10 @@ in configurePhase = "cd plugins/nvim"; }; + gh-nvim = super.gh-nvim.overrideAttrs { + dependencies = [ self.litee-nvim ]; + }; + gitlinker-nvim = super.gitlinker-nvim.overrideAttrs { dependencies = [ self.plenary-nvim ]; }; diff --git a/pkgs/applications/editors/vim/plugins/vim-plugin-names b/pkgs/applications/editors/vim/plugins/vim-plugin-names index 1b622615869f..ca43030ff988 100644 --- a/pkgs/applications/editors/vim/plugins/vim-plugin-names +++ b/pkgs/applications/editors/vim/plugins/vim-plugin-names @@ -403,6 +403,7 @@ https://github.com/David-Kunz/gen.nvim/,HEAD, https://github.com/jsfaint/gen_tags.vim/,, https://github.com/gentoo/gentoo-syntax/,, https://github.com/ndmitchell/ghcid/,, +https://github.com/ldelossa/gh.nvim/,, https://github.com/eagletmt/ghcmod-vim/,, https://github.com/f-person/git-blame.nvim/,, https://github.com/akinsho/git-conflict.nvim/,HEAD, From f6850bb50c1aa78b7cee981387bedc8d0af585a5 Mon Sep 17 00:00:00 2001 From: Leona Maroni Date: Tue, 27 May 2025 21:03:28 +0200 Subject: [PATCH 200/220] Revert "victoriametrics: 1.117.1 -> 1.118.0" --- 
pkgs/by-name/vi/victoriametrics/package.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkgs/by-name/vi/victoriametrics/package.nix b/pkgs/by-name/vi/victoriametrics/package.nix index 5de81b3498a0..2cb333913c17 100644 --- a/pkgs/by-name/vi/victoriametrics/package.nix +++ b/pkgs/by-name/vi/victoriametrics/package.nix @@ -14,7 +14,7 @@ buildGoModule (finalAttrs: { pname = "VictoriaMetrics"; - version = "1.118.0"; + version = "1.117.1"; src = fetchFromGitHub { owner = "VictoriaMetrics"; From 6924c67f2e7d07d7663b86634c5932ea0f847a3b Mon Sep 17 00:00:00 2001 From: Robert Rose Date: Tue, 27 May 2025 21:26:14 +0200 Subject: [PATCH 201/220] k3s_1_33: 1.33.0 -> 1.33.1 https://github.com/k3s-io/k3s/releases/tag/v1.33.1%2Bk3s1 --- .../cluster/k3s/1_33/images-versions.json | 14 +++++++------- .../networking/cluster/k3s/1_33/versions.nix | 12 ++++++------ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkgs/applications/networking/cluster/k3s/1_33/images-versions.json b/pkgs/applications/networking/cluster/k3s/1_33/images-versions.json index 5d212c7f9e0e..8caf711a8a74 100644 --- a/pkgs/applications/networking/cluster/k3s/1_33/images-versions.json +++ b/pkgs/applications/networking/cluster/k3s/1_33/images-versions.json @@ -1,18 +1,18 @@ { "airgap-images-amd64": { - "url": "https://github.com/k3s-io/k3s/releases/download/v1.33.0%2Bk3s1/k3s-airgap-images-amd64.tar.zst", - "sha256": "2822af9a2341033243bd1aff7433c0670bcad4059a8d0a35fb314dba24692674" + "url": "https://github.com/k3s-io/k3s/releases/download/v1.33.1%2Bk3s1/k3s-airgap-images-amd64.tar.zst", + "sha256": "5fd0e18b7cd7457773d30e86270a4e1caed66d2e5c1380e65b3d0375227d241c" }, "airgap-images-arm": { - "url": "https://github.com/k3s-io/k3s/releases/download/v1.33.0%2Bk3s1/k3s-airgap-images-arm.tar.zst", - "sha256": "cf175be8674dd1812cd92930a852ef7a11572c4588180f51d5c19bdfd0a810e4" + "url": "https://github.com/k3s-io/k3s/releases/download/v1.33.1%2Bk3s1/k3s-airgap-images-arm.tar.zst", + "sha256": "518343da6213e7edba9da2f58fcf30c268600fc8003f92f245d9818ead2db03e" }, "airgap-images-arm64": { - "url": "https://github.com/k3s-io/k3s/releases/download/v1.33.0%2Bk3s1/k3s-airgap-images-arm64.tar.zst", - "sha256": "c97f7a86a04aad7d5dc74d7e85e0fbc097e9edf76358f8a7c2097e6ca9740211" + "url": "https://github.com/k3s-io/k3s/releases/download/v1.33.1%2Bk3s1/k3s-airgap-images-arm64.tar.zst", + "sha256": "2ac40b650104a1dc21b4b77fccf943efca8893de0432e6cdf1e7e1534eb6f4aa" }, "images-list": { - "url": "https://github.com/k3s-io/k3s/releases/download/v1.33.0%2Bk3s1/k3s-images.txt", + "url": "https://github.com/k3s-io/k3s/releases/download/v1.33.1%2Bk3s1/k3s-images.txt", "sha256": "aa8e10337aef453cb17e6408dbaec9eb2da409ca6ba1f8bc7332fcef97fdaf3a" } } diff --git a/pkgs/applications/networking/cluster/k3s/1_33/versions.nix b/pkgs/applications/networking/cluster/k3s/1_33/versions.nix index be10b6dd7b6a..9665eb3c02af 100644 --- a/pkgs/applications/networking/cluster/k3s/1_33/versions.nix +++ b/pkgs/applications/networking/cluster/k3s/1_33/versions.nix @@ -1,15 +1,15 @@ { - k3sVersion = "1.33.0+k3s1"; - k3sCommit = "63ab8e534cdfce2a60f4b016dfedb4f8d74ae8ec"; - k3sRepoSha256 = "1ysfzb4216qk9gjmp2zp103xzjgz8irc7h9m4yp041gkvffa7pyg"; - k3sVendorHash = "sha256-eVMCrOAOCB7saYuxQQUUrmRHT+ZURXESTI6ZRKSDGZs="; + k3sVersion = "1.33.1+k3s1"; + k3sCommit = "99d91538b1327da933356c318dc8040335fbb66c"; + k3sRepoSha256 = "1ncj30nid3x96irw2raxf1naa2jap1d0s1ygxsvfckblbb6rjnmx"; + k3sVendorHash = "sha256-jrPVY+FVZV9wlbik/I35W8ChcLrHlYbLAwUYU16mJLM="; 
chartVersions = import ./chart-versions.nix; imagesVersions = builtins.fromJSON (builtins.readFile ./images-versions.json); k3sRootVersion = "0.14.1"; k3sRootSha256 = "0svbi42agqxqh5q2ri7xmaw2a2c70s7q5y587ls0qkflw5vx4sl7"; k3sCNIVersion = "1.6.0-k3s1"; k3sCNISha256 = "0g7zczvwba5xqawk37b0v96xysdwanyf1grxn3l3lhxsgjjsmkd7"; - containerdVersion = "2.0.4-k3s4"; - containerdSha256 = "05j5jyjnirks11z2930w4k5ij015hsm4pd2wxgj2531fyiy98azl"; + containerdVersion = "2.0.5-k3s1"; + containerdSha256 = "1c3hv22zx8y94zwmv5r59bnwgqyhxd10zkinm0jrcvny32ijqdfj"; criCtlVersion = "1.31.0-k3s2"; } From 0e64b8d5672da476fed52995e35c46156ce18d18 Mon Sep 17 00:00:00 2001 From: wrvsrx Date: Wed, 28 May 2025 02:24:25 +0800 Subject: [PATCH 202/220] python3Packages.starlette-compress: init at 0.16.0 --- .../starlette-compress/default.nix | 53 +++++++++++++++++++ pkgs/top-level/python-packages.nix | 2 + 2 files changed, 55 insertions(+) create mode 100644 pkgs/development/python-modules/starlette-compress/default.nix diff --git a/pkgs/development/python-modules/starlette-compress/default.nix b/pkgs/development/python-modules/starlette-compress/default.nix new file mode 100644 index 000000000000..970d665ae9a4 --- /dev/null +++ b/pkgs/development/python-modules/starlette-compress/default.nix @@ -0,0 +1,53 @@ +{ + lib, + buildPythonPackage, + fetchFromGitHub, + hatchling, + brotli, + brotlicffi, + starlette, + zstandard, + pytestCheckHook, + httpx, + trio, +}: + +buildPythonPackage rec { + pname = "starlette-compress"; + version = "1.6.0"; + pyproject = true; + + src = fetchFromGitHub { + owner = "Zaczero"; + repo = "starlette-compress"; + tag = version; + hash = "sha256-VEVPbCGE4BQo/0t/P785TyMHZGSKCicV6H0LbBsv8uo="; + }; + + build-system = [ hatchling ]; + + dependencies = [ + brotli + brotlicffi + starlette + zstandard + ]; + + checkInputs = [ + httpx + trio + ]; + + nativeCheckInputs = [ + pytestCheckHook + ]; + + pythonImportsCheck = [ "starlette_compress" ]; + + meta = { + description = "Compression middleware for Starlette - supporting ZStd, Brotli, and GZip"; + homepage = "https://pypi.org/p/starlette-compress"; + license = lib.licenses.bsd0; + maintainers = with lib.maintainers; [ wrvsrx ]; + }; +} diff --git a/pkgs/top-level/python-packages.nix b/pkgs/top-level/python-packages.nix index 16ed1fa0480e..322f955fc066 100644 --- a/pkgs/top-level/python-packages.nix +++ b/pkgs/top-level/python-packages.nix @@ -16726,6 +16726,8 @@ self: super: with self; { starlette-admin = callPackage ../development/python-modules/starlette-admin { }; + starlette-compress = callPackage ../development/python-modules/starlette-compress { }; + starlette-context = callPackage ../development/python-modules/starlette-context { }; starlette-wtf = callPackage ../development/python-modules/starlette-wtf { }; From a7c5e8eb5cdb931727edabb401c6630ca4523414 Mon Sep 17 00:00:00 2001 From: Gaetan Lepage Date: Tue, 27 May 2025 21:29:49 +0200 Subject: [PATCH 203/220] python312Packages.devito: 4.8.16 -> 4.8.17 Diff: https://github.com/devitocodes/devito/compare/refs/tags/v4.8.16...refs/tags/v4.8.17 Changelog: https://github.com/devitocodes/devito/releases/tag/v4.8.17 --- pkgs/development/python-modules/devito/default.nix | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pkgs/development/python-modules/devito/default.nix b/pkgs/development/python-modules/devito/default.nix index 3503ea42f522..34a18bca77af 100644 --- a/pkgs/development/python-modules/devito/default.nix +++ b/pkgs/development/python-modules/devito/default.nix 
@@ -6,11 +6,11 @@ # build-system setuptools, + setuptools-scm, # dependencies anytree, cgen, - click, cloudpickle, codepy, llvmPackages, @@ -22,6 +22,7 @@ sympy, # tests + click, gcc, matplotlib, pytest-xdist, @@ -31,26 +32,28 @@ buildPythonPackage rec { pname = "devito"; - version = "4.8.16"; + version = "4.8.17"; pyproject = true; src = fetchFromGitHub { owner = "devitocodes"; repo = "devito"; tag = "v${version}"; - hash = "sha256-yG4nJLnzIrITRMbtT/9UxDB0xvRiwnQMW13Z9HNQIq8="; + hash = "sha256-1aZSL23yNi/X9hnYKpIvgEOjEZtvPgTo5Pi5kKOWJhQ="; }; pythonRemoveDeps = [ "pip" ]; pythonRelaxDeps = true; - build-system = [ setuptools ]; + build-system = [ + setuptools + setuptools-scm + ]; dependencies = [ anytree cgen - click cloudpickle codepy multidict @@ -62,6 +65,7 @@ buildPythonPackage rec { ] ++ lib.optionals stdenv.cc.isClang [ llvmPackages.openmp ]; nativeCheckInputs = [ + click gcc matplotlib pytest-xdist From 4d5d4ef221f6da06c8924ad26bd5099e8a85df15 Mon Sep 17 00:00:00 2001 From: wrvsrx Date: Wed, 28 May 2025 02:25:51 +0800 Subject: [PATCH 204/220] open-webui: 0.6.10 -> 0.6.11 --- pkgs/by-name/op/open-webui/package.nix | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/pkgs/by-name/op/open-webui/package.nix b/pkgs/by-name/op/open-webui/package.nix index 036a1767ed82..207f7c8c9e6c 100644 --- a/pkgs/by-name/op/open-webui/package.nix +++ b/pkgs/by-name/op/open-webui/package.nix @@ -2,7 +2,6 @@ lib, buildNpmPackage, fetchFromGitHub, - fetchpatch2, python3Packages, nixosTests, fetchurl, @@ -10,28 +9,19 @@ }: let pname = "open-webui"; - version = "0.6.10"; + version = "0.6.11"; src = fetchFromGitHub { owner = "open-webui"; repo = "open-webui"; tag = "v${version}"; - hash = "sha256-OZPZlF6tXzfuFU8/ZavE67E8+XdRu+7oCA1eD0EA9fg="; + hash = "sha256-G5rbSClztrphQwVYoBvfFTZ/dPHCuxL1PdZZhSy2RbQ="; }; frontend = buildNpmPackage rec { pname = "open-webui-frontend"; inherit version src; - patches = [ - # Git is not available in the sandbox - # Remove this patch at the next release - (fetchpatch2 { - url = "https://github.com/open-webui/open-webui/commit/ed0659aca60eedadadba4362b309015b4a8368c6.patch"; - hash = "sha256-lTzCdAk9gagIfN5Ld1tCS3gp/oVm4+CRy/lD42702WM="; - }) - ]; - # the backend for run-on-client-browser python execution # must match lock file in open-webui # TODO: should we automate this? @@ -42,7 +32,7 @@ let url = "https://github.com/pyodide/pyodide/releases/download/${pyodideVersion}/pyodide-${pyodideVersion}.tar.bz2"; }; - npmDepsHash = "sha256-F/xum76SHFwX/77kPHTFayJ00wv6ZWE09hw8taUbMMQ="; + npmDepsHash = "sha256-qQzAehIXMWyXmz7jT0aU6zsSXi3WVcOjchcA+3M7tAU="; # Disabling `pyodide:fetch` as it downloads packages during `buildPhase` # Until this is solved, running python packages from the browser will not work. @@ -197,6 +187,7 @@ python3Packages.buildPythonApplication rec { sentence-transformers sentencepiece soundfile + starlette-compress tencentcloud-sdk-python tiktoken transformers From 6ee1ed9b82bf4a86a63fb4a213dbafb7f9049a38 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 20:00:07 +0000 Subject: [PATCH 205/220] gamescope: 3.16.9 -> 3.16.10 --- pkgs/by-name/ga/gamescope/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/ga/gamescope/package.nix b/pkgs/by-name/ga/gamescope/package.nix index 57a6c6ccc575..26c47796dcdc 100644 --- a/pkgs/by-name/ga/gamescope/package.nix +++ b/pkgs/by-name/ga/gamescope/package.nix @@ -49,14 +49,14 @@ let in stdenv.mkDerivation (finalAttrs: { pname = "gamescope"; - version = "3.16.9"; + version = "3.16.10"; src = fetchFromGitHub { owner = "ValveSoftware"; repo = "gamescope"; tag = finalAttrs.version; fetchSubmodules = true; - hash = "sha256-Dw9EErOINGoOlnNqroKR+fbRfMGL7Q13gP3E5iw4RhU="; + hash = "sha256-MZhIsnSp2uGMQds5zEhF8WZgGNHDGH+3A2TGjB6Vn10="; }; patches = [ From 33db1f0871e41a986ba90bfd393eb83ba10ab156 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 27 May 2025 20:56:29 +0000 Subject: [PATCH 206/220] legendary-heroic: 0.20.36 -> 0.20.37 --- pkgs/by-name/le/legendary-heroic/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/le/legendary-heroic/package.nix b/pkgs/by-name/le/legendary-heroic/package.nix index c0fbd88df158..8aa57c2458e0 100644 --- a/pkgs/by-name/le/legendary-heroic/package.nix +++ b/pkgs/by-name/le/legendary-heroic/package.nix @@ -5,7 +5,7 @@ python3Packages, }: let - version = "0.20.36"; + version = "0.20.37"; in python3Packages.buildPythonApplication { pname = "legendary-heroic"; @@ -15,7 +15,7 @@ python3Packages.buildPythonApplication { owner = "Heroic-Games-Launcher"; repo = "legendary"; rev = version; - sha256 = "sha256-+aywgd5RZfkmVuA0MaF2/Ie4a5If/zQxvVCcTfGpQpE="; + sha256 = "sha256-mOys7lOPrrzBUBMIM/JvKygFQ/qIGD68BDNigk5BCIo="; }; propagatedBuildInputs = with python3Packages; [ From 37ce8aa45973884653284335d79b9b20b7e5d142 Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Tue, 27 May 2025 22:23:02 +0100 Subject: [PATCH 207/220] gcc15: apply forgotten no-sys-dirs and mangle-NIX_STORE patches --- pkgs/development/compilers/gcc/patches/default.nix | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkgs/development/compilers/gcc/patches/default.nix b/pkgs/development/compilers/gcc/patches/default.nix index 598c42a48d3e..ee1adfd43e0c 100644 --- a/pkgs/development/compilers/gcc/patches/default.nix +++ b/pkgs/development/compilers/gcc/patches/default.nix @@ -67,6 +67,10 @@ in [ (if atLeast12 then ./gcc-12-no-sys-dirs.patch else ./no-sys-dirs.patch) ] ++ ( { + "15" = [ + ./13/no-sys-dirs-riscv.patch + ./13/mangle-NIX_STORE-in-__FILE__.patch + ]; "14" = [ ./13/no-sys-dirs-riscv.patch ./13/mangle-NIX_STORE-in-__FILE__.patch From f870c6ccc8951fc48aeb293cf3e98ade6ac42668 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Forsman?= Date: Tue, 27 May 2025 17:28:42 +0200 Subject: [PATCH 208/220] libfaketime: run tests --- pkgs/by-name/li/libfaketime/package.nix | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkgs/by-name/li/libfaketime/package.nix b/pkgs/by-name/li/libfaketime/package.nix index 9eba7b189c1f..4a48a3881806 100644 --- a/pkgs/by-name/li/libfaketime/package.nix +++ b/pkgs/by-name/li/libfaketime/package.nix @@ -53,6 +53,8 @@ stdenv.mkDerivation rec { nativeCheckInputs = [ perl ]; + doCheck = true; + meta = with lib; { description = "Report faked system time to programs without having to change the system-wide time"; homepage = "https://github.com/wolfcw/libfaketime/"; From 1eb2ed4d3b1b2f687516d4c3c840a1827d8e6da0 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 27 May 2025 21:58:05 +0000 Subject: [PATCH 209/220] simplotask: 1.17.0 -> 1.17.1 --- pkgs/by-name/si/simplotask/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/si/simplotask/package.nix b/pkgs/by-name/si/simplotask/package.nix index 67cb01ac86c7..8fd2638c5d2a 100644 --- a/pkgs/by-name/si/simplotask/package.nix +++ b/pkgs/by-name/si/simplotask/package.nix @@ -7,13 +7,13 @@ buildGoModule rec { pname = "simplotask"; - version = "1.17.0"; + version = "1.17.1"; src = fetchFromGitHub { owner = "umputun"; repo = "spot"; rev = "v${version}"; - hash = "sha256-uMS2Nf5Brx4hXMGMG3vTU3V2y83gLPb8vau7GA+DGak="; + hash = "sha256-6SWWf1ZprCZPXLvUtIln1+TrOztn7WWkN8o/fQXau5I="; }; vendorHash = null; From 9c6c3d6043f8be547a7669a598f32c2d7e10ce40 Mon Sep 17 00:00:00 2001 From: Sarah Clark Date: Sun, 25 May 2025 12:43:06 -0700 Subject: [PATCH 210/220] python3Packages.pytricia: remove Abandoned. Last change was to the README in 2022. --- .../python-modules/pytricia/default.nix | 25 ------------------- pkgs/top-level/python-aliases.nix | 1 + pkgs/top-level/python-packages.nix | 2 -- 3 files changed, 1 insertion(+), 27 deletions(-) delete mode 100644 pkgs/development/python-modules/pytricia/default.nix diff --git a/pkgs/development/python-modules/pytricia/default.nix b/pkgs/development/python-modules/pytricia/default.nix deleted file mode 100644 index d2caf52c6010..000000000000 --- a/pkgs/development/python-modules/pytricia/default.nix +++ /dev/null @@ -1,25 +0,0 @@ -{ - lib, - buildPythonPackage, - fetchFromGitHub, -}: - -buildPythonPackage { - pname = "pytricia"; - version = "unstable-2019-01-16"; - format = "setuptools"; - - src = fetchFromGitHub { - owner = "jsommers"; - repo = "pytricia"; - rev = "4ba88f68c3125f789ca8cd1cfae156e1464bde87"; - sha256 = "0qp5774xkm700g35k5c76pck8pdzqlyzbaqgrz76a1yh67s2ri8h"; - }; - - meta = with lib; { - description = "Library for fast IP address lookup in Python"; - homepage = "https://github.com/jsommers/pytricia"; - license = with licenses; [ lgpl3Plus ]; - maintainers = with maintainers; [ mkg ]; - }; -} diff --git a/pkgs/top-level/python-aliases.nix b/pkgs/top-level/python-aliases.nix index a49d67cfe7ed..deae43830fe0 100644 --- a/pkgs/top-level/python-aliases.nix +++ b/pkgs/top-level/python-aliases.nix @@ -521,6 +521,7 @@ mapAliases ({ pwndbg = throw "'pwndbg' has been removed due to dependency version incompatibilities that are infeasible to maintain in nixpkgs. 
Use the downstream flake that pwndbg provides instead: https://github.com/pwndbg/pwndbg"; # Added 2025-02-09 pxml = throw "pxml was removed, because it was disabled on all python version since 3.8 and last updated in 2020."; # added 2024-05-13 py3to2 = throw "py3to2 is unmaintained and source is no longer available"; # added 2024-10-23 + pytricia = throw "pytricia has been removed, since it is unmaintained"; # added 2025-05-25 py-radix = throw "py-radix has been removed, since it abandoned"; # added 2023-07-07 py_stringmatching = py-stringmatching; # added 2023-11-12 py17track = throw "py17track was removed because Home Assistant switched to pyseventeentrack"; # added 2024-08-08 diff --git a/pkgs/top-level/python-packages.nix b/pkgs/top-level/python-packages.nix index 322f955fc066..fbd80431a5b1 100644 --- a/pkgs/top-level/python-packages.nix +++ b/pkgs/top-level/python-packages.nix @@ -14520,8 +14520,6 @@ self: super: with self; { pytransportnswv2 = callPackage ../development/python-modules/pytransportnswv2 { }; - pytricia = callPackage ../development/python-modules/pytricia { }; - pytrydan = callPackage ../development/python-modules/pytrydan { }; pyttsx3 = callPackage ../development/python-modules/pyttsx3 { }; From 88fdea3a79e4b2edea6b6c1abc7a4c70890aad18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sandro=20J=C3=A4ckel?= Date: Tue, 27 May 2025 17:36:25 +0200 Subject: [PATCH 211/220] vaultwarden.webvault: 2025.1.1 -> 2025.5.0 --- pkgs/by-name/va/vaultwarden/webvault.nix | 33 +++++------------------- 1 file changed, 6 insertions(+), 27 deletions(-) diff --git a/pkgs/by-name/va/vaultwarden/webvault.nix b/pkgs/by-name/va/vaultwarden/webvault.nix index 9779d63b25b8..5e306c9860b0 100644 --- a/pkgs/by-name/va/vaultwarden/webvault.nix +++ b/pkgs/by-name/va/vaultwarden/webvault.nix @@ -2,43 +2,23 @@ lib, buildNpmPackage, fetchFromGitHub, - git, nixosTests, python3, vaultwarden, }: -let - version = "2025.1.1"; - - suffix = lib.head (lib.match "[0-9.]*([a-z]*)" version); - - bw_web_builds = fetchFromGitHub { - owner = "dani-garcia"; - repo = "bw_web_builds"; - rev = "v${version}"; - hash = "sha256-wQGpl7N0D83FrrV4T+LFe9h3n5Q/MqLbGGO2F5R9k2g="; - }; - -in buildNpmPackage rec { pname = "vaultwarden-webvault"; - inherit version; + version = "2025.5.0.0"; src = fetchFromGitHub { - owner = "bitwarden"; - repo = "clients"; - rev = "web-v${lib.removeSuffix suffix version}"; - hash = "sha256-Bq133V8CsDMnLeaKrW5JmLTGRaZVLRbp+tTgG725tqE="; + owner = "vaultwarden"; + repo = "vw_web_builds"; + tag = "v${version}"; + hash = "sha256-Z3QPKeo7+QV3XnECvLXz2Upv41h579WoVH0Vev0fixk="; }; - npmDepsHash = "sha256-bWcp3VJI2bObLH/XBx3cdxXQY9Cw+IFpeNA2TXVTtFg="; - - postPatch = '' - ln -s ${bw_web_builds}/{patches,resources} .. - PATH="${git}/bin:$PATH" VAULT_VERSION="${lib.removePrefix "web-" src.rev}" \ - bash ${bw_web_builds}/scripts/apply_patches.sh - ''; + npmDepsHash = "sha256-FC3x7H0MQDVGajtaMA2PUK5+soG6kD9AaDbq/s1pOnY="; nativeBuildInputs = [ python3 @@ -73,7 +53,6 @@ buildNpmPackage rec { ''; passthru = { - inherit bw_web_builds; tests = nixosTests.vaultwarden; }; From ce08d17cb5e32ecc41beb900df1c865f8d99b7db Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Wed, 28 May 2025 00:48:16 +0000 Subject: [PATCH 212/220] libretro-shaders-slang: 0-unstable-2025-05-17 -> 0-unstable-2025-05-20 --- pkgs/by-name/li/libretro-shaders-slang/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/li/libretro-shaders-slang/package.nix b/pkgs/by-name/li/libretro-shaders-slang/package.nix index d371b4371d03..ff39c9c8fa65 100644 --- a/pkgs/by-name/li/libretro-shaders-slang/package.nix +++ b/pkgs/by-name/li/libretro-shaders-slang/package.nix @@ -7,13 +7,13 @@ stdenvNoCC.mkDerivation { pname = "libretro-shaders-slang"; - version = "0-unstable-2025-05-17"; + version = "0-unstable-2025-05-20"; src = fetchFromGitHub { owner = "libretro"; repo = "slang-shaders"; - rev = "37aa1f355e19701b192ae0b88855094fe2f5ff22"; - hash = "sha256-30xMYR54z7aJqQZL8zqO93n12vYA7uE3GnZTHP92uRc="; + rev = "8c630e0d3234d93b6c2bc847371f86aa4e535686"; + hash = "sha256-BDxgVBWDUYgSvEl9dn/PB8c4ceYgM1Bo4aEzvqwTaYA="; }; dontConfigure = true; From d4519a6f74c14cb7d800ac2e793cba65bc3c421e Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Wed, 28 May 2025 01:47:42 +0000 Subject: [PATCH 213/220] mona-sans: 1.0.1 -> 2.0 --- pkgs/by-name/mo/mona-sans/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/mo/mona-sans/package.nix b/pkgs/by-name/mo/mona-sans/package.nix index bc41d2d58f28..c7942b3ef057 100644 --- a/pkgs/by-name/mo/mona-sans/package.nix +++ b/pkgs/by-name/mo/mona-sans/package.nix @@ -6,13 +6,13 @@ stdenvNoCC.mkDerivation (finalAttrs: { pname = "mona-sans"; - version = "1.0.1"; + version = "2.0"; src = fetchFromGitHub { rev = "v${finalAttrs.version}"; owner = "github"; repo = "mona-sans"; - sha256 = "sha256-XvqLFzlgIqx9aZH2SEAtwMiuWgUiDi/gHGSpfreUHuk="; + sha256 = "sha256-pFc6EUzf4FlA0LVdDEnFNr7m5V1FLc4jTAvtZdOslTg="; }; installPhase = '' From 4303275296251ae6c18b0062811286f19dce28c3 Mon Sep 17 00:00:00 2001 From: rewine Date: Wed, 28 May 2025 10:02:54 +0800 Subject: [PATCH 214/220] deepin.dde-tray-loader: 1.0.7 -> 1.0.9 --- pkgs/desktops/deepin/core/dde-tray-loader/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/desktops/deepin/core/dde-tray-loader/default.nix b/pkgs/desktops/deepin/core/dde-tray-loader/default.nix index 6c3b7febf80b..858924b5d140 100644 --- a/pkgs/desktops/deepin/core/dde-tray-loader/default.nix +++ b/pkgs/desktops/deepin/core/dde-tray-loader/default.nix @@ -18,13 +18,13 @@ stdenv.mkDerivation (finalAttrs: { pname = "dde-tray-loader"; - version = "1.0.7"; + version = "1.0.9"; src = fetchFromGitHub { owner = "linuxdeepin"; repo = "dde-tray-loader"; rev = finalAttrs.version; - hash = "sha256-LzRjOl3kHArpxwerh7XOisYIJ+t+r/zWUbvYh6k6zKw="; + hash = "sha256-3rmLQRGtBLASr0VSsIfGP0R9HDxFlea+iNbVjkqKTVg="; }; patches = [ From 8d399f35cc57dfbfc09658fe7fdd284a0fd34dbc Mon Sep 17 00:00:00 2001 From: emilylange Date: Wed, 28 May 2025 04:02:58 +0200 Subject: [PATCH 215/220] chromium,chromedriver: 136.0.7103.113 -> 137.0.7151.55 https://chromereleases.googleblog.com/2025/05/stable-channel-update-for-desktop_27.html This update includes 11 security fixes. 
CVEs: CVE-2025-5063 CVE-2025-5280 CVE-2025-5064 CVE-2025-5065 CVE-2025-5066 CVE-2025-5281 CVE-2025-5283 CVE-2025-5067 --- .../networking/browsers/chromium/common.nix | 26 ++ .../networking/browsers/chromium/info.json | 276 +++++++++--------- 2 files changed, 164 insertions(+), 138 deletions(-) diff --git a/pkgs/applications/networking/browsers/chromium/common.nix b/pkgs/applications/networking/browsers/chromium/common.nix index fe3e5e28cef3..031ac6d70db7 100644 --- a/pkgs/applications/networking/browsers/chromium/common.nix +++ b/pkgs/applications/networking/browsers/chromium/common.nix @@ -565,6 +565,32 @@ let # preventing compilations of chromium with versions below their intended version, not about running the very # exact version or even running a newer version. ./patches/chromium-136-nodejs-assert-minimal-version-instead-of-exact-match.patch + ] + ++ lib.optionals (chromiumVersionAtLeast "137") [ + (fetchpatch { + # Partial revert of upstream clang+llvm bump revert to fix the following error when building with LLVM < 21: + # clang++: error: unknown argument: '-fextend-variable-liveness=none' + # https://chromium-review.googlesource.com/c/chromium/src/+/6514242 + name = "chromium-137-llvm-19.patch"; + url = "https://chromium.googlesource.com/chromium/src/+/ddf8f8a465be2779bd826db57f1299ccd2f3aa25^!?format=TEXT"; + includes = [ "build/config/compiler/BUILD.gn" ]; + revert = true; + decode = "base64 -d"; + hash = "sha256-wAR8E4WKMvdkW8DzdKpyNpp4dynIsYAbnJ2MqE8V2o8="; + }) + ] + ++ lib.optionals (chromiumVersionAtLeast "137") [ + (fetchpatch { + # Backport "Fix build with system libpng" that fixes a typo in core/fxcodec/png/png_decoder.cpp that causes + # the build to fail at the final linking step. + # https://pdfium-review.googlesource.com/c/pdfium/+/132130 + name = "pdfium-Fix-build-with-system-libpng.patch"; + url = "https://pdfium.googlesource.com/pdfium.git/+/83f11d630aa1cb6d5ceb292364412f7b0585a201^!?format=TEXT"; + extraPrefix = "third_party/pdfium/"; + stripLen = 1; + decode = "base64 -d"; + hash = "sha256-lDX0OLdxxTNLtViqEt0luJQ/H0mlvQfV0zbY1Ubqyq0="; + }) ]; postPatch = diff --git a/pkgs/applications/networking/browsers/chromium/info.json b/pkgs/applications/networking/browsers/chromium/info.json index 64a68d26f105..b3bda2dfd9df 100644 --- a/pkgs/applications/networking/browsers/chromium/info.json +++ b/pkgs/applications/networking/browsers/chromium/info.json @@ -1,27 +1,27 @@ { "chromium": { - "version": "136.0.7103.113", + "version": "137.0.7151.55", "chromedriver": { - "version": "136.0.7103.114", - "hash_darwin": "sha256-RAWarx2vOh23XKvhNwAkCgG9swGxX1dw8LaqIQBPJFo=", - "hash_darwin_aarch64": "sha256-TZcO5RiRW0dN0+jBArclBkIvYSSirhmPgJXswfTufgk=" + "version": "137.0.7151.56", + "hash_darwin": "sha256-z4GTPrONaXARP0d8vInJdFxR052PuuI6IJy1PEv2RNg=", + "hash_darwin_aarch64": "sha256-wlSDfCiBTdLWwabpHwOiM8Y3asn7ueHGSMh2AANaE+A=" }, "deps": { "depot_tools": { - "rev": "f40ddcd8d51626fb7be3ab3c418b3f3be801623f", - "hash": "sha256-O9vVbrCqHD4w39Q8ZAxl1RwzJxbH/thjqacMtCnOPdg=" + "rev": "1fcc527019d786502b02f71b8b764ee674a40953", + "hash": "sha256-7HJyJARZPes5MmKgXd3TV1uRjk0bH/pkPm+F4scg+Tc=" }, "gn": { - "rev": "6e8e0d6d4a151ab2ed9b4a35366e630c55888444", - "hash": "sha256-vDKMt23RMDI+KX6CmjfeOhRv2haf/mDOuHpWKnlODcg=" + "rev": "85cc21e94af590a267c1c7a47020d9b420f8a033", + "hash": "sha256-+nKP2hBUKIqdNfDz1vGggXSdCuttOt0GwyGUQ3Z1ZHI=" }, - "npmHash": "sha256-QRjk9X4rJW3ofizK33R4T1qym1riqcnpBhDF+FfNZLo=" + "npmHash": "sha256-I6MsfAhrLRmgiRJ13LSejfy2N63C3Oug5tOOXA622j4=" }, "DEPS": { 
"src": { "url": "https://chromium.googlesource.com/chromium/src.git", - "rev": "76fa3c1782406c63308c70b54f228fd39c7aaa71", - "hash": "sha256-U6WsxmGf4eFKVBBgppoHIfMlrT34a1oymZETzEhzkQA=", + "rev": "254bc711794d7ad269495f3d419a209935b78cad", + "hash": "sha256-dB81lgjgVK0qXWgAddB7G4L7rsJpZp+0VsjDKvGugEs=", "recompress": true }, "src/third_party/clang-format/script": { @@ -31,28 +31,28 @@ }, "src/third_party/compiler-rt/src": { "url": "https://chromium.googlesource.com/external/github.com/llvm/llvm-project/compiler-rt.git", - "rev": "bc2b30185219a2defe3c8a3b45f95a11386a7f6f", - "hash": "sha256-bfDMglQaiExTFwaVBroia+6G+9AHEVy5cQGocaEVOgA=" + "rev": "d0e4db9fcea15a392aaada986cbe33658afc0454", + "hash": "sha256-P/uDeqalafY1S7AqZkL1Pz7Jc+iWrkfiACxEtgTRqdU=" }, "src/third_party/libc++/src": { "url": "https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx.git", - "rev": "449310fe2e37834a7e62972d2a690cade2ef596b", - "hash": "sha256-Ypi5fmWdoNA1IZDoKITlkNRITmho8HzVlgjlmtx0Y84=" + "rev": "9d0cba76be7399399d3a499ff3a52c264db3b104", + "hash": "sha256-wpMma142NBqyrSbaReQr5yOYhvQIZ06j6S2EUnXmZ2I=" }, "src/third_party/libc++abi/src": { "url": "https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git", - "rev": "94c5d7a8edc09f0680aee57548c0b5d400c2840d", - "hash": "sha256-wMMfj3E2AQJxovoSEIuT2uTyrcGBurS1HrHZOmP36+g=" + "rev": "f2a7f2987f9dcdf8b04c2d8cd4dcb186641a7c3e", + "hash": "sha256-X9cAbyd8ZPSwqOGhPYwIZ6b9E3tVwAuAYZKMgbZQxgk=" }, "src/third_party/libunwind/src": { "url": "https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind.git", - "rev": "e2e6f2a67e9420e770b014ce9bba476fa2ab9874", - "hash": "sha256-LdRaxPo2i7uMeFxpR7R4o3V+1ycBcygT/D+gklsD0tA=" + "rev": "81e2cb40a70de2b6978e6d8658891ded9a77f7e3", + "hash": "sha256-XdFKn+cGOxA0fHkVMG9UAhCmpML44ocoyHB7XnumX7o=" }, "src/third_party/llvm-libc/src": { "url": "https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libc.git", - "rev": "97989c1bfa112c81f6499487fedc661dcf6d3b2e", - "hash": "sha256-9Ieaxe0PFIIP4RttODd8pTw/zVjQZGZtaYSybwnzTz0=" + "rev": "cc59264cf9b2ecab0cfc8b51f6f1878372416d36", + "hash": "sha256-wQMUL5uAaR8sA1V0FHTZv3jVVaF3NxiHfNnlMq3YImY=" }, "src/chrome/test/data/perf/canvas_bench": { "url": "https://chromium.googlesource.com/chromium/canvas_bench.git", @@ -71,18 +71,18 @@ }, "src/docs/website": { "url": "https://chromium.googlesource.com/website.git", - "rev": "929dd3e6d02aac1f46653d03b2a644e2873a3bbb", - "hash": "sha256-lY4P2f90/9JwCpxuBFjim7KygczM8zMDQVUaEYaQjnA=" + "rev": "e157e12d99cfc729a970b474344673c44e2d2c9c", + "hash": "sha256-fowwJbXOR4OIN4+1bJEWv9VP/TLHb9+H1Vt3apVLwkk=" }, "src/media/cdm/api": { "url": "https://chromium.googlesource.com/chromium/cdm.git", - "rev": "5a1675c86821a48f8983842d07f774df28dfb43c", - "hash": "sha256-FgeuOsxToA4qx3H76czCPeO/WVtprRkllDMPancw3Ik=" + "rev": "852a81f0ae3ab350041d2e44d207a42fb0436ae1", + "hash": "sha256-3JBBcBg2ep/7LnvMHBWnqAFG+etETArFXZr4Klv30T4=" }, "src/net/third_party/quiche/src": { "url": "https://quiche.googlesource.com/quiche.git", - "rev": "5077431b183c43f10890b865fc9f02a4dcf1dd85", - "hash": "sha256-CLvZTBvtTdOpC8eWUTWkb0ITJ5EViPmc6d5O8cTaKY8=" + "rev": "faec206356fe384c522f34982ae2e92f2f111242", + "hash": "sha256-8SuRhYAD3RWMiqD/a8usrRnYKd6prAK5jdwJVXRI+Q0=" }, "src/testing/libfuzzer/fuzzers/wasm_corpus": { "url": "https://chromium.googlesource.com/v8/fuzzer_wasm_corpus.git", @@ -96,8 +96,8 @@ }, "src/third_party/angle": { "url": "https://chromium.googlesource.com/angle/angle.git", - 
"rev": "fa40b7c586fd2da9fd7e5c4d893ecb1334553b9e", - "hash": "sha256-bIpN9lehrKpJYBKLeo8Szz0/aVe7NU2Eo2NIO5dAZ9w=" + "rev": "df9c59dcacff7d186d00e3263a1aa68f8059137c", + "hash": "sha256-ybi/DwOQ10I+MK9buKpdNcUlFAI9RA3NfyoB3Udpfyo=" }, "src/third_party/angle/third_party/glmark2/src": { "url": "https://chromium.googlesource.com/external/github.com/glmark2/glmark2", @@ -111,8 +111,8 @@ }, "src/third_party/angle/third_party/VK-GL-CTS/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/VK-GL-CTS", - "rev": "b6bb4bab7b4a36bc95566e00cb8f01051089afc3", - "hash": "sha256-L2ewIW6C+PTftbbXf+nlWcFD0y4naBNg7FLXMMxiWac=" + "rev": "dd7e71367795e2dc4d46effda5378f22e9000d16", + "hash": "sha256-EZoSoDLFWRR2xkHOKNaNVQvubFp8in0p7/CHN8CFaVI=" }, "src/third_party/anonymous_tokens/src": { "url": "https://chromium.googlesource.com/external/github.com/google/anonymous-tokens.git", @@ -131,8 +131,8 @@ }, "src/third_party/dawn": { "url": "https://dawn.googlesource.com/dawn.git", - "rev": "1cffe7ec763900d104e4df62bc96d93f572157cb", - "hash": "sha256-VK+5saAJlZOluMAYKTKwNcnZALsCYkzgVfQHylt3584=" + "rev": "fbe707f88ccabca01031e47bf165bd9d499878dd", + "hash": "sha256-8tmDR3l7eHWUfVRU90Kg76N/moU6Lb5b3FySJOckl8U=" }, "src/third_party/dawn/third_party/glfw": { "url": "https://chromium.googlesource.com/external/github.com/glfw/glfw", @@ -141,8 +141,8 @@ }, "src/third_party/dawn/third_party/dxc": { "url": "https://chromium.googlesource.com/external/github.com/microsoft/DirectXShaderCompiler", - "rev": "206b77577d15fc5798eb7ad52290388539b7146d", - "hash": "sha256-WXgiOlqtczrUkXp46Q/GTaYk0LDqebQSFbyWpD299Xw=" + "rev": "8209d53f0ef0257e5b8c78d22057086403946cca", + "hash": "sha256-2yM8Fct7Ru8ZSFr+Qm1Bv52K2/geAwmOpWc/X7yxLQY=" }, "src/third_party/dawn/third_party/dxheaders": { "url": "https://chromium.googlesource.com/external/github.com/microsoft/DirectX-Headers", @@ -161,8 +161,8 @@ }, "src/third_party/dawn/third_party/webgpu-cts": { "url": "https://chromium.googlesource.com/external/github.com/gpuweb/cts", - "rev": "5fbd82847521cb2d584773facd56c2eb6a4df180", - "hash": "sha256-WTVOc2EVB/DJ4aDeB8XIF/ff6LSeEUMt2Xkvj5Hu4aU=" + "rev": "3df76734dc695c4d1c51276b5d9eb63078362972", + "hash": "sha256-4jCsCt2rcUpUk2xeL3tZx/jTnuJ+COG+xsDtR+sK1oQ=" }, "src/third_party/highway/src": { "url": "https://chromium.googlesource.com/external/github.com/google/highway.git", @@ -176,13 +176,13 @@ }, "src/third_party/boringssl/src": { "url": "https://boringssl.googlesource.com/boringssl.git", - "rev": "a9993612faac4866bc33ca8ff37bfd0659af1c48", - "hash": "sha256-fUPl9E2b7RfanH0pZNArIkJ4lnnmCtyk7sCaTArCB70=" + "rev": "918cf66ed841930fe1554ae8d78974b95e989596", + "hash": "sha256-gzcXse/emv9JBMiInUV5KTeyMQ0igUdFpzUJR4vCUu4=" }, "src/third_party/breakpad/breakpad": { "url": "https://chromium.googlesource.com/breakpad/breakpad.git", - "rev": "657a441e5c1a818d4c10b7bafd431454e6614901", - "hash": "sha256-9MePkv10fwyJ0VDWRtvRcbLMAcJzZlziGTPzXJYjVJE=" + "rev": "232a723f5096ab02d53d87931efa485fa77d3b03", + "hash": "sha256-0ynZuxIqBIpNkfD3Y9XdPFQr7HeQcsUO3lhnqvH+k8c=" }, "src/third_party/cast_core/public/src": { "url": "https://chromium.googlesource.com/cast_core/public", @@ -191,8 +191,8 @@ }, "src/third_party/catapult": { "url": "https://chromium.googlesource.com/catapult.git", - "rev": "5bda0fdab9d93ec9963e2cd858c7b49ad7fec7d4", - "hash": "sha256-xwR9gGE8uU8qFr7GgS3/1JiuTmj1tvcM5CoCfPMdW2M=" + "rev": "000f47cfa393d7f9557025a252862e2a61a60d44", + "hash": "sha256-FIJZE1Qu1MLZA4qxB68k1NjhgSbFTjf57YF85JicVZw=" }, 
"src/third_party/ced/src": { "url": "https://chromium.googlesource.com/external/github.com/google/compact_enc_det.git", @@ -216,8 +216,8 @@ }, "src/third_party/cpuinfo/src": { "url": "https://chromium.googlesource.com/external/github.com/pytorch/cpuinfo.git", - "rev": "b73ae6ce38d5dd0b7fe46dbe0a4b5f4bab91c7ea", - "hash": "sha256-JNLaK105qDk9DxTqCFyXFfYn46dF+nZIaF5urSVRa0U=" + "rev": "39ea79a3c132f4e678695c579ea9353d2bd29968", + "hash": "sha256-uochXC0AtOw8N/ycyVJdiRw4pibCW2ENrFMT3jtxDSg=" }, "src/third_party/crc32c/src": { "url": "https://chromium.googlesource.com/external/github.com/google/crc32c.git", @@ -226,29 +226,34 @@ }, "src/third_party/cros_system_api": { "url": "https://chromium.googlesource.com/chromiumos/platform2/system_api.git", - "rev": "62ab80355a8194e051bd1d93a5c09093c7645a32", - "hash": "sha256-pZi6GRu7OGL7jbN4FM2qDsLCsT6cM+RM0a7XtFZVSVE=" + "rev": "68114875ad35b573034a2ab1f5cdf3dbb0e59468", + "hash": "sha256-cGpteAnjGcxJUcrdLRFfQN7ruTEdNvNCbOH6EC+a39s=" }, "src/third_party/crossbench": { "url": "https://chromium.googlesource.com/crossbench.git", - "rev": "ce46be2573328fa7b0fd1d23c04b63389f298122", - "hash": "sha256-Q0kdJdEmh+wbO5oeTp98OHKh9luz8u6PDztGToldZjk=" + "rev": "d91cc488cd651b00009e5d6c70f222362598bec9", + "hash": "sha256-o/sw1P+mZOSb6XIVFivC02hTPu++x+xJy2SRP2I9yGE=" }, "src/third_party/depot_tools": { "url": "https://chromium.googlesource.com/chromium/tools/depot_tools.git", - "rev": "f40ddcd8d51626fb7be3ab3c418b3f3be801623f", - "hash": "sha256-O9vVbrCqHD4w39Q8ZAxl1RwzJxbH/thjqacMtCnOPdg=" + "rev": "1fcc527019d786502b02f71b8b764ee674a40953", + "hash": "sha256-7HJyJARZPes5MmKgXd3TV1uRjk0bH/pkPm+F4scg+Tc=" }, "src/third_party/devtools-frontend/src": { "url": "https://chromium.googlesource.com/devtools/devtools-frontend", - "rev": "4a53cbe7a1270c91ec60903ee792de658453becb", - "hash": "sha256-hEksLeJli/1TNNrDcUjv19cpyIJph6kfriNfe7FWO0U=" + "rev": "a54ed1df191a9e2aff2e9ef453ee6fdc959dd125", + "hash": "sha256-E6sx2ioDZRWJljbS17ztRwz+gsDhIHiluvkUx1rRZcw=" }, "src/third_party/dom_distiller_js/dist": { "url": "https://chromium.googlesource.com/chromium/dom-distiller/dist.git", "rev": "199de96b345ada7c6e7e6ba3d2fa7a6911b8767d", "hash": "sha256-yuEBD2XQlV3FGI/i7lTmJbCqzeBiuG1Qow8wvsppGJw=" }, + "src/third_party/dragonbox/src": { + "url": "https://chromium.googlesource.com/external/github.com/jk-jeon/dragonbox.git", + "rev": "6c7c925b571d54486b9ffae8d9d18a822801cbda", + "hash": "sha256-AOniXMPgwKpkJqivRd+GazEnhdw53FzhxKqG+GdU+cc=" + }, "src/third_party/eigen3/src": { "url": "https://chromium.googlesource.com/external/gitlab.com/libeigen/eigen.git", "rev": "464c1d097891a1462ab28bf8bb763c1683883892", @@ -266,8 +271,8 @@ }, "src/third_party/ffmpeg": { "url": "https://chromium.googlesource.com/chromium/third_party/ffmpeg.git", - "rev": "fbce2a76c00cd2e5aeffe3c2e71d44c284ec52d6", - "hash": "sha256-bGa0BCvzNxEKu9VZEwJ1NLt+b2KKWUxshpKSN2FHNEM=" + "rev": "01f23648c6b84de6c0f717fa4e1816f53b9ee72e", + "hash": "sha256-hNzQZQxaa2Wtl7GWWF852cFmmXy4pc15Pp0d59TTfnI=" }, "src/third_party/flac": { "url": "https://chromium.googlesource.com/chromium/deps/flac.git", @@ -296,8 +301,8 @@ }, "src/third_party/freetype/src": { "url": "https://chromium.googlesource.com/chromium/src/third_party/freetype2.git", - "rev": "82090e67c24259c343c83fd9cefe6ff0be7a7eca", - "hash": "sha256-LhSIX7X0+dmLADYGNclg73kIrXmjTMM++tJ92MKzanA=" + "rev": "2d1abd3bbb4d2396ed63b3e5accd66724cf62307", + "hash": "sha256-MAVHzILj9f+/HfVjZXyJkSQM3WBwzg7IDpAwiYHfA88=" }, "src/third_party/freetype-testing/src": { "url": 
"https://chromium.googlesource.com/external/github.com/freetype/freetype2-testing.git", @@ -311,18 +316,18 @@ }, "src/third_party/harfbuzz-ng/src": { "url": "https://chromium.googlesource.com/external/github.com/harfbuzz/harfbuzz.git", - "rev": "8efd2d85c78fbba6ca09a3e454f77525f3b296ce", - "hash": "sha256-/WNGrvyvJ+FGqoIoHapaux1iu63zjID0yR30HYPpxaw=" + "rev": "9f83bbbe64654b45ba5bb06927ff36c2e7588495", + "hash": "sha256-lNnCtgIegUy4DLhYaGZXcEaFw83KWAHoKpz69AEsWp4=" }, "src/third_party/ink/src": { "url": "https://chromium.googlesource.com/external/github.com/google/ink.git", - "rev": "c542d619a8959415beda5a76fe89ffa2f83df886", - "hash": "sha256-sMqSHYs3lvuHXEov1K9xWRd8tUPG00QBJl6an0zrxwA=" + "rev": "da9cb551ada1e55309b0ac89b9fbff2d29dbfe1e", + "hash": "sha256-MqJXwtUGL/IakwOO63JX4gx0gTocgQT3hbhw6OcYUbc=" }, "src/third_party/ink_stroke_modeler/src": { "url": "https://chromium.googlesource.com/external/github.com/google/ink-stroke-modeler.git", - "rev": "f61f28792a00c9bdcb3489fec81d8fd0ca1cbaba", - "hash": "sha256-XMLW/m+Qx+RVgo1DeYggBLjUYg/M+2eHwgjVWrA/Erw=" + "rev": "03db1ed37b8b10b47d62ed0fa142d198a3861689", + "hash": "sha256-jnIljheEBq96e6zZO87bhVJbA1vIjiRzm1Hh6YMBdnU=" }, "src/third_party/instrumented_libs": { "url": "https://chromium.googlesource.com/chromium/third_party/instrumented_libraries.git", @@ -346,8 +351,8 @@ }, "src/third_party/googletest/src": { "url": "https://chromium.googlesource.com/external/github.com/google/googletest.git", - "rev": "52204f78f94d7512df1f0f3bea1d47437a2c3a58", - "hash": "sha256-8keF4E6ag/rikv5ROaWUB7oganjViupEAdxW1NJVgmE=" + "rev": "cd430b47a54841ec45d64d2377d7cabaf0eba610", + "hash": "sha256-QT9PQ9bF+eCPfRLkcHpH4jc0UZfGPc98fHf8QDV5bZg=" }, "src/third_party/hunspell_dictionaries": { "url": "https://chromium.googlesource.com/chromium/deps/hunspell_dictionaries.git", @@ -356,8 +361,8 @@ }, "src/third_party/icu": { "url": "https://chromium.googlesource.com/chromium/deps/icu.git", - "rev": "c9fb4b3a6fb54aa8c20a03bbcaa0a4a985ffd34b", - "hash": "sha256-Omv4sp9z44eINXtaE0+1TzIU1q2hWviANA79fmkF78U=" + "rev": "4c8cc4b365a505ce35be1e0bd488476c5f79805d", + "hash": "sha256-eGI/6wk6IOUPvX7pRTm4VJk1CqkkxalTu84L36i/D6k=" }, "src/third_party/jsoncpp/source": { "url": "https://chromium.googlesource.com/external/github.com/open-source-parsers/jsoncpp.git", @@ -376,8 +381,8 @@ }, "src/third_party/fuzztest/src": { "url": "https://chromium.googlesource.com/external/github.com/google/fuzztest.git", - "rev": "c31f0c0e6df5725c6b03124b579c9cf815fd10f4", - "hash": "sha256-Dz7DqucOxr5HzLNOdGNOG4iMw66bkOj64qOvqeADTic=" + "rev": "b10387fdbbca18192f85eaa5323a59f44bf9c468", + "hash": "sha256-L2QG0pUmGjGdtdlivxYfxSqO9YaVHpIT6lvJwBMTxMw=" }, "src/third_party/domato/src": { "url": "https://chromium.googlesource.com/external/github.com/googleprojectzero/domato.git", @@ -391,8 +396,8 @@ }, "src/third_party/libaom/source/libaom": { "url": "https://aomedia.googlesource.com/aom.git", - "rev": "9680f2b1781fb33b9eeb52409b75c679c8a954be", - "hash": "sha256-nfnt5JXyKR9JR3BflpGEkwzDo0lYa/oeCDm2bKH/j1g=" + "rev": "719f60edc51b6141a2434bf1b5110c2fb075b246", + "hash": "sha256-W62uXVbQiq6Ef3bar2NsCXJoz5KKUK8Y/9n2vK7Vf3Q=" }, "src/third_party/crabbyavif/src": { "url": "https://chromium.googlesource.com/external/github.com/webmproject/CrabbyAvif.git", @@ -401,13 +406,8 @@ }, "src/third_party/nearby/src": { "url": "https://chromium.googlesource.com/external/github.com/google/nearby-connections.git", - "rev": "8acf9249344ea9ff9806d0d7f46e07640fddf550", - "hash": 
"sha256-qIIyCHay3vkE14GVCq77psm1OyuEYs4guAaQDlEwiMg=" - }, - "src/third_party/beto-core/src": { - "url": "https://beto-core.googlesource.com/beto-core.git", - "rev": "89563fec14c756482afa08b016eeba9087c8d1e3", - "hash": "sha256-QPFGjtu/I0r4+dTQ2eSlWIEYwJ43B3yW0q4QtVFTVGY=" + "rev": "e71de0e0c312caf8d2fa22f132619c6a68496444", + "hash": "sha256-dzJtRhoPA1FWeu0xjd7kJ1Q2nT5gIkKpIgQmywsRlPY=" }, "src/third_party/securemessage/src": { "url": "https://chromium.googlesource.com/external/github.com/google/securemessage.git", @@ -416,8 +416,8 @@ }, "src/third_party/jetstream/main": { "url": "https://chromium.googlesource.com/external/github.com/WebKit/JetStream.git", - "rev": "0260caf74b5c115507ee0adb6d9cdf6aefb0965f", - "hash": "sha256-DbRup4tOAYv27plzB2JKi2DBX2FVMDtFR7AzuovXUDU=" + "rev": "0976ddeae0863ef5fb3f9ad09906224b0989f9ad", + "hash": "sha256-NyXGd7SwsECGBJ2qodGYB3os+UBgIOg/I8mnrsZJuTg=" }, "src/third_party/jetstream/v2.2": { "url": "https://chromium.googlesource.com/external/github.com/WebKit/JetStream.git", @@ -426,8 +426,8 @@ }, "src/third_party/speedometer/main": { "url": "https://chromium.googlesource.com/external/github.com/WebKit/Speedometer.git", - "rev": "c760d160caa05792d3ed7650e85861c9f9462506", - "hash": "sha256-/nAK2uLjpPem37XCHHx3LGZEpvL/7w4Uw5bVpQ4C6ms=" + "rev": "dd661c033abdde11022779f40375c52632a9f43a", + "hash": "sha256-1/G06WCO5ssBS3+T6E3rnGdIf0r205wVxfJX7lgivR4=" }, "src/third_party/speedometer/v3.1": { "url": "https://chromium.googlesource.com/external/github.com/WebKit/Speedometer.git", @@ -466,8 +466,8 @@ }, "src/third_party/expat/src": { "url": "https://chromium.googlesource.com/external/github.com/libexpat/libexpat.git", - "rev": "624da0f593bb8d7e146b9f42b06d8e6c80d032a3", - "hash": "sha256-Iwu9+i/0vsPyu6pOWFxjNNblVxMl6bTPW5eWyaju4Mg=" + "rev": "69d6c054c1bd5258c2a13405a7f5628c72c177c2", + "hash": "sha256-qe8O7otL6YcDDBx2DS/+c5mWIS8Rf8RQXVtLFMIAeyk=" }, "src/third_party/libipp/libipp": { "url": "https://chromium.googlesource.com/chromiumos/platform2/libipp.git", @@ -511,8 +511,8 @@ }, "src/third_party/libvpx/source/libvpx": { "url": "https://chromium.googlesource.com/webm/libvpx.git", - "rev": "027bbee30a0103b99d86327b48d29567fed11688", - "hash": "sha256-+4I6B1aTa+txhey6LMeflU0pe39V6TJ+lNIJPh6yFGM=" + "rev": "40ec928b3fadcf8edd836445bb5842a11aeb7a2d", + "hash": "sha256-aUHvIv78KTiyN/cOYNuhW4UCOD55s8l8VLu4oP0Pk1s=" }, "src/third_party/libwebm/source": { "url": "https://chromium.googlesource.com/webm/libwebm.git", @@ -526,8 +526,8 @@ }, "src/third_party/libyuv": { "url": "https://chromium.googlesource.com/libyuv/libyuv.git", - "rev": "ccdf870348764e4b77fa3b56accb2a896a901bad", - "hash": "sha256-8sH11psWPXLMy3Q0tAizCZ/woUWvTCCUf44jcr2C4Xs=" + "rev": "9f9b5cf660dcfa0d3fdee41cf4ffbe4bb6e95114", + "hash": "sha256-OYmsMPz7nJwkVSpsDW7SbqrCU5raC1k3Mh/UkonCujM=" }, "src/third_party/lss": { "url": "https://chromium.googlesource.com/linux-syscall-support.git", @@ -546,8 +546,8 @@ }, "src/third_party/nasm": { "url": "https://chromium.googlesource.com/chromium/deps/nasm.git", - "rev": "767a169c8811b090df222a458b25dfa137fc637e", - "hash": "sha256-yg4qwhS68B/sWfcJeXUqPC69ppE8FaIyRc+IkUQXSnU=" + "rev": "9f916e90e6fc34ec302573f6ce147e43e33d68ca", + "hash": "sha256-neYrS4kQ76ihUh22Q3uPR67Ld8+yerA922YSZU1KxJs=" }, "src/third_party/neon_2_sse/src": { "url": "https://chromium.googlesource.com/external/github.com/intel/ARM_NEON_2_x86_SSE.git", @@ -561,8 +561,8 @@ }, "src/third_party/openscreen/src": { "url": "https://chromium.googlesource.com/openscreen", - "rev": 
"db9e1ea566813606ca055868be13f6ff4a760ab8", - "hash": "sha256-K/frmCf3JMvPVZc6ZKPFAQrq4Pz4io3XBvADS0O5u78=" + "rev": "40fe10467c27b6536e5d3241e5881b6e9f243216", + "hash": "sha256-fKXCuGzNVcN8l/2VNR5c9lwUjmSDb7MuEAVF5h8VXQU=" }, "src/third_party/openscreen/src/buildtools": { "url": "https://chromium.googlesource.com/chromium/src/buildtools", @@ -576,13 +576,13 @@ }, "src/third_party/pdfium": { "url": "https://pdfium.googlesource.com/pdfium.git", - "rev": "ca83e69429af8f0bfa34b22dc54f538b9eebf5c5", - "hash": "sha256-6gsur+fx546YJn/PUOOthuj+XrSIruVUeAYl4nRI6xM=" + "rev": "c82c611f105c0df064cc8c76363578caf9eafb75", + "hash": "sha256-kcrWcvbbGgQTfGypJ2EaLunYtSipJJRAin2jHunZoCU=" }, "src/third_party/perfetto": { "url": "https://chromium.googlesource.com/external/github.com/google/perfetto.git", - "rev": "054635b91453895720951f7329619d003a98b3e4", - "hash": "sha256-2jKRhHLitR0m2a4/asvVvTqAOhUlyLsBBSjpQAer4GA=" + "rev": "f35ae1939989c58c29df43f9c2d8610f5b932715", + "hash": "sha256-SyYTZnNar6F6/k6PGrkRan3l9hAikEVRciDQQaR7Jvs=" }, "src/third_party/protobuf-javascript/src": { "url": "https://chromium.googlesource.com/external/github.com/protocolbuffers/protobuf-javascript", @@ -591,8 +591,8 @@ }, "src/third_party/pthreadpool/src": { "url": "https://chromium.googlesource.com/external/github.com/google/pthreadpool.git", - "rev": "4e1831c02c74334a35ead03362f3342b6cea2a86", - "hash": "sha256-mB1QaAuY8vfv8FasPyio1AF75iYH+dM8t1GIr0Ty/+g=" + "rev": "290ee6fff0c36614702d6b297c148e3fa08e056a", + "hash": "sha256-jRHF7vZPmh70jNFVukfWzVnA2dBLSDSnMWVyZ9e08n4=" }, "src/third_party/pyelftools": { "url": "https://chromium.googlesource.com/chromiumos/third_party/pyelftools.git", @@ -621,13 +621,13 @@ }, "src/third_party/search_engines_data/resources": { "url": "https://chromium.googlesource.com/external/search_engines_data.git", - "rev": "07834ba1e5ebfb333d0b73556b7c4d62a53cb455", - "hash": "sha256-DTz351NpoygQLESm/z+fzFc/KGJyQelLnWpzNMmNT9o=" + "rev": "be408bdc2c1501ef25206145a49dcebb98db34b5", + "hash": "sha256-XlAE782PsEysPVIBM/Q8VdE9XnvoYUVaeMmUUoYFgvM=" }, "src/third_party/skia": { "url": "https://skia.googlesource.com/skia.git", - "rev": "bcce46ca33b67cc302dd53927a63013b8f53bf73", - "hash": "sha256-ei95CJRfNPrsYt8XcDi7Pnl5dGiJu3qs7R4rAcZ24Uc=" + "rev": "0dfd95a49aed617f242c8b06dd5b255d1cb07776", + "hash": "sha256-HBqkqEoyQo3KuRCwP5NW9kuY9maaBYSpjA1lcBdFjxk=" }, "src/third_party/smhasher/src": { "url": "https://chromium.googlesource.com/external/smhasher.git", @@ -646,8 +646,8 @@ }, "src/third_party/swiftshader": { "url": "https://swiftshader.googlesource.com/SwiftShader.git", - "rev": "4982425ff1bdcb2ce52a360edde58a379119bfde", - "hash": "sha256-QTGU9Dgc6rgMeFZvhZyYeYj5W+ClJO8Yfa4+K7TmEec=" + "rev": "7905fa19e456df5aa8e2233a7ec5832c9c6c287b", + "hash": "sha256-Wi8mttxM1fuLqrL2q6qPnpmyAfmDqJGA8Wub+yexFLA=" }, "src/third_party/text-fragments-polyfill/src": { "url": "https://chromium.googlesource.com/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git", @@ -656,18 +656,18 @@ }, "src/third_party/tflite/src": { "url": "https://chromium.googlesource.com/external/github.com/tensorflow/tensorflow.git", - "rev": "c8ed430d092acd485f00e7a9d7a888a0857d0430", - "hash": "sha256-S5zkpQZdhRdnZRUrUfi5FCrF2XFe3y/adAWwfh1OQYE=" + "rev": "42d6877b1aa1cf324eb03ccf9b13511400341deb", + "hash": "sha256-KummGT7CUoGd3lCGXvtSFcFD1FhSlJXDcEi1WKUza70=" }, "src/third_party/vulkan-deps": { "url": "https://chromium.googlesource.com/vulkan-deps", - "rev": "1648e664337ca19a4f8679cbb9547a5b4b926995", - "hash": 
"sha256-CI0X6zbRV/snGcQZOUKQFn8Zo6D6Out6nN027HGZaa8=" + "rev": "96793fb0ff6fb5d4328cc6f71d84f5cb2d835daf", + "hash": "sha256-rAtsw8JV8EwrNzjK5p7JbWQa6fHfpByvZcP71hHC8uM=" }, "src/third_party/glslang/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/glslang", - "rev": "e57f993cff981c8c3ffd38967e030f04d13781a9", - "hash": "sha256-nr7pGPNPMbmL/XnL27M4m5in8qnCDcpNtVsxBAc7zms=" + "rev": "fc9889c889561c5882e83819dcaffef5ed45529b", + "hash": "sha256-HwFP4KJuA+BMQVvBWV0BCRj9U5I3CLEU+5bBtde2f6w=" }, "src/third_party/spirv-cross/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/SPIRV-Cross", @@ -676,38 +676,38 @@ }, "src/third_party/spirv-headers/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/SPIRV-Headers", - "rev": "8c88e0c4c94a21de825efccba5f99a862b049825", - "hash": "sha256-s0Pe7kg5syKhK8qEZH8b7UCDa87Xk32Lh95cQbpLdAc=" + "rev": "bab63ff679c41eb75fc67dac76e1dc44426101e1", + "hash": "sha256-hi4vCwdCnwuYodUYq75niCZt2t9lERQH6529/R+7nH8=" }, "src/third_party/spirv-tools/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/SPIRV-Tools", - "rev": "2e83ad7e6f2cc51f7eaff3ffeb10e34351b3c157", - "hash": "sha256-u4WDbWywua71yWB1cVIt1IDZRe4NnT5bUq3yHLKBgPo=" + "rev": "8e9165a3d162967a424dcf2ff645a98b50381cce", + "hash": "sha256-GsoaeO3FMzMtMStg1Wp0KUHU3Xxmmr7t3lDyu0ervNk=" }, "src/third_party/vulkan-headers/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/Vulkan-Headers", - "rev": "78c359741d855213e8685278eb81bb62599f8e56", - "hash": "sha256-VqKQeJd81feSgYnYLqb2sYirCmnHN9Rr19/4cPZ2TzE=" + "rev": "e2e53a724677f6eba8ff0ce1ccb64ee321785cbd", + "hash": "sha256-lIuJ50zi9UIMrP/FePI8jHFhJ5LsKhthDY4gIHeZNpo=" }, "src/third_party/vulkan-loader/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/Vulkan-Loader", - "rev": "723d6b4aa35853315c6e021ec86388b3a2559fae", - "hash": "sha256-tDW5ed6gsDKlCKf4gT8MNi1yaafocUTohL1upGKB+Cc=" + "rev": "fb78607414e154c7a5c01b23177ba719c8a44909", + "hash": "sha256-CeIjyW90Ri0MvhyFfYgss5Rjh5fHKhQf7CgBEcB/nPk=" }, "src/third_party/vulkan-tools/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/Vulkan-Tools", - "rev": "289efccc7560f2b970e2b4e0f50349da87669311", - "hash": "sha256-Cw7LWBPRbDVlfmeMM4CYEC9xbfqT1wV7yuUcpGMLahs=" + "rev": "0b8196724e4ad28cc7459b82a9b75f252c08cb3e", + "hash": "sha256-oL4lyUH26eO6eJy7EQmuXdt4oy3eQ65fribfMSOZV+8=" }, "src/third_party/vulkan-utility-libraries/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/Vulkan-Utility-Libraries", - "rev": "0d5b49b80f17bca25e7f9321ad4e671a56f70887", - "hash": "sha256-NdvjtdCrNVKY23B4YDL33KB+/9HsSWTVolZJOto8+pc=" + "rev": "4e246c56ec5afb5ad66b9b04374d39ac04675c8e", + "hash": "sha256-MmC4UVa9P/0h7r8IBp1LhP9EztwyZv/ASWKKj8Gk1T8=" }, "src/third_party/vulkan-validation-layers/src": { "url": "https://chromium.googlesource.com/external/github.com/KhronosGroup/Vulkan-ValidationLayers", - "rev": "73d7d74bc979c8a16c823c4eae4ee881153e000a", - "hash": "sha256-2GII+RBRzPZTTib82srUEFDG+CbtPTZ6lX3oDJBC2gU=" + "rev": "cea6ec1cdd37494c1f0fc5619c6c356ac33372fb", + "hash": "sha256-iXQZ6Qpe0li+QeThxMUCn45OufZ8W/qJcejpMb4/gWc=" }, "src/third_party/vulkan_memory_allocator": { "url": "https://chromium.googlesource.com/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git", @@ -716,8 +716,8 @@ }, "src/third_party/wasm_tts_engine/src": { "url": 
"https://chromium.googlesource.com/chromium/wasm-tts-engine", - "rev": "53d2aba6f0cf7db57e17edfc3ff6471871b0c125", - "hash": "sha256-t5eeehwspRLaowEMPLa8/lV5AHamXQBfH/un0DHLVAM=" + "rev": "352880bb49e2410707543c252ef6b94a21b0f47f", + "hash": "sha256-TFkniS4XvP0RlPnI1lv4RxxSY44RUuwCMKmmybENEBw=" }, "src/third_party/wayland/src": { "url": "https://chromium.googlesource.com/external/anongit.freedesktop.org/git/wayland/wayland.git", @@ -751,8 +751,8 @@ }, "src/third_party/webgpu-cts/src": { "url": "https://chromium.googlesource.com/external/github.com/gpuweb/cts.git", - "rev": "92f4eb4dae0f5439f2cdc7ce467d66b10e165f42", - "hash": "sha256-vXyp0+6eyKOzzQbkRa8f8dO+B9cyUCY2hCZEFc7+7lU=" + "rev": "168536ad91bff176bbe31ae692d97f8bfe9fb86d", + "hash": "sha256-HB16HM4Gj+2F26tyN393VmHbGxvKOZ+M949059odN/4=" }, "src/third_party/webpagereplay": { "url": "https://chromium.googlesource.com/webpagereplay.git", @@ -761,8 +761,8 @@ }, "src/third_party/webrtc": { "url": "https://webrtc.googlesource.com/src.git", - "rev": "2c8f5be6924d507ee74191b1aeadcec07f747f21", - "hash": "sha256-cNONf88oSbsdYuSdPiLxgTI973qOP6fb1OKb2WMQMMg=" + "rev": "cec4daea7ed5da94fc38d790bd12694c86865447", + "hash": "sha256-mxRckkiBIpQp2Qxj6fcer3jDftp3wlg+aO4BoUHhyiY=" }, "src/third_party/wuffs/src": { "url": "https://skia.googlesource.com/external/github.com/google/wuffs-mirror-release-c.git", @@ -771,8 +771,8 @@ }, "src/third_party/weston/src": { "url": "https://chromium.googlesource.com/external/anongit.freedesktop.org/git/wayland/weston.git", - "rev": "ccf29cb237c3ed09c5f370f35239c93d07abfdd7", - "hash": "sha256-y2srFaPUOoB2umzpo4+hFfhNlqXM2AoMGOpUy/ZSacg=" + "rev": "4eb10b123b483327214d8da5da67e8bbeeaed8fe", + "hash": "sha256-VNHUAtfTB24SIf2kl+MMXF3rG5cJOPM93WU/sVSIQ1A=" }, "src/third_party/xdg-utils": { "url": "https://chromium.googlesource.com/chromium/deps/xdg-utils.git", @@ -781,18 +781,18 @@ }, "src/third_party/xnnpack/src": { "url": "https://chromium.googlesource.com/external/github.com/google/XNNPACK.git", - "rev": "d6fc3be20b0d3e3742157fa26c5359babaa8bc8b", - "hash": "sha256-p5DjGNH9IR0KPWSFmbsdt2PU+kHgWRAnBw7J9sLV/S8=" + "rev": "474d7e58d4b8f4bd1a98ee74bc57858769f7d925", + "hash": "sha256-UO+nOh7R+3xTSxF2u8dIrv7qn/QmhnDr2J5Ciumj93M=" }, "src/third_party/zstd/src": { "url": "https://chromium.googlesource.com/external/github.com/facebook/zstd.git", - "rev": "ef2bf5781112a4cd6b62ac1817f7842bbdc7ea8f", - "hash": "sha256-hDDNrUXGxG/o1oZnypAnuLyIeM16Hy6x1KacGu9Hhmw=" + "rev": "d654fca78690fa15cceb8058ac47454d914a0e63", + "hash": "sha256-Ginvak0y1CjURT3mQZzdLn3MW9vXxC7T0KLsM6SHDV0=" }, "src/v8": { "url": "https://chromium.googlesource.com/v8/v8.git", - "rev": "5297e56d91816747d539abca52b578e5832135f0", - "hash": "sha256-Fi4pl6xSXkHF4XaQNfNzULVjQZSzDfaHFIyIxH103go=" + "rev": "44fdd9108308773dd3f4fa040de5f4f75edf671f", + "hash": "sha256-BkLOmb97p2NcAIuQiDjIoVAe49h9iv79rC5G8wyD1as=" } } }, From 9ecb156898854647eda171d499401f1f6e27167f Mon Sep 17 00:00:00 2001 From: Anders Kaseorg Date: Tue, 27 May 2025 01:20:39 -0700 Subject: [PATCH 216/220] linuxPackages.openafs: Patch for Linux kernel 6.15 Signed-off-by: Anders Kaseorg --- pkgs/servers/openafs/1.8/module.nix | 36 +++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/pkgs/servers/openafs/1.8/module.nix b/pkgs/servers/openafs/1.8/module.nix index 2ca0e2b9ca51..5e3c9010ed55 100644 --- a/pkgs/servers/openafs/1.8/module.nix +++ b/pkgs/servers/openafs/1.8/module.nix @@ -40,6 +40,42 @@ stdenv.mkDerivation { decode = "base64 -d"; hash = 
"sha256-08jedwZ1KX1RSs8y9sh7BUvv5xK9tlzZ6uBOR4kS0Jo="; }) + # Linux: Add required MODULE_DESCRIPTION + (fetchpatch { + url = "https://gerrit.openafs.org/changes/16372/revisions/39189eba45542376e668636bd79a93ae6a8a7cd2/patch"; + decode = "base64 -d"; + hash = "sha256-j5ckKQvybEvmlnFs5jX8g8Dfw37LYWGnfsl4hnZ3+A4="; + }) + # linux: inode_ops mkdir returns struct dentry * + (fetchpatch { + url = "https://gerrit.openafs.org/changes/16373/revisions/769847e205d5908a0c430f7bcfbd2f48e19f8bf8/patch"; + decode = "base64 -d"; + hash = "sha256-znv5gunyPnJgi4SRFERJudtYFqiS+AVYDWfvr52Ku3s="; + }) + # Linux: Use __filemap_get_folio() + (fetchpatch { + url = "https://gerrit.openafs.org/changes/16374/revisions/f187add554da9e9c52752edbfa98f486f683cf25/patch"; + decode = "base64 -d"; + hash = "sha256-+ay87ThSn6QyPZcN0+oE01Wqbxmz0Z1KXYwocQCvYLg="; + }) + # Linux: Use folio_wait_locked() + (fetchpatch { + url = "https://gerrit.openafs.org/changes/16375/revisions/87a93f6488585553d833e1397e7f0dae0545cb7e/patch"; + decode = "base64 -d"; + hash = "sha256-MOVX2LFe8OBnvsQ2UdLvwKrwztOmnu1rdIou4CF+EBs="; + }) + # cf: Introduce AC_CHECK_LINUX_SYMBOL + (fetchpatch { + url = "https://gerrit.openafs.org/changes/16376/revisions/bab5968d7f4639d4a0cbe81aaa3e9716dda00632/patch"; + decode = "base64 -d"; + hash = "sha256-d6DZqDTW1uEKSB5PsomS4ix9fYYQzdQVmDATKl6n7x4="; + }) + # cf: check for dentry flag macros/enums + (fetchpatch { + url = "https://gerrit.openafs.org/changes/16377/revisions/f791d8ca4804486c656bc7c221076480df39b465/patch"; + decode = "base64 -d"; + hash = "sha256-7B0VJE3FeSQU1ElvXI5zXCPq1JRLAycyhqIQuDdR7xE="; + }) ]; nativeBuildInputs = [ From 3aa5e9ac6bf873c771c87fc982341cc2e749670e Mon Sep 17 00:00:00 2001 From: eljamm Date: Thu, 22 May 2025 22:29:12 +0200 Subject: [PATCH 217/220] linux_xanmod: 6.12.28 -> 6.12.30 --- pkgs/os-specific/linux/kernel/xanmod-kernels.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/os-specific/linux/kernel/xanmod-kernels.nix b/pkgs/os-specific/linux/kernel/xanmod-kernels.nix index ec5c1056188c..a6de49ae3931 100644 --- a/pkgs/os-specific/linux/kernel/xanmod-kernels.nix +++ b/pkgs/os-specific/linux/kernel/xanmod-kernels.nix @@ -14,8 +14,8 @@ let # kernel config in the xanmod version commit variants = { lts = { - version = "6.12.28"; - hash = "sha256-afmAC1tdiggI2qN1Kf9fJiUJqC9d8gzTj5PAg7wsiuw="; + version = "6.12.30"; + hash = "sha256-OqO41rbb3nBegL+ZjgqR0RGwrka6oLv0QOYXJ8Wdpv8="; }; main = { version = "6.14.6"; From 905d8b756529b9682d23d4aacfc5d05ab87ab727 Mon Sep 17 00:00:00 2001 From: eljamm Date: Thu, 22 May 2025 22:29:22 +0200 Subject: [PATCH 218/220] linux_xanmod_latest: 6.14.6 -> 6.14.8 --- pkgs/os-specific/linux/kernel/xanmod-kernels.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/os-specific/linux/kernel/xanmod-kernels.nix b/pkgs/os-specific/linux/kernel/xanmod-kernels.nix index a6de49ae3931..174be9aacc18 100644 --- a/pkgs/os-specific/linux/kernel/xanmod-kernels.nix +++ b/pkgs/os-specific/linux/kernel/xanmod-kernels.nix @@ -18,8 +18,8 @@ let hash = "sha256-OqO41rbb3nBegL+ZjgqR0RGwrka6oLv0QOYXJ8Wdpv8="; }; main = { - version = "6.14.6"; - hash = "sha256-wI383p+g03RrNUkPdo0TnB1n5yZI7bGwcSjB/9HpQH0="; + version = "6.14.8"; + hash = "sha256-ap3NaM/pk+TWg3RBRxFJPwnfv2n042r47bo66jhj3ck="; }; }; From 3d1f29646e4b57ed468d60f9d286cde23a8d1707 Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Wed, 28 May 2025 01:30:43 +0000 Subject: [PATCH 219/220] gqrx: 2.17.6 -> 2.17.7 --- pkgs/applications/radio/gqrx/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/applications/radio/gqrx/default.nix b/pkgs/applications/radio/gqrx/default.nix index 8b2610cf88b6..61e77bba6824 100644 --- a/pkgs/applications/radio/gqrx/default.nix +++ b/pkgs/applications/radio/gqrx/default.nix @@ -32,13 +32,13 @@ assert !(pulseaudioSupport && portaudioSupport); gnuradioMinimal.pkgs.mkDerivation rec { pname = "gqrx"; - version = "2.17.6"; + version = "2.17.7"; src = fetchFromGitHub { owner = "gqrx-sdr"; repo = "gqrx"; rev = "v${version}"; - hash = "sha256-/ykKcwOotu8kn+EpJI+EUeqSkHZ2IrSh+o7lBGeHrZ0="; + hash = "sha256-uvKIxppnNkQge0QE5d1rw0qKo1fT8jwJPTiHilYaT28="; }; nativeBuildInputs = [ From 16cbbb374d78f9156a118ea816382ed3f03b4a9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marijan=20Petri=C4=8Devi=C4=87?= Date: Tue, 27 May 2025 11:24:56 -0500 Subject: [PATCH 220/220] ocamlPackages.lwt_eio: init at 0.5.1 --- .../ocaml-modules/lwt_eio/default.nix | 30 +++++++++++++++++++ pkgs/top-level/ocaml-packages.nix | 2 ++ 2 files changed, 32 insertions(+) create mode 100644 pkgs/development/ocaml-modules/lwt_eio/default.nix diff --git a/pkgs/development/ocaml-modules/lwt_eio/default.nix b/pkgs/development/ocaml-modules/lwt_eio/default.nix new file mode 100644 index 000000000000..2fa9aed6cca1 --- /dev/null +++ b/pkgs/development/ocaml-modules/lwt_eio/default.nix @@ -0,0 +1,30 @@ +{ + lib, + buildDunePackage, + fetchurl, + eio, + lwt, +}: +buildDunePackage rec { + pname = "lwt_eio"; + version = "0.5.1"; + + minimalOCamlVersion = "5.1"; + + src = fetchurl { + url = "https://github.com/ocaml-multicore/${pname}/releases/download/v${version}/${pname}-${version}.tbz"; + hash = "sha256-dlJnhHh4VNO60NZJZqc1HS8wPR95WhdeBJTK37pPbCE="; + }; + + propagatedBuildInputs = [ + eio + lwt + ]; + + meta = { + homepage = "https://github.com/ocaml-multicore/${pname}"; + changelog = "https://github.com/ocaml-multicore/${pname}/raw/v${version}/CHANGES.md"; + description = "Use Lwt libraries from within Eio"; + license = with lib.licenses; [ isc ]; + }; +} diff --git a/pkgs/top-level/ocaml-packages.nix b/pkgs/top-level/ocaml-packages.nix index e9bee00dcd49..54e297664378 100644 --- a/pkgs/top-level/ocaml-packages.nix +++ b/pkgs/top-level/ocaml-packages.nix @@ -1147,6 +1147,8 @@ let lwt-dllist = callPackage ../development/ocaml-modules/lwt-dllist { }; + lwt_eio = callPackage ../development/ocaml-modules/lwt_eio { }; + lwt-exit = callPackage ../development/ocaml-modules/lwt-exit { }; lwt_log = callPackage ../development/ocaml-modules/lwt_log { };