Merge commit 17a47125fac into haskell-updates

This commit is contained in:
sternenseemann 2025-05-05 14:40:48 +02:00
commit e2a42c12a3
1496 changed files with 10112 additions and 15859 deletions

View File

@ -375,16 +375,7 @@ let
nodePackages.prettier
];
inputs =
basePackages
++ lib.optionals stdenv.hostPlatform.isLinux [ inotify-tools ]
++ lib.optionals stdenv.hostPlatform.isDarwin (
with darwin.apple_sdk.frameworks;
[
CoreFoundation
CoreServices
]
);
inputs = basePackages ++ lib.optionals stdenv.hostPlatform.isLinux [ inotify-tools ];
# define shell startup command
hooks = ''

View File

@ -43,6 +43,9 @@
"no-broken-symlinks.sh": [
"index.html#no-broken-symlinks.sh"
],
"nostrictaliasing": [
"index.html#nostrictaliasing"
],
"pkgs-replacevars": [
"index.html#pkgs-replacevars",
"index.html#pkgs-substituteall",

View File

@ -206,6 +206,8 @@
- `mkBinaryCache` now defaults to using `zstd` compression for the binary caches it creates. The previous `xz` compression method can be used by passing `compression = "xz";`.
- `nodejs_latest` was updated from 23.x to 24.x. `nodejs_23` has been removed in favor of `nodejs_24`.
- `nodejs_18` package was removed due to upstream End-of-Life in April 2025.
- `nodePackages."@commitlint/config-conventional"` has been removed, as it is a library, and projects should depend on it instead.

View File

@ -1600,6 +1600,10 @@ This flag adds the `-fstack-clash-protection` compiler option, which causes grow
The following flags are disabled by default and should be enabled with `hardeningEnable` for packages that take untrusted input like network services.
#### `nostrictaliasing` {#nostrictaliasing}
This flag adds the `-fno-strict-aliasing` compiler option, which prevents the compiler from assuming code has been written strictly following the standard in regards to pointer aliasing and therefore performing optimizations that may be unsafe for code that has not followed these rules.
#### `pie` {#pie}
This flag is disabled by default for normal `glibc` based NixOS package builds, but enabled by default for

View File

@ -257,6 +257,11 @@ lib.mapAttrs mkLicense (
fullName = "BSD Protection License";
};
bsdSourceCode = {
spdxId = "BSD-Source-Code";
fullName = "BSD Source Code Attribution";
};
bsl11 = {
spdxId = "BUSL-1.1";
fullName = "Business Source License 1.1";
@ -452,6 +457,11 @@ lib.mapAttrs mkLicense (
fullName = "Common Public License 1.0";
};
cronyx = {
spdxId = "Cronyx";
fullName = "Cronyx License";
};
curl = {
spdxId = "curl";
fullName = "curl License";
@ -937,6 +947,11 @@ lib.mapAttrs mkLicense (
fullName = "MIT No Attribution";
};
mitOpenGroup = {
spdxId = "MIT-open-group";
fullName = "MIT Open Group variant";
};
mpl10 = {
spdxId = "MPL-1.0";
fullName = "Mozilla Public License 1.0";
@ -1328,6 +1343,11 @@ lib.mapAttrs mkLicense (
fullName = "Unicode License Agreement - Data Files and Software (2016)";
};
unicodeTOU = {
spdxId = "Unicode-TOU";
fullName = "Unicode Terms of Use";
};
unlicense = {
spdxId = "Unlicense";
fullName = "The Unlicense";

View File

@ -68,6 +68,16 @@ with lib.maintainers;
];
};
apparmor = {
scope = "AppArmor-related modules, userspace tool packages and profiles";
shortName = "apparmor";
members = [
julm
thoughtpolice
grimmauld
];
};
bazel = {
members = [
mboes

View File

@ -22,6 +22,8 @@
- the global Mesa version can now be managed without a mass rebuild by setting `hardware.graphics.package`
- packages that used to depend on Mesa for libgbm or libdri should use `libgbm` or `dri-pkgconfig-stub` as inputs, respectively
- OpenSSH has been updated from 9.9p2 to 10.0p2, dropping support for DSA keys and adding a new `ssh-auth` binary to handle user authentication in a different address space from unauthenticated sessions. Additionally, we now enable a configure option by default that attempts to lock sshd into RAM to prevent it from being swapped out, which may improve performance if the system is under memory pressure. See the [full changelog](https://www.openwall.com/lists/oss-security/2025/04/09/1) for more details.
- The `intel` video driver for X.org (from the xf86-video-intel package) which was previously removed because it was non-functional has been fixed and the driver has been re-introduced.
- The Mattermost module ({option}`services.mattermost`) and packages (`mattermost` and `mmctl`) have been substantially updated:
@ -164,6 +166,8 @@
- [OliveTin](https://www.olivetin.app/), gives safe and simple access to predefined shell commands from a web interface. Available as [services.olivetin](#opt-services.olivetin.enable).
- [alertmanager-ntfy](https://github.com/alexbakker/alertmanager-ntfy), forwards Prometheus Alertmanager notifications to ntfy.sh. Available as [services.prometheus.alertmanager-ntfy](#opt-services.prometheus.alertmanager-ntfy.enable).
- [Stash](https://github.com/stashapp/stash), An organizer for your adult videos/images, written in Go. Available as [services.stash](#opt-services.stash.enable).
- [vsmartcard-vpcd](https://frankmorgner.github.io/vsmartcard/virtualsmartcard/README.html), a virtual smart card driver. Available as [services.vsmartcard-vpcd](#opt-services.vsmartcard-vpcd.enable).
@ -267,6 +271,8 @@
to review the new defaults and description of
[](#opt-services.nextcloud.poolSettings).
- In `users.users` allocation on systems with multiple users, it could happen that automatically allocated subuid ranges collided with those of other users. Now these users get new subuid ranges assigned. When this happens, a warning is issued on the first activation. If the subuids were used (e.g. with rootless container managers like podman), please change the ownership of affected files accordingly.
- The `services.locate` module no longer supports findutils' `locate` due to its inferior performance compared to `mlocate` and `plocate`. The new default is `plocate`.
As the `services.locate.localuser` option only applied when using findutils' `locate`, it has also been removed.
@ -522,6 +528,8 @@
- `services.avahi.ipv6` now defaults to true.
- A new hardening flag, `nostrictaliasing` was made available, corresponding to the gcc/clang option `-fno-strict-aliasing`.
- In the `services.xserver.displayManager.startx` module, two new options [generateScript](#opt-services.xserver.displayManager.startx.generateScript) and [extraCommands](#opt-services.xserver.displayManager.startx.extraCommands) have been added to declaratively configure the .xinitrc script.
- All services that require a root certificate bundle now use the value of a new read-only option, `security.pki.caBundle`.

View File

@ -54,15 +54,14 @@ sub dry_print {
# Functions for allocating free GIDs/UIDs. FIXME: respect ID ranges in
# /etc/login.defs.
sub allocId {
my ($used, $prevUsed, $idMin, $idMax, $up, $getid) = @_;
my $id = $up ? $idMin : $idMax;
my ($used, $prevUsed, $idMin, $idMax, $delta, $getid) = @_;
my $id = $delta > 0 ? $idMin : $idMax;
while ($id >= $idMin && $id <= $idMax) {
if (!$used->{$id} && !$prevUsed->{$id} && !defined &$getid($id)) {
$used->{$id} = 1;
return $id;
}
$used->{$id} = 1;
if ($up) { $id++; } else { $id--; }
$id += $delta;
}
die "$0: out of free UIDs or GIDs\n";
}
@ -77,19 +76,19 @@ sub allocGid {
$gidsUsed{$prevGid} = 1;
return $prevGid;
}
return allocId(\%gidsUsed, \%gidsPrevUsed, 400, 999, 0, sub { my ($gid) = @_; getgrgid($gid) });
return allocId(\%gidsUsed, \%gidsPrevUsed, 400, 999, -1, sub { my ($gid) = @_; getgrgid($gid) });
}
sub allocUid {
my ($name, $isSystemUser) = @_;
my ($min, $max, $up) = $isSystemUser ? (400, 999, 0) : (1000, 29999, 1);
my ($min, $max, $delta) = $isSystemUser ? (400, 999, -1) : (1000, 29999, 1);
my $prevUid = $uidMap->{$name};
if (defined $prevUid && $prevUid >= $min && $prevUid <= $max && !defined $uidsUsed{$prevUid}) {
dry_print("reviving", "would revive", "user '$name' with UID $prevUid");
$uidsUsed{$prevUid} = 1;
return $prevUid;
}
return allocId(\%uidsUsed, \%uidsPrevUsed, $min, $max, $up, sub { my ($uid) = @_; getpwuid($uid) });
return allocId(\%uidsUsed, \%uidsPrevUsed, $min, $max, $delta, sub { my ($uid) = @_; getpwuid($uid) });
}
# Read the declared users/groups
@ -336,18 +335,14 @@ sub allocSubUid {
my ($name, @rest) = @_;
# TODO: No upper bounds?
my ($min, $max, $up) = (100000, 100000 * 100, 1);
my ($min, $max, $delta) = (100000, 100000 + 100 * 65536, 65536);
my $prevId = $subUidMap->{$name};
if (defined $prevId && !defined $subUidsUsed{$prevId}) {
$subUidsUsed{$prevId} = 1;
return $prevId;
}
my $id = allocId(\%subUidsUsed, \%subUidsPrevUsed, $min, $max, $up, sub { my ($uid) = @_; getpwuid($uid) });
my $offset = $id - 100000;
my $count = $offset * 65536;
my $subordinate = 100000 + $count;
return $subordinate;
return allocId(\%subUidsUsed, \%subUidsPrevUsed, $min, $max, $delta, sub { undef });
}
my @subGids;
@ -367,6 +362,14 @@ foreach my $u (values %usersOut) {
if($u->{autoSubUidGidRange}) {
my $subordinate = allocSubUid($name);
if (defined $subUidMap->{$name} && $subUidMap->{$name} != $subordinate) {
print STDERR "warning: The subuids for '$name' changed, as they coincided with the subuids of a different user (see /etc/subuid). "
. "The range now starts with $subordinate instead of $subUidMap->{$name}. "
. "If the subuids were used (e.g. with rootless container managers like podman), please change the ownership of affected files accordingly. "
. "Alternatively, to keep the old overlapping ranges, add this to the system configuration: "
. "users.users.$name.subUidRanges = [{startUid = $subUidMap->{$name}; count = 65536;}]; "
. "users.users.$name.subGidRanges = [{startGid = $subUidMap->{$name}; count = 65536;}];\n";
}
$subUidMap->{$name} = $subordinate;
my $value = join(":", ($name, $subordinate, 65536));
push @subUids, $value;

View File

@ -985,6 +985,7 @@
./services/monitoring/pgscv.nix
./services/monitoring/prometheus/alertmanager-gotify-bridge.nix
./services/monitoring/prometheus/alertmanager-irc-relay.nix
./services/monitoring/prometheus/alertmanager-ntfy.nix
./services/monitoring/prometheus/alertmanager-webhook-logger.nix
./services/monitoring/prometheus/alertmanager.nix
./services/monitoring/prometheus/default.nix

View File

@ -172,7 +172,7 @@ in
logfiles = /dev/stdin
parser = ${pkgs.apparmor-parser}/bin/apparmor_parser
ldd = ${pkgs.glibc.bin}/bin/ldd
ldd = ${lib.getExe' pkgs.stdenv.cc.libc "ldd"}
logger = ${pkgs.util-linux}/bin/logger
# customize how file ownership permissions are presented
@ -275,8 +275,5 @@ in
};
};
meta.maintainers = with lib.maintainers; [
julm
grimmauld
];
meta.maintainers = lib.teams.apparmor.members;
}

View File

@ -40,7 +40,7 @@ let
# these config files will be merged one after the other to build the final config
configFiles = [
"${pkgs.mjolnir}/libexec/mjolnir/deps/mjolnir/config/default.yaml"
"${pkgs.mjolnir}/lib/node_modules/mjolnir/config/default.yaml"
moduleConfigFile
];

View File

@ -0,0 +1,201 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.prometheus.alertmanager-ntfy;
settingsFormat = pkgs.formats.yaml { };
settingsFile = settingsFormat.generate "settings.yml" cfg.settings;
configsArg = lib.concatStringsSep "," (
[ settingsFile ] ++ lib.imap0 (i: _: "%d/config-${toString i}.yml") cfg.extraConfigFiles
);
in
{
meta.maintainers = with lib.maintainers; [ defelo ];
options.services.prometheus.alertmanager-ntfy = {
enable = lib.mkEnableOption "alertmanager-ntfy";
package = lib.mkPackageOption pkgs "alertmanager-ntfy" { };
settings = lib.mkOption {
description = ''
Configuration of alertmanager-ntfy.
See <https://github.com/alexbakker/alertmanager-ntfy> for more information.
'';
default = { };
type = lib.types.submodule {
freeformType = settingsFormat.type;
options = {
http.addr = lib.mkOption {
type = lib.types.str;
description = "The address to listen on.";
default = "127.0.0.1:8000";
example = ":8000";
};
ntfy = {
baseurl = lib.mkOption {
type = lib.types.str;
description = "The base URL of the ntfy.sh instance.";
example = "https://ntfy.sh";
};
notification = {
topic = lib.mkOption {
type = lib.types.str;
description = ''
The topic to which alerts should be published.
Can either be a hardcoded string or a gval expression that evaluates to a string.
'';
example = "alertmanager";
};
priority = lib.mkOption {
type = lib.types.str;
description = ''
The ntfy.sh message priority (see <https://docs.ntfy.sh/publish/#message-priority> for more information).
Can either be a hardcoded string or a gval expression that evaluates to a string.
'';
default = ''status == "firing" ? "high" : "default"'';
};
tags = lib.mkOption {
type = lib.types.listOf (
lib.types.submodule {
options = {
tag = lib.mkOption {
type = lib.types.str;
description = ''
The tag to add.
See <https://docs.ntfy.sh/emojis> for a list of all supported emojis.
'';
example = "rotating_light";
};
condition = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = ''
The condition under which this tag should be added.
Tags with no condition are always included.
'';
default = null;
example = ''status == "firing"'';
};
};
}
);
description = ''
Tags to add to ntfy.sh messages.
See <https://docs.ntfy.sh/publish/#tags-emojis> for more information.
'';
default = [
{
tag = "green_circle";
condition = ''status == "resolved"'';
}
{
tag = "red_circle";
condition = ''status == "firing"'';
}
];
};
templates = {
title = lib.mkOption {
type = lib.types.str;
description = "The ntfy.sh message title template.";
default = ''
{{ if eq .Status "resolved" }}Resolved: {{ end }}{{ index .Annotations "summary" }}
'';
};
description = lib.mkOption {
type = lib.types.str;
description = "The ntfy.sh message description template.";
default = ''
{{ index .Annotations "description" }}
'';
};
};
};
};
};
};
};
extraConfigFiles = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = [ ];
example = [ "/run/secrets/alertmanager-ntfy.yml" ];
description = ''
Config files to merge into the settings defined in [](#opt-services.prometheus.alertmanager-ntfy.settings).
This is useful to avoid putting secrets into the Nix store.
See <https://github.com/alexbakker/alertmanager-ntfy> for more information.
'';
};
};
config = lib.mkIf cfg.enable {
systemd.services.alertmanager-ntfy = {
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
serviceConfig = {
User = "alertmanager-ntfy";
Group = "alertmanager-ntfy";
DynamicUser = true;
LoadCredential = lib.imap0 (i: path: "config-${toString i}.yml:${path}") cfg.extraConfigFiles;
ExecStart = "${lib.getExe cfg.package} --configs ${configsArg}";
Restart = "always";
RestartSec = 5;
# Hardening
AmbientCapabilities = "";
CapabilityBoundingSet = [ "" ];
DevicePolicy = "closed";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_INET AF_INET6" ];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged"
"~@resources"
];
UMask = "0077";
};
};
};
}

View File

@ -363,7 +363,7 @@ in
chmod 640 ${runConfig}
'';
serviceConfig = rec {
Type = "simple";
Type = "notify";
ExecStart = utils.escapeSystemdExecArgs [
(lib.getExe' pkgs.coturn "turnserver")
"-c"
@ -413,6 +413,7 @@ in
[
"AF_INET"
"AF_INET6"
"AF_UNIX"
]
++ lib.optionals (cfg.listening-ips == [ ]) [
# only used for interface discovery when no listening ips are configured

View File

@ -382,7 +382,7 @@ in
'';
example = literalExpression ''
{
inherit (pkgs.nextcloud31Packages.apps) mail calendar contact;
inherit (pkgs.nextcloud31Packages.apps) mail calendar contacts;
phonetrack = pkgs.fetchNextcloudApp {
name = "phonetrack";
sha256 = "0qf366vbahyl27p9mshfma1as4nvql6w75zy2zk5xwwbp343vsbc";

View File

@ -1117,7 +1117,7 @@ in
private-gpt = handleTest ./private-gpt.nix { };
privatebin = runTest ./privatebin.nix;
privoxy = handleTest ./privoxy.nix { };
prometheus = handleTest ./prometheus { };
prometheus = import ./prometheus { inherit runTest; };
prometheus-exporters = handleTest ./prometheus-exporters.nix { };
prosody = handleTest ./xmpp/prosody.nix { };
prosody-mysql = handleTest ./xmpp/prosody-mysql.nix { };

View File

@ -35,38 +35,6 @@ import ./make-test-python.nix (
];
};
server-x11 =
{ ... }:
{
environment.systemPackages = [ pkgs.xorg.xauth ];
services.openssh = {
enable = true;
settings.X11Forwarding = true;
};
users.users.root.openssh.authorizedKeys.keys = [
snakeOilPublicKey
];
};
server-x11-disable =
{ ... }:
{
environment.systemPackages = [ pkgs.xorg.xauth ];
services.openssh = {
enable = true;
settings = {
X11Forwarding = true;
# CVE-2025-32728: the following line is ineffectual
DisableForwarding = true;
};
};
users.users.root.openssh.authorizedKeys.keys = [
snakeOilPublicKey
];
};
server-allowed-users =
{ ... }:
@ -272,8 +240,6 @@ import ./make-test-python.nix (
start_all()
server.wait_for_unit("sshd", timeout=30)
server_x11.wait_for_unit("sshd", timeout=30)
server_x11_disable.wait_for_unit("sshd", timeout=30)
server_allowed_users.wait_for_unit("sshd", timeout=30)
server_localhost_only.wait_for_unit("sshd", timeout=30)
server_match_rule.wait_for_unit("sshd", timeout=30)
@ -341,16 +307,6 @@ import ./make-test-python.nix (
timeout=30
)
with subtest("x11-forwarding"):
client.succeed(
"[ \"$(ssh -Y -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-x11 'xauth list' | tee /dev/stderr | wc -l)\" -eq 1 ]",
timeout=30
)
client.succeed(
"[ \"$(ssh -Y -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-x11-disable 'xauth list' | tee /dev/stderr | wc -l)\" -eq 0 ]",
timeout=30
)
with subtest("localhost-only"):
server_localhost_only.succeed("ss -nlt | grep '127.0.0.1:22'")
server_localhost_only_lazy.succeed("ss -nlt | grep '127.0.0.1:22'")

View File

@ -117,15 +117,17 @@
xfce.succeed("xdotool click 1")
xfce.wait_for_text("Run Checks")
# Test 5: paretosecurity:// URL handler is registered
xfce.succeed("su - alice -c 'xdg-open paretosecurity://foo'")
# Test 6: Desktop entry
# Test 5: Desktop entry
xfce.succeed("xdotool mousemove 10 10")
xfce.succeed("xdotool click 1") # hide the tray icon window
xfce.succeed("xdotool click 1") # show the Applications menu
xfce.succeed("xdotool mousemove 10 200")
xfce.succeed("xdotool click 1")
xfce.wait_for_text("Pareto Security")
# Test 6: paretosecurity:// URL handler is registered
xfce.execute("su - alice -c 'xdg-open paretosecurity://foo >/dev/null &'")
xfce.wait_for_text("Failed to add device")
'';
}

View File

@ -20,7 +20,6 @@ let
services.postgresql = {
inherit package;
enable = true;
enableJIT = lib.hasInfix "-jit-" package.name;
extensions = ps: [ ps.anonymizer ];
settings.shared_preload_libraries = [ "anon" ];
};

View File

@ -24,7 +24,6 @@ let
services.postgresql = {
inherit package;
enable = true;
enableJIT = lib.hasInfix "-jit-" package.name;
extensions =
ps: with ps; [
pgjwt

View File

@ -51,5 +51,4 @@ let
in
genTests {
inherit makeTestFor;
filter = n: _: lib.hasSuffix "_jit" n;
}

View File

@ -51,7 +51,6 @@ let
services.postgresql = {
inherit package;
enable = true;
enableJIT = lib.hasInfix "-jit-" package.name;
enableTCPIP = true;
ensureUsers = [
{

View File

@ -32,7 +32,6 @@ let
services.postgresql = {
inherit package;
enable = true;
enableJIT = lib.hasInfix "-jit-" package.name;
settings = {
max_replication_slots = 10;
max_wal_senders = 10;

View File

@ -15,41 +15,33 @@ let
postgresql-clauses = makeEnsureTestFor package;
};
test-sql =
enablePLv8Test:
pkgs.writeText "postgresql-test" (
''
CREATE EXTENSION pgcrypto; -- just to check if lib loading works
CREATE TABLE sth (
id int
);
INSERT INTO sth (id) VALUES (1);
INSERT INTO sth (id) VALUES (1);
INSERT INTO sth (id) VALUES (1);
INSERT INTO sth (id) VALUES (1);
INSERT INTO sth (id) VALUES (1);
CREATE TABLE xmltest ( doc xml );
INSERT INTO xmltest (doc) VALUES ('<test>ok</test>'); -- check if libxml2 enabled
''
+ lib.optionalString enablePLv8Test ''
-- check if hardening gets relaxed
CREATE EXTENSION plv8;
-- try to trigger the V8 JIT, which requires MemoryDenyWriteExecute
DO $$
let xs = [];
for (let i = 0, n = 400000; i < n; i++) {
xs.push(Math.round(Math.random() * n))
}
console.log(xs.reduce((acc, x) => acc + x, 0));
$$ LANGUAGE plv8;
''
test-sql = pkgs.writeText "postgresql-test" (''
CREATE EXTENSION pgcrypto; -- just to check if lib loading works
CREATE TABLE sth (
id int
);
INSERT INTO sth (id) VALUES (1);
INSERT INTO sth (id) VALUES (1);
INSERT INTO sth (id) VALUES (1);
INSERT INTO sth (id) VALUES (1);
INSERT INTO sth (id) VALUES (1);
CREATE TABLE xmltest ( doc xml );
INSERT INTO xmltest (doc) VALUES ('<test>ok</test>'); -- check if libxml2 enabled
-- check if hardening gets relaxed
CREATE EXTENSION plv8;
-- try to trigger the V8 JIT, which requires MemoryDenyWriteExecute
DO $$
let xs = [];
for (let i = 0, n = 400000; i < n; i++) {
xs.push(Math.round(Math.random() * n))
}
console.log(xs.reduce((acc, x) => acc + x, 0));
$$ LANGUAGE plv8;
'');
makeTestForWithBackupAll =
package: backupAll:
let
enablePLv8Check = !package.pkgs.plv8.meta.broken;
in
makeTest {
name = "postgresql${lib.optionalString backupAll "-backup-all"}-${package.name}";
meta = with lib.maintainers; {
@ -62,12 +54,9 @@ let
services.postgresql = {
inherit package;
enable = true;
enableJIT = lib.hasInfix "-jit-" package.name;
# plv8 doesn't support postgresql with JIT, so we only run the test
# for the non-jit variant.
# TODO(@Ma27) split this off into its own VM test and move a few other
# extension tests to use postgresqlTestExtension.
extensions = lib.mkIf enablePLv8Check (ps: with ps; [ plv8 ]);
extensions = ps: with ps; [ plv8 ];
};
services.postgresqlBackup = {
@ -94,7 +83,7 @@ let
with subtest("Postgresql is available just after unit start"):
machine.succeed(
"cat ${test-sql enablePLv8Check} | sudo -u postgres psql"
"cat ${test-sql} | sudo -u postgres psql"
)
with subtest("Postgresql survives restart (bug #1735)"):
@ -184,7 +173,6 @@ let
services.postgresql = {
inherit package;
enable = true;
enableJIT = lib.hasInfix "-jit-" package.name;
ensureUsers = [
{
name = "all-clauses";

View File

@ -17,7 +17,6 @@ let
services.postgresql = {
inherit package;
enable = true;
enableJIT = lib.hasInfix "-jit-" package.name;
extensions = with package.pkgs; [ wal2json ];
settings = {
wal_level = "logical";

View File

@ -0,0 +1,99 @@
{ lib, ... }:
let
ports = {
alertmanager-ntfy = 8000;
ntfy-sh = 8001;
alertmanager = 8002;
};
in
{
name = "alertmanager-ntfy";
meta.maintainers = with lib.maintainers; [ defelo ];
nodes.machine = {
services.prometheus.alertmanager = {
enable = true;
listenAddress = "127.0.0.1";
port = ports.alertmanager;
configuration = {
route = {
receiver = "test";
group_by = [ "..." ];
group_wait = "0s";
group_interval = "1s";
repeat_interval = "2h";
};
receivers = [
{
name = "test";
webhook_configs = [ { url = "http://127.0.0.1:${toString ports.alertmanager-ntfy}/hook"; } ];
}
];
};
};
services.prometheus.alertmanager-ntfy = {
enable = true;
settings = {
http.addr = "127.0.0.1:${toString ports.alertmanager-ntfy}";
ntfy = {
baseurl = "http://127.0.0.1:${toString ports.ntfy-sh}";
notification.topic = "alertmanager";
};
};
};
services.ntfy-sh = {
enable = true;
settings = {
listen-http = "127.0.0.1:${toString ports.ntfy-sh}";
base-url = "http://127.0.0.1:${toString ports.ntfy-sh}";
};
};
};
interactive.nodes.machine = {
services.prometheus.alertmanager.listenAddress = lib.mkForce "0.0.0.0";
services.prometheus.alertmanager-ntfy.settings.http.addr =
lib.mkForce "0.0.0.0:${toString ports.alertmanager-ntfy}";
services.ntfy-sh.settings.listen-http = lib.mkForce "0.0.0.0:${toString ports.ntfy-sh}";
networking.firewall.enable = false;
virtualisation.forwardPorts = lib.mapAttrsToList (_: port: {
from = "host";
host = { inherit port; };
guest = { inherit port; };
}) ports;
};
testScript = ''
import json
import time
machine.wait_for_unit("alertmanager.service")
machine.wait_for_unit("alertmanager-ntfy.service")
machine.wait_for_unit("ntfy-sh.service")
machine.wait_for_open_port(${toString ports.alertmanager})
machine.wait_for_open_port(${toString ports.alertmanager-ntfy})
machine.wait_for_open_port(${toString ports.ntfy-sh})
machine.succeed("""curl 127.0.0.1:${toString ports.alertmanager}/api/v2/alerts \
-X POST -H 'Content-Type: application/json' \
-d '[{ \
"labels": {"alertname": "test"},
"annotations": {"summary": "alert summary", "description": "alert description"} \
}]'""")
while not (resp := machine.succeed("curl '127.0.0.1:${toString ports.ntfy-sh}/alertmanager/json?poll=1'")):
time.sleep(1)
msg = json.loads(resp)
assert msg["title"] == "alert summary"
assert msg["message"] == "alert description"
assert msg["priority"] == 4
assert "red_circle" in msg["tags"]
'';
}

View File

@ -1,160 +1,146 @@
import ../make-test-python.nix (
{ lib, pkgs, ... }:
{ pkgs, ... }:
{
name = "prometheus-alertmanager";
{
name = "prometheus-alertmanager";
nodes = {
prometheus =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
nodes = {
prometheus =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
alertmanagers = [
{
scheme = "http";
static_configs = [
{
targets = [
"alertmanager:${toString config.services.prometheus.alertmanager.port}"
];
}
];
}
];
rules = [
''
groups:
- name: test
rules:
- alert: InstanceDown
expr: up == 0
for: 5s
labels:
severity: page
annotations:
summary: "Instance {{ $labels.instance }} down"
''
];
scrapeConfigs = [
{
job_name = "alertmanager";
static_configs = [
{
targets = [
"alertmanager:${toString config.services.prometheus.alertmanager.port}"
];
}
];
}
{
job_name = "node";
static_configs = [
{
targets = [
"node:${toString config.services.prometheus.exporters.node.port}"
];
}
];
}
];
};
};
alertmanager =
{ config, pkgs, ... }:
{
services.prometheus.alertmanager = {
enable = true;
openFirewall = true;
configuration = {
global = {
resolve_timeout = "1m";
};
route = {
# Root route node
receiver = "test";
group_by = [ "..." ];
continue = false;
group_wait = "1s";
group_interval = "15s";
repeat_interval = "24h";
};
receivers = [
{
name = "test";
webhook_configs = [
{
url = "http://logger:6725";
send_resolved = true;
max_alerts = 0;
}
];
}
alertmanagers = [
{
scheme = "http";
static_configs = [
{ targets = [ "alertmanager:${toString config.services.prometheus.alertmanager.port}" ]; }
];
}
];
rules = [
''
groups:
- name: test
rules:
- alert: InstanceDown
expr: up == 0
for: 5s
labels:
severity: page
annotations:
summary: "Instance {{ $labels.instance }} down"
''
];
scrapeConfigs = [
{
job_name = "alertmanager";
static_configs = [
{ targets = [ "alertmanager:${toString config.services.prometheus.alertmanager.port}" ]; }
];
}
{
job_name = "node";
static_configs = [
{ targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ]; }
];
}
];
};
};
alertmanager =
{ config, pkgs, ... }:
{
services.prometheus.alertmanager = {
enable = true;
openFirewall = true;
configuration = {
global = {
resolve_timeout = "1m";
};
route = {
# Root route node
receiver = "test";
group_by = [ "..." ];
continue = false;
group_wait = "1s";
group_interval = "15s";
repeat_interval = "24h";
};
receivers = [
{
name = "test";
webhook_configs = [
{
url = "http://logger:6725";
send_resolved = true;
max_alerts = 0;
}
];
}
];
};
};
};
logger =
{ config, pkgs, ... }:
{
networking.firewall.allowedTCPPorts = [ 6725 ];
logger =
{ config, pkgs, ... }:
{
networking.firewall.allowedTCPPorts = [ 6725 ];
services.prometheus.alertmanagerWebhookLogger.enable = true;
};
};
services.prometheus.alertmanagerWebhookLogger.enable = true;
};
};
testScript = ''
alertmanager.wait_for_unit("alertmanager")
alertmanager.wait_for_open_port(9093)
alertmanager.wait_until_succeeds("curl -s http://127.0.0.1:9093/-/ready")
#alertmanager.wait_until_succeeds("journalctl -o cat -u alertmanager.service | grep 'version=${pkgs.prometheus-alertmanager.version}'")
testScript = ''
alertmanager.wait_for_unit("alertmanager")
alertmanager.wait_for_open_port(9093)
alertmanager.wait_until_succeeds("curl -s http://127.0.0.1:9093/-/ready")
#alertmanager.wait_until_succeeds("journalctl -o cat -u alertmanager.service | grep 'version=${pkgs.prometheus-alertmanager.version}'")
logger.wait_for_unit("alertmanager-webhook-logger")
logger.wait_for_open_port(6725)
logger.wait_for_unit("alertmanager-webhook-logger")
logger.wait_for_open_port(6725)
prometheus.wait_for_unit("prometheus")
prometheus.wait_for_open_port(9090)
prometheus.wait_for_unit("prometheus")
prometheus.wait_for_open_port(9090)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"alertmanager\"\}==1)' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"alertmanager\"\}==1)' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(alertmanager_build_info)%20by%20(version)' | "
+ "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-alertmanager.version}\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(alertmanager_build_info)%20by%20(version)' | "
+ "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-alertmanager.version}\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\}!=1)' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\}!=1)' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=alertmanager_notifications_total\{integration=\"webhook\"\}' | "
+ "jq '.data.result[0].value[1]' | grep -v '\"0\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=alertmanager_notifications_total\{integration=\"webhook\"\}' | "
+ "jq '.data.result[0].value[1]' | grep -v '\"0\"'"
)
logger.wait_until_succeeds(
"journalctl -o cat -u alertmanager-webhook-logger.service | grep '\"alertname\":\"InstanceDown\"'"
)
logger.wait_until_succeeds(
"journalctl -o cat -u alertmanager-webhook-logger.service | grep '\"alertname\":\"InstanceDown\"'"
)
logger.log(logger.succeed("systemd-analyze security alertmanager-webhook-logger.service | grep -v ''"))
logger.log(logger.succeed("systemd-analyze security alertmanager-webhook-logger.service | grep -v ''"))
alertmanager.log(alertmanager.succeed("systemd-analyze security alertmanager.service | grep -v ''"))
'';
}
)
alertmanager.log(alertmanager.succeed("systemd-analyze security alertmanager.service | grep -v ''"))
'';
}

View File

@ -1,120 +1,108 @@
import ../make-test-python.nix (
{ lib, pkgs, ... }:
{
name = "prometheus-config-reload";
{
name = "prometheus-config-reload";
nodes = {
prometheus =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
nodes = {
prometheus =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
enableReload = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "prometheus";
static_configs = [ { targets = [ "prometheus:${toString config.services.prometheus.port}" ]; } ];
}
];
};
services.prometheus = {
enable = true;
enableReload = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "prometheus";
static_configs = [
specialisation = {
"prometheus-config-change" = {
configuration = {
environment.systemPackages = [ pkgs.yq ];
# This configuration just adds a new prometheus job
# to scrape the node_exporter metrics of the s3 machine.
services.prometheus = {
scrapeConfigs = [
{
targets = [
"prometheus:${toString config.services.prometheus.port}"
job_name = "node";
static_configs = [
{ targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ]; }
];
}
];
}
];
};
specialisation = {
"prometheus-config-change" = {
configuration = {
environment.systemPackages = [ pkgs.yq ];
# This configuration just adds a new prometheus job
# to scrape the node_exporter metrics of the s3 machine.
services.prometheus = {
scrapeConfigs = [
{
job_name = "node";
static_configs = [
{
targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ];
}
];
}
];
};
};
};
};
};
};
};
};
testScript = ''
prometheus.wait_for_unit("prometheus")
prometheus.wait_for_open_port(9090)
testScript = ''
prometheus.wait_for_unit("prometheus")
prometheus.wait_for_open_port(9090)
# Check if switching to a NixOS configuration that changes the prometheus
# configuration reloads (instead of restarts) prometheus before the switch
# finishes successfully:
with subtest("config change reloads prometheus"):
import json
# We check if prometheus has finished reloading by looking for the message
# "Completed loading of configuration file" in the journal between the start
# and finish of switching to the new NixOS configuration.
#
# To mark the start we record the journal cursor before starting the switch:
cursor_before_switching = json.loads(
prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
)["__CURSOR"]
# Check if switching to a NixOS configuration that changes the prometheus
# configuration reloads (instead of restarts) prometheus before the switch
# finishes successfully:
with subtest("config change reloads prometheus"):
import json
# We check if prometheus has finished reloading by looking for the message
# "Completed loading of configuration file" in the journal between the start
# and finish of switching to the new NixOS configuration.
#
# To mark the start we record the journal cursor before starting the switch:
cursor_before_switching = json.loads(
prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
)["__CURSOR"]
# Now we switch:
prometheus_config_change = prometheus.succeed(
"readlink /run/current-system/specialisation/prometheus-config-change"
).strip()
prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")
# Now we switch:
prometheus_config_change = prometheus.succeed(
"readlink /run/current-system/specialisation/prometheus-config-change"
).strip()
prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")
# Next we retrieve all logs since the start of switching:
logs_after_starting_switching = prometheus.succeed(
"""
journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
""".format(
cursor_before_switching=cursor_before_switching
)
)
# Finally we check if the message "Completed loading of configuration file"
# occurs before the "finished switching to system configuration" message:
finished_switching_msg = (
"finished switching to system configuration " + prometheus_config_change
)
reloaded_before_switching_finished = False
finished_switching = False
for log_line in logs_after_starting_switching.split("\n"):
msg = json.loads(log_line)["MESSAGE"]
if "Completed loading of configuration file" in msg:
reloaded_before_switching_finished = True
if msg == finished_switching_msg:
finished_switching = True
break
assert reloaded_before_switching_finished
assert finished_switching
# Check if the reloaded config includes the new node job:
prometheus.succeed(
# Next we retrieve all logs since the start of switching:
logs_after_starting_switching = prometheus.succeed(
"""
curl -sf http://127.0.0.1:9090/api/v1/status/config \
| jq -r .data.yaml \
| yq '.scrape_configs | any(.job_name == "node")' \
| grep true
"""
)
'';
}
)
journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
""".format(
cursor_before_switching=cursor_before_switching
)
)
# Finally we check if the message "Completed loading of configuration file"
# occurs before the "finished switching to system configuration" message:
finished_switching_msg = (
"finished switching to system configuration " + prometheus_config_change
)
reloaded_before_switching_finished = False
finished_switching = False
for log_line in logs_after_starting_switching.split("\n"):
msg = json.loads(log_line)["MESSAGE"]
if "Completed loading of configuration file" in msg:
reloaded_before_switching_finished = True
if msg == finished_switching_msg:
finished_switching = True
break
assert reloaded_before_switching_finished
assert finished_switching
# Check if the reloaded config includes the new node job:
prometheus.succeed(
"""
curl -sf http://127.0.0.1:9090/api/v1/status/config \
| jq -r .data.yaml \
| yq '.scrape_configs | any(.job_name == "node")' \
| grep true
"""
)
'';
}

View File

@ -1,14 +1,11 @@
{
system ? builtins.currentSystem,
config ? { },
pkgs ? import ../../.. { inherit system config; },
}:
{ runTest }:
{
alertmanager = import ./alertmanager.nix { inherit system pkgs; };
config-reload = import ./config-reload.nix { inherit system pkgs; };
federation = import ./federation.nix { inherit system pkgs; };
prometheus-pair = import ./prometheus-pair.nix { inherit system pkgs; };
pushgateway = import ./pushgateway.nix { inherit system pkgs; };
remote-write = import ./remote-write.nix { inherit system pkgs; };
alertmanager = runTest ./alertmanager.nix;
alertmanager-ntfy = runTest ./alertmanager-ntfy.nix;
config-reload = runTest ./config-reload.nix;
federation = runTest ./federation.nix;
prometheus-pair = runTest ./prometheus-pair.nix;
pushgateway = runTest ./pushgateway.nix;
remote-write = runTest ./remote-write.nix;
}

View File

@ -1,227 +1,203 @@
import ../make-test-python.nix (
{ lib, pkgs, ... }:
{
name = "prometheus-federation";
{
name = "prometheus-federation";
nodes = {
global1 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
nodes = {
global1 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "federate";
honor_labels = true;
metrics_path = "/federate";
scrapeConfigs = [
{
job_name = "federate";
honor_labels = true;
metrics_path = "/federate";
params = {
"match[]" = [
"{job=\"node\"}"
"{job=\"prometheus\"}"
];
};
params = {
"match[]" = [
"{job=\"node\"}"
"{job=\"prometheus\"}"
static_configs = [
{
targets = [
"prometheus1:${toString config.services.prometheus.port}"
"prometheus2:${toString config.services.prometheus.port}"
];
};
static_configs = [
{
targets = [
"prometheus1:${toString config.services.prometheus.port}"
"prometheus2:${toString config.services.prometheus.port}"
];
}
];
}
{
job_name = "prometheus";
static_configs = [
{
targets = [
"global1:${toString config.services.prometheus.port}"
"global2:${toString config.services.prometheus.port}"
];
}
];
}
];
};
};
global2 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "federate";
honor_labels = true;
metrics_path = "/federate";
params = {
"match[]" = [
"{job=\"node\"}"
"{job=\"prometheus\"}"
}
];
}
{
job_name = "prometheus";
static_configs = [
{
targets = [
"global1:${toString config.services.prometheus.port}"
"global2:${toString config.services.prometheus.port}"
];
};
static_configs = [
{
targets = [
"prometheus1:${toString config.services.prometheus.port}"
"prometheus2:${toString config.services.prometheus.port}"
];
}
];
}
{
job_name = "prometheus";
static_configs = [
{
targets = [
"global1:${toString config.services.prometheus.port}"
"global2:${toString config.services.prometheus.port}"
];
}
];
}
];
};
}
];
}
];
};
};
prometheus1 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
global2 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "node";
static_configs = [
{
targets = [
"node1:${toString config.services.prometheus.exporters.node.port}"
];
}
scrapeConfigs = [
{
job_name = "federate";
honor_labels = true;
metrics_path = "/federate";
params = {
"match[]" = [
"{job=\"node\"}"
"{job=\"prometheus\"}"
];
}
{
job_name = "prometheus";
static_configs = [
{
targets = [
"prometheus1:${toString config.services.prometheus.port}"
];
}
];
}
];
};
};
static_configs = [
{
targets = [
"prometheus1:${toString config.services.prometheus.port}"
"prometheus2:${toString config.services.prometheus.port}"
];
}
];
}
{
job_name = "prometheus";
static_configs = [
{
targets = [
"global1:${toString config.services.prometheus.port}"
"global2:${toString config.services.prometheus.port}"
];
}
];
}
];
};
};
prometheus2 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
prometheus1 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "node";
static_configs = [
{
targets = [
"node2:${toString config.services.prometheus.exporters.node.port}"
];
}
];
}
{
job_name = "prometheus";
static_configs = [
{
targets = [
"prometheus2:${toString config.services.prometheus.port}"
];
}
];
}
];
};
scrapeConfigs = [
{
job_name = "node";
static_configs = [
{ targets = [ "node1:${toString config.services.prometheus.exporters.node.port}" ]; }
];
}
{
job_name = "prometheus";
static_configs = [ { targets = [ "prometheus1:${toString config.services.prometheus.port}" ]; } ];
}
];
};
};
node1 =
{ config, pkgs, ... }:
{
services.prometheus.exporters.node = {
enable = true;
openFirewall = true;
};
prometheus2 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "node";
static_configs = [
{ targets = [ "node2:${toString config.services.prometheus.exporters.node.port}" ]; }
];
}
{
job_name = "prometheus";
static_configs = [ { targets = [ "prometheus2:${toString config.services.prometheus.port}" ]; } ];
}
];
};
};
node2 =
{ config, pkgs, ... }:
{
services.prometheus.exporters.node = {
enable = true;
openFirewall = true;
};
node1 =
{ config, pkgs, ... }:
{
services.prometheus.exporters.node = {
enable = true;
openFirewall = true;
};
};
};
testScript = ''
for machine in node1, node2:
machine.wait_for_unit("prometheus-node-exporter")
machine.wait_for_open_port(9100)
node2 =
{ config, pkgs, ... }:
{
services.prometheus.exporters.node = {
enable = true;
openFirewall = true;
};
};
};
for machine in prometheus1, prometheus2, global1, global2:
machine.wait_for_unit("prometheus")
machine.wait_for_open_port(9090)
testScript = ''
for machine in node1, node2:
machine.wait_for_unit("prometheus-node-exporter")
machine.wait_for_open_port(9100)
# Verify both servers got the same data from the exporter
for machine in prometheus1, prometheus2:
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
for machine in prometheus1, prometheus2, global1, global2:
machine.wait_for_unit("prometheus")
machine.wait_for_open_port(9090)
for machine in global1, global2:
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
+ "jq '.data.result[0].value[1]' | grep '\"2\"'"
)
# Verify both servers got the same data from the exporter
for machine in prometheus1, prometheus2:
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
+ "jq '.data.result[0].value[1]' | grep '\"4\"'"
)
'';
}
)
for machine in global1, global2:
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
+ "jq '.data.result[0].value[1]' | grep '\"2\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
+ "jq '.data.result[0].value[1]' | grep '\"4\"'"
)
'';
}

View File

@ -1,93 +1,91 @@
import ../make-test-python.nix (
{ lib, pkgs, ... }:
{ pkgs, ... }:
{
name = "prometheus-pair";
{
name = "prometheus-pair";
nodes = {
prometheus1 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
nodes = {
prometheus1 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "prometheus";
static_configs = [
{
targets = [
"prometheus1:${toString config.services.prometheus.port}"
"prometheus2:${toString config.services.prometheus.port}"
];
}
];
}
];
};
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "prometheus";
static_configs = [
{
targets = [
"prometheus1:${toString config.services.prometheus.port}"
"prometheus2:${toString config.services.prometheus.port}"
];
}
];
}
];
};
};
prometheus2 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
prometheus2 =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "prometheus";
static_configs = [
{
targets = [
"prometheus1:${toString config.services.prometheus.port}"
"prometheus2:${toString config.services.prometheus.port}"
];
}
];
}
];
};
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "prometheus";
static_configs = [
{
targets = [
"prometheus1:${toString config.services.prometheus.port}"
"prometheus2:${toString config.services.prometheus.port}"
];
}
];
}
];
};
};
};
};
testScript = ''
for machine in prometheus1, prometheus2:
machine.wait_for_unit("prometheus")
machine.wait_for_open_port(9090)
machine.wait_until_succeeds("journalctl -o cat -u prometheus.service | grep 'version=${pkgs.prometheus.version}'")
machine.wait_until_succeeds("curl -sSf http://localhost:9090/-/healthy")
testScript = ''
for machine in prometheus1, prometheus2:
machine.wait_for_unit("prometheus")
machine.wait_for_open_port(9090)
machine.wait_until_succeeds("journalctl -o cat -u prometheus.service | grep 'version=${pkgs.prometheus.version}'")
machine.wait_until_succeeds("curl -sSf http://localhost:9090/-/healthy")
# Prometheii ready - run some queries
for machine in prometheus1, prometheus2:
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\",version=\"${pkgs.prometheus.version}\"\}' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
# Prometheii ready - run some queries
for machine in prometheus1, prometheus2:
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\",version=\"${pkgs.prometheus.version}\"\}' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\"\}' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\"\}' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
+ "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus.version}\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
+ "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus.version}\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
+ "jq '.data.result[0].value[1]' | grep '\"2\"'"
)
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
+ "jq '.data.result[0].value[1]' | grep '\"2\"'"
)
prometheus1.log(prometheus1.succeed("systemd-analyze security prometheus.service | grep -v ''"))
'';
}
)
prometheus1.log(prometheus1.succeed("systemd-analyze security prometheus.service | grep -v ''"))
'';
}

View File

@ -1,102 +1,91 @@
import ../make-test-python.nix (
{ lib, pkgs, ... }:
{ pkgs, ... }:
{
name = "prometheus-pushgateway";
{
name = "prometheus-pushgateway";
nodes = {
prometheus =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
nodes = {
prometheus =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
scrapeConfigs = [
{
job_name = "pushgateway";
static_configs = [
{
targets = [
"pushgateway:9091"
];
}
];
}
];
};
scrapeConfigs = [
{
job_name = "pushgateway";
static_configs = [ { targets = [ "pushgateway:9091" ]; } ];
}
];
};
};
pushgateway =
{ config, pkgs, ... }:
{
networking.firewall.allowedTCPPorts = [ 9091 ];
pushgateway =
{ config, pkgs, ... }:
{
networking.firewall.allowedTCPPorts = [ 9091 ];
services.prometheus.pushgateway = {
enable = true;
};
services.prometheus.pushgateway = {
enable = true;
};
};
client =
{ config, pkgs, ... }:
{
};
};
client = { config, pkgs, ... }: { };
};
testScript = ''
pushgateway.wait_for_unit("pushgateway")
pushgateway.wait_for_open_port(9091)
pushgateway.wait_until_succeeds("curl -s http://127.0.0.1:9091/-/ready")
pushgateway.wait_until_succeeds("journalctl -o cat -u pushgateway.service | grep 'version=${pkgs.prometheus-pushgateway.version}'")
testScript = ''
pushgateway.wait_for_unit("pushgateway")
pushgateway.wait_for_open_port(9091)
pushgateway.wait_until_succeeds("curl -s http://127.0.0.1:9091/-/ready")
pushgateway.wait_until_succeeds("journalctl -o cat -u pushgateway.service | grep 'version=${pkgs.prometheus-pushgateway.version}'")
prometheus.wait_for_unit("prometheus")
prometheus.wait_for_open_port(9090)
prometheus.wait_for_unit("prometheus")
prometheus.wait_for_open_port(9090)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"pushgateway\"\})' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"pushgateway\"\})' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(pushgateway_build_info)%20by%20(version)' | "
+ "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-pushgateway.version}\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(pushgateway_build_info)%20by%20(version)' | "
+ "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-pushgateway.version}\"'"
)
# Add a metric and check in Prometheus
client.wait_until_succeeds(
"echo 'some_metric 3.14' | curl --data-binary @- http://pushgateway:9091/metrics/job/some_job"
)
# Add a metric and check in Prometheus
client.wait_until_succeeds(
"echo 'some_metric 3.14' | curl --data-binary @- http://pushgateway:9091/metrics/job/some_job"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
+ "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
+ "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
+ "jq '.data.result[0].value[1]' | grep 'null'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
+ "jq '.data.result[0].value[1]' | grep 'null'"
)
# Delete the metric, check not in Prometheus
client.wait_until_succeeds(
"curl -X DELETE http://pushgateway:9091/metrics/job/some_job"
)
# Delete the metric, check not in Prometheus
client.wait_until_succeeds(
"curl -X DELETE http://pushgateway:9091/metrics/job/some_job"
)
prometheus.wait_until_fails(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
+ "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
)
prometheus.wait_until_fails(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
+ "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
prometheus.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
pushgateway.log(pushgateway.succeed("systemd-analyze security pushgateway.service | grep -v ''"))
'';
}
)
pushgateway.log(pushgateway.succeed("systemd-analyze security pushgateway.service | grep -v ''"))
'';
}

View File

@ -1,81 +1,69 @@
import ../make-test-python.nix (
{ lib, pkgs, ... }:
{
name = "prometheus-remote-write";
{
name = "prometheus-remote-write";
nodes = {
receiver =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
nodes = {
receiver =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
extraFlags = [ "--web.enable-remote-write-receiver" ];
};
extraFlags = [ "--web.enable-remote-write-receiver" ];
};
};
prometheus =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
prometheus =
{ config, pkgs, ... }:
{
environment.systemPackages = [ pkgs.jq ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
services.prometheus = {
enable = true;
globalConfig.scrape_interval = "2s";
remoteWrite = [
{
url = "http://receiver:9090/api/v1/write";
}
];
remoteWrite = [ { url = "http://receiver:9090/api/v1/write"; } ];
scrapeConfigs = [
{
job_name = "node";
static_configs = [
{
targets = [
"node:${toString config.services.prometheus.exporters.node.port}"
];
}
];
}
];
};
scrapeConfigs = [
{
job_name = "node";
static_configs = [
{ targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ]; }
];
}
];
};
};
node =
{ config, pkgs, ... }:
{
services.prometheus.exporters.node = {
enable = true;
openFirewall = true;
};
node =
{ config, pkgs, ... }:
{
services.prometheus.exporters.node = {
enable = true;
openFirewall = true;
};
};
};
};
testScript = ''
node.wait_for_unit("prometheus-node-exporter")
node.wait_for_open_port(9100)
testScript = ''
node.wait_for_unit("prometheus-node-exporter")
node.wait_for_open_port(9100)
for machine in prometheus, receiver:
machine.wait_for_unit("prometheus")
machine.wait_for_open_port(9090)
for machine in prometheus, receiver:
machine.wait_for_unit("prometheus")
machine.wait_for_open_port(9090)
# Verify both servers got the same data from the exporter
for machine in prometheus, receiver:
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=node_exporter_build_info\{instance=\"node:9100\"\}' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
'';
}
)
# Verify both servers got the same data from the exporter
for machine in prometheus, receiver:
machine.wait_until_succeeds(
"curl -sf 'http://127.0.0.1:9090/api/v1/query?query=node_exporter_build_info\{instance=\"node:9100\"\}' | "
+ "jq '.data.result[0].value[1]' | grep '\"1\"'"
)
'';
}

View File

@ -2,7 +2,6 @@
fetchurl,
lib,
stdenv,
IOKit ? null,
}:
stdenv.mkDerivation rec {
@ -19,8 +18,6 @@ stdenv.mkDerivation rec {
"INSTALL=install"
];
buildInputs = [ ] ++ lib.optional stdenv.hostPlatform.isDarwin IOKit;
meta = with lib; {
homepage = "http://linukz.org/cd-discid.shtml";
license = licenses.gpl2Plus;

View File

@ -9,13 +9,6 @@
which,
DarwinTools,
xcbuild,
AppKit,
Carbon,
CoreAudio,
CoreMIDI,
CoreServices,
Kernel,
MultitouchSupport,
}:
stdenv.mkDerivation rec {
@ -38,18 +31,7 @@ stdenv.mkDerivation rec {
xcbuild
];
buildInputs =
[ libsndfile ]
++ lib.optional (!stdenv.hostPlatform.isDarwin) alsa-lib
++ lib.optionals stdenv.hostPlatform.isDarwin [
AppKit
Carbon
CoreAudio
CoreMIDI
CoreServices
Kernel
MultitouchSupport
];
buildInputs = [ libsndfile ] ++ lib.optional (!stdenv.hostPlatform.isDarwin) alsa-lib;
patches = [ ./darwin-limits.patch ];

View File

@ -6,9 +6,6 @@
ncurses,
pkg-config,
libiconv,
CoreAudio,
AudioUnit,
VideoToolbox,
alsaSupport ? stdenv.hostPlatform.isLinux,
alsa-lib ? null,
@ -152,9 +149,6 @@ stdenv.mkDerivation rec {
[ ncurses ]
++ lib.optionals stdenv.hostPlatform.isDarwin [
libiconv
CoreAudio
AudioUnit
VideoToolbox
]
++ lib.flatten (lib.concatMap (a: a.deps) opts);

View File

@ -9,10 +9,6 @@
bison,
boost,
gettext,
Accelerate,
AudioUnit,
CoreAudio,
CoreMIDI,
portaudio,
alsa-lib ? null,
libpulseaudio ? null,
@ -61,10 +57,6 @@ stdenv.mkDerivation {
boost
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Accelerate
AudioUnit
CoreAudio
CoreMIDI
portaudio
]
++ lib.optionals stdenv.hostPlatform.isLinux (

View File

@ -17,9 +17,6 @@
pcaudiolib,
sonicSupport ? true,
sonic,
CoreAudio,
AudioToolbox,
AudioUnit,
alsa-plugins,
makeWrapper,
}:
@ -63,12 +60,7 @@ stdenv.mkDerivation rec {
buildInputs =
lib.optional mbrolaSupport mbrola
++ lib.optional pcaudiolibSupport pcaudiolib
++ lib.optional sonicSupport sonic
++ lib.optionals stdenv.hostPlatform.isDarwin [
CoreAudio
AudioToolbox
AudioUnit
];
++ lib.optional sonicSupport sonic;
# touch ChangeLog to avoid below error on darwin:
# Makefile.am: error: required file './ChangeLog.md' not found

View File

@ -7,10 +7,6 @@
alsa-lib,
SDL2,
libiconv,
CoreAudio,
CoreMIDI,
CoreServices,
Cocoa,
}:
stdenv.mkDerivation rec {
@ -30,10 +26,6 @@ stdenv.mkDerivation rec {
++ lib.optional stdenv.hostPlatform.isLinux alsa-lib
++ lib.optionals stdenv.hostPlatform.isDarwin [
libiconv
CoreAudio
CoreMIDI
CoreServices
Cocoa
];
passthru.tests = {

View File

@ -16,7 +16,6 @@
libicns,
yaml-cpp,
makeWrapper,
Cocoa,
includeDemo ? true,
}:
@ -54,7 +53,6 @@ stdenv.mkDerivation rec {
alsa-lib
udev
]
++ lib.optionals stdenv.hostPlatform.isDarwin [ Cocoa ]
++ lib.optional jackaudioSupport libjack2;
cmakeFlags =

View File

@ -7,9 +7,6 @@
openssl,
libiconv,
sqlite,
Security,
SystemConfiguration,
CoreFoundation,
installShellFiles,
asciidoctor,
}:
@ -41,9 +38,6 @@ rustPlatform.buildRustPackage rec {
if stdenv.hostPlatform.isDarwin then
[
libiconv
Security
SystemConfiguration
CoreFoundation
]
else
[

View File

@ -5,7 +5,6 @@
SDL2,
python3,
jack2,
Foundation,
alsa-lib,
pkg-config,
}:
@ -31,7 +30,6 @@ stdenv.mkDerivation (finalAttrs: {
buildInputs =
[ SDL2 ]
++ lib.optional (lib.meta.availableOn stdenv.hostPlatform alsa-lib) alsa-lib
++ lib.optional stdenv.hostPlatform.isDarwin Foundation
++ lib.optional stdenv.hostPlatform.isLinux jack2;
preBuild = ''

View File

@ -8,7 +8,6 @@
libXrandr,
pkg-config,
python3,
Cocoa,
}:
stdenv.mkDerivation rec {
pname = "master_me";
@ -28,7 +27,6 @@ stdenv.mkDerivation rec {
libGL
python3
]
++ lib.optionals stdenv.hostPlatform.isDarwin [ Cocoa ]
++ lib.optionals stdenv.hostPlatform.isLinux [
libX11
libXext

View File

@ -12,13 +12,6 @@
libXcursor,
gtk3,
ffmpeg-full,
AppKit,
Carbon,
Cocoa,
CoreAudio,
CoreMIDI,
CoreServices,
Kernel,
}:
stdenv.mkDerivation (finalAttrs: {
@ -49,15 +42,6 @@ stdenv.mkDerivation (finalAttrs: {
libXinerama
libXcursor
gtk3
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
AppKit
Carbon
Cocoa
CoreAudio
CoreMIDI
CoreServices
Kernel
];
installPhase =

View File

@ -4,8 +4,6 @@
rustPlatform,
stdenv,
libusb1,
AppKit,
IOKit,
pkg-config,
}:
rustPlatform.buildRustPackage rec {
@ -24,12 +22,7 @@ rustPlatform.buildRustPackage rec {
cargoBuildFlags = [ "-p minidsp -p minidsp-daemon" ];
buildInputs =
lib.optionals stdenv.hostPlatform.isLinux [ libusb1 ]
++ lib.optionals stdenv.hostPlatform.isDarwin [
AppKit
IOKit
];
buildInputs = lib.optionals stdenv.hostPlatform.isLinux [ libusb1 ];
nativeBuildInputs = lib.optionals stdenv.hostPlatform.isLinux [ pkg-config ];

View File

@ -9,8 +9,6 @@
libX11,
libXau,
libXdmcp,
Carbon,
Cocoa,
cppunit,
}:
@ -61,10 +59,6 @@ stdenv.mkDerivation {
libX11
libXau
libXdmcp
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Carbon
Cocoa
];
checkInputs = [

View File

@ -11,7 +11,6 @@
popt,
libtool,
libiconv,
CoreServices,
# Sound sub-systems
alsaSupport ? (!stdenv.hostPlatform.isDarwin),
alsa-lib,
@ -125,7 +124,6 @@ stdenv.mkDerivation {
++ lib.optional samplerateSupport libsamplerate
++ lib.optionals stdenv.hostPlatform.isDarwin [
libiconv
CoreServices
];
configureFlags = [

View File

@ -10,8 +10,6 @@
withPulse ? stdenv.hostPlatform.isLinux,
libpulseaudio,
withCoreAudio ? stdenv.hostPlatform.isDarwin,
AudioUnit,
AudioToolbox,
withJack ? stdenv.hostPlatform.isUnix,
jack,
withConplay ? !stdenv.hostPlatform.isWindows,
@ -45,8 +43,6 @@ stdenv.mkDerivation rec {
++ lib.optionals withAlsa [ alsa-lib ]
++ lib.optionals withPulse [ libpulseaudio ]
++ lib.optionals withCoreAudio [
AudioUnit
AudioToolbox
]
++ lib.optionals withJack [ jack ]
);

View File

@ -5,7 +5,6 @@
rustPlatform,
pkg-config,
wrapGAppsHook3,
CoreServices,
}:
rustPlatform.buildRustPackage rec {
@ -23,7 +22,6 @@ rustPlatform.buildRustPackage rec {
pkg-config
wrapGAppsHook3
];
buildInputs = lib.optional stdenv.hostPlatform.isDarwin CoreServices;
preConfigure = ''
substituteInPlace lib/utils.rs \

View File

@ -9,9 +9,7 @@
unstableGitUpdater,
openssl,
pkg-config,
stdenv,
yt-dlp,
Security,
}:
rustPlatform.buildRustPackage {
pname = "parrot";
@ -36,7 +34,7 @@ rustPlatform.buildRustPackage {
buildInputs = [
libopus
openssl
] ++ lib.optionals stdenv.hostPlatform.isDarwin [ Security ];
];
postInstall = ''
wrapProgram $out/bin/parrot \

View File

@ -9,7 +9,6 @@
SDL2,
libX11,
libXext,
Cocoa,
utf8proc,
nix-update-script,
}:
@ -64,8 +63,7 @@ stdenv.mkDerivation rec {
++ lib.optionals stdenv.hostPlatform.isLinux [
alsa-lib
libXext
]
++ lib.optionals stdenv.hostPlatform.isDarwin [ Cocoa ];
];
enableParallelBuilding = true;

View File

@ -13,8 +13,6 @@
libvorbis,
libopus,
soxr,
IOKit,
AudioToolbox,
aixlog,
popl,
pulseaudioSupport ? false,
@ -55,11 +53,7 @@ stdenv.mkDerivation rec {
openssl
]
++ lib.optional pulseaudioSupport libpulseaudio
++ lib.optional stdenv.hostPlatform.isLinux alsa-lib
++ lib.optionals stdenv.hostPlatform.isDarwin [
IOKit
AudioToolbox
];
++ lib.optional stdenv.hostPlatform.isLinux alsa-lib;
TARGET = lib.optionalString stdenv.hostPlatform.isDarwin "MACOS";

View File

@ -6,8 +6,6 @@
pkg-config,
alsa-lib,
libxmp,
AudioUnit,
CoreAudio,
}:
stdenv.mkDerivation rec {
@ -25,13 +23,7 @@ stdenv.mkDerivation rec {
autoreconfHook
pkg-config
];
buildInputs =
[ libxmp ]
++ lib.optionals stdenv.hostPlatform.isLinux [ alsa-lib ]
++ lib.optionals stdenv.hostPlatform.isDarwin [
AudioUnit
CoreAudio
];
buildInputs = [ libxmp ] ++ lib.optionals stdenv.hostPlatform.isLinux [ alsa-lib ];
meta = with lib; {
description = "Extended module player";

View File

@ -7,9 +7,6 @@
makeWrapper,
webkitgtk_4_0,
zenity,
Cocoa,
Security,
WebKit,
withGui ? true,
}:
@ -31,13 +28,7 @@ rustPlatform.buildRustPackage rec {
pkg-config
makeWrapper
];
buildInputs =
lib.optional stdenv.hostPlatform.isDarwin Security
++ lib.optional (withGui && stdenv.hostPlatform.isLinux) webkitgtk_4_0
++ lib.optionals (withGui && stdenv.hostPlatform.isDarwin) [
Cocoa
WebKit
];
buildInputs = lib.optional (withGui && stdenv.hostPlatform.isLinux) webkitgtk_4_0;
buildNoDefaultFeatures = true;
buildFeatures = [ "doh" ] ++ lib.optional withGui "webgui";

View File

@ -1,10 +1,8 @@
{
lib,
stdenv,
rustPlatform,
fetchFromGitHub,
rocksdb_7_10,
Security,
}:
let
@ -31,8 +29,6 @@ rustPlatform.buildRustPackage rec {
ROCKSDB_INCLUDE_DIR = "${rocksdb}/include";
ROCKSDB_LIB_DIR = "${rocksdb}/lib";
buildInputs = lib.optionals stdenv.hostPlatform.isDarwin [ Security ];
passthru.updateScript = ./update.sh;
meta = with lib; {

View File

@ -1,6 +1,5 @@
{
cmake,
CoreFoundation,
fetchFromGitHub,
fetchurl,
lib,
@ -11,10 +10,8 @@
protobuf,
rustPlatform,
rust-jemalloc-sys,
Security,
sqlite,
stdenv,
SystemConfiguration,
testers,
}:
@ -59,11 +56,6 @@ rustPlatform.buildRustPackage rec {
]
++ lib.optionals (!stdenv.hostPlatform.isDarwin) [
openssl
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
CoreFoundation
Security
SystemConfiguration
];
depositContractSpec = fetchurl {

View File

@ -16,10 +16,6 @@
unbound,
zeromq,
# darwin
CoreData,
IOKit,
trezorSupport ? true,
hidapi,
libusb1,
@ -88,10 +84,6 @@ stdenv.mkDerivation rec {
unbound
zeromq
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
IOKit
CoreData
]
++ lib.optionals trezorSupport [
python3
hidapi

View File

@ -1,10 +1,7 @@
{
stdenv,
lib,
buildGoModule,
fetchFromGitHub,
libobjc,
IOKit,
}:
buildGoModule rec {
@ -36,12 +33,6 @@ buildGoModule rec {
vendorHash = "sha256-pcIydpKWZt3vwShwzGlPKGq+disdxYFOB8gxHou3mVU=";
# Fix for usb-related segmentation faults on darwin
propagatedBuildInputs = lib.optionals stdenv.hostPlatform.isDarwin [
libobjc
IOKit
];
ldflags = [
"-s"
"-w"

View File

@ -10,8 +10,6 @@
rustPlatform,
rustc,
stdenv,
Security,
SystemConfiguration,
}:
let
@ -60,13 +58,9 @@ rustPlatform.buildRustPackage rec {
];
# NOTE: jemalloc is used by default on Linux with unprefixed enabled
buildInputs =
[ openssl ]
++ lib.optionals stdenv.hostPlatform.isLinux [ rust-jemalloc-sys-unprefixed ]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Security
SystemConfiguration
];
buildInputs = [
openssl
] ++ lib.optionals stdenv.hostPlatform.isLinux [ rust-jemalloc-sys-unprefixed ];
checkInputs = [
cacert

View File

@ -3,7 +3,6 @@
fetchFromGitHub,
lib,
rustPlatform,
Security,
curl,
pkg-config,
openssl,
@ -41,7 +40,6 @@ rustPlatform.buildRustPackage rec {
# ROCKSDB_LIB_DIR="${rocksdb}/lib";
buildInputs = lib.optionals stdenv.hostPlatform.isDarwin [
Security
curl
];

View File

@ -4,8 +4,6 @@
fetchFromGitHub,
protobuf,
rustfmt,
stdenv,
darwin,
pkg-config,
openssl,
}:
@ -42,10 +40,6 @@ in
rustfmt
];
buildInputs = lib.optionals stdenv.hostPlatform.isDarwin [
darwin.apple_sdk.frameworks.Security
];
passthru.updateScript = updateScript;
__darwinAllowLocalNetworking = true;
@ -70,13 +64,9 @@ in
rustfmt
];
buildInputs =
[
openssl
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
darwin.apple_sdk.frameworks.SystemConfiguration
];
buildInputs = [
openssl
];
passthru.updateScript = updateScript;

View File

@ -16,7 +16,6 @@
rustc,
rustPlatform,
pkg-config,
Security,
stdenv,
testers,
tl-expected,
@ -60,19 +59,15 @@ stdenv.mkDerivation rec {
rustPlatform.cargoSetupHook
];
buildInputs =
[
boost
db62
libevent
libsodium
tl-expected
utf8cpp
zeromq
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Security
];
buildInputs = [
boost
db62
libevent
libsodium
tl-expected
utf8cpp
zeromq
];
CXXFLAGS = [
"-I${lib.getDev utf8cpp}/include/utf8cpp"

View File

@ -6,21 +6,6 @@ lib.makeScope pkgs.newScope (
inherit (self) callPackage;
inheritedArgs = {
inherit (pkgs.darwin) sigtool;
inherit (pkgs.darwin.apple_sdk.frameworks)
Accelerate
AppKit
Carbon
Cocoa
GSS
ImageCaptureCore
ImageIO
IOKit
OSAKit
Quartz
QuartzCore
WebKit
;
inherit (pkgs.darwin.apple_sdk_11_0.frameworks) UniformTypeIdentifiers;
};
in
{

View File

@ -810,12 +810,6 @@ let
osx-dictionary =
if pkgs.stdenv.hostPlatform.isDarwin then
super.osx-dictionary.overrideAttrs (old: {
buildInputs =
old.buildInputs
++ (with pkgs.darwin.apple_sdk.frameworks; [
CoreServices
Foundation
]);
postBuild =
(old.postBuild or "")
+ ''

View File

@ -118,21 +118,6 @@
"lucid"
),
# macOS dependencies for NS and macPort
Accelerate,
AppKit,
Carbon,
Cocoa,
GSS,
IOKit,
ImageCaptureCore,
ImageIO,
OSAKit,
Quartz,
QuartzCore,
UniformTypeIdentifiers,
WebKit,
# test
callPackage,
}:
@ -376,27 +361,6 @@ mkDerivation (finalAttrs: {
]
++ lib.optionals withNS [
librsvg
AppKit
GSS
ImageIO
]
++ lib.optionals (variant == "macport") [
Accelerate
AppKit
Carbon
Cocoa
IOKit
OSAKit
Quartz
QuartzCore
WebKit
# TODO are these optional?
GSS
ImageCaptureCore
ImageIO
]
++ lib.optionals (variant == "macport" && stdenv.hostPlatform.isAarch64) [
UniformTypeIdentifiers
];
# Emacs needs to find movemail at run time, see info (emacs) Movemail

View File

@ -13,7 +13,6 @@
gtk3,
openssl,
libGL,
libobjc,
libxkbcommon,
wrapGAppsHook3,
wayland,
@ -77,9 +76,6 @@ rustPlatform.buildRustPackage rec {
]
++ lib.optionals stdenv.hostPlatform.isLinux [
fontconfig
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
libobjc
];
postInstall =

View File

@ -22,9 +22,6 @@
SDL2,
SDL2_image,
lua,
AppKit,
Cocoa,
Foundation,
nixosTests,
}:
@ -48,29 +45,23 @@ stdenv.mkDerivation (finalAttrs: {
gtest
];
buildInputs =
[
curl
freetype
giflib
libjpeg
libpng
libwebp
libarchive
libX11
pixman
tinyxml-2
zlib
SDL2
SDL2_image
lua
# no v8 due to missing libplatform and libbase
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
AppKit
Cocoa
Foundation
];
buildInputs = [
curl
freetype
giflib
libjpeg
libpng
libwebp
libarchive
libX11
pixman
tinyxml-2
zlib
SDL2
SDL2_image
lua
# no v8 due to missing libplatform and libbase
];
cmakeFlags = [
"-DWITH_DESKTOP_INTEGRATION=ON"

View File

@ -1,6 +1,5 @@
{
fetchFromGitHub,
Foundation,
freetype,
lib,
lua5_4,
@ -29,16 +28,12 @@ stdenv.mkDerivation rec {
pkg-config
];
buildInputs =
[
freetype
lua5_4
pcre2
SDL2
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Foundation
];
buildInputs = [
freetype
lua5_4
pcre2
SDL2
];
mesonFlags = [
"-Duse_system_lua=true"

View File

@ -1,6 +1,6 @@
{ lib, fetchFromGitHub }:
rec {
version = "9.1.1231";
version = "9.1.1336";
outputs = [
"out"
@ -11,7 +11,7 @@ rec {
owner = "vim";
repo = "vim";
rev = "v${version}";
hash = "sha256-buqA6OM2FcxPefGQIGm8kD8ZWcskw7K/VO3xCRw7SbI=";
hash = "sha256-fF1qRPdVzQiYH/R0PSmKR/zFVVuCtT6lPN1x1Th5SgA=";
};
enableParallelBuilding = true;

View File

@ -14,9 +14,6 @@
url = "https://raw.githubusercontent.com/archlinux/svntogit-packages/68f6d131750aa778807119e03eed70286a17b1cb/trunk/archlinux.vim";
sha256 = "18ifhv5q9prd175q3vxbqf6qyvkk6bc7d2lhqdk0q78i68kv9y0c";
},
# apple frameworks
Carbon,
Cocoa,
}:
let
@ -41,16 +38,11 @@ stdenv.mkDerivation {
gettext
pkg-config
];
buildInputs =
[
ncurses
bash
gawk
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Carbon
Cocoa
];
buildInputs = [
ncurses
bash
gawk
];
strictDeps = true;

View File

@ -30,13 +30,6 @@
makeWrapper,
wrapGAppsHook3,
# apple frameworks
CoreServices,
CoreData,
Cocoa,
Foundation,
libobjc,
features ? "huge", # One of tiny, small, normal, big or huge
wrapPythonDrv ? false,
guiSupport ? config.vim.gui or (if stdenv.hostPlatform.isDarwin then "gtk2" else "gtk3"),
@ -195,13 +188,6 @@ stdenv.mkDerivation {
]
++ lib.optional (guiSupport == "gtk2") gtk2-x11
++ lib.optional (guiSupport == "gtk3") gtk3-x11
++ lib.optionals darwinSupport [
CoreServices
CoreData
Cocoa
Foundation
libobjc
]
++ lib.optional luaSupport lua
++ lib.optional pythonSupport python3
++ lib.optional tclSupport tcl

View File

@ -150,7 +150,6 @@ stdenv.mkDerivation (finalAttrs: {
postConfigure = ''
substituteInPlace src/auto/config.mk \
--replace " -L${stdenv.cc.libc}/lib" "" \
--replace " -L${darwin.libobjc}/lib" "" \
--replace " -L${darwin.libunwind}/lib" "" \
--replace " -L${libiconv}/lib" ""

View File

@ -10,8 +10,6 @@
esbuild,
pkg-config,
libsecret,
stdenv,
darwin,
setDefaultServerPath ? true,
}:
@ -39,18 +37,13 @@ let
buildInputs = [
pkgsBuildBuild.libsecret
];
nativeBuildInputs =
[
jq
moreutils
esbuild
# Required by `keytar`, which is a dependency of `vsce`.
pkg-config
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
darwin.apple_sdk.frameworks.AppKit
darwin.apple_sdk.frameworks.Security
];
nativeBuildInputs = [
jq
moreutils
esbuild
# Required by `keytar`, which is a dependency of `vsce`.
pkg-config
];
# Follows https://github.com/rust-lang/rust-analyzer/blob/41949748a6123fd6061eb984a47f4fe780525e63/xtask/src/dist.rs#L39-L65
installPhase = ''

View File

@ -3,10 +3,7 @@
stdenv,
fetchFromGitHub,
alsa-lib,
AudioUnit,
autoreconfHook,
Carbon,
Cocoa,
ffmpeg,
fluidsynth,
freetype,
@ -86,11 +83,6 @@ stdenv.mkDerivation (finalAttrs: {
alsa-lib
libxkbfile
libXrandr
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
AudioUnit
Carbon
Cocoa
];
# Tests for SDL_net.h for modem & IPX support, not automatically picked up due to being in SDL2 subdirectory

View File

@ -11,7 +11,6 @@
graphicsmagick,
libGL,
libGLU,
OpenGL,
libpng,
binutils,
makeDesktopItem,
@ -54,17 +53,10 @@ stdenv.mkDerivation rec {
SDL_sound
libpng
]
++ (
if stdenv.hostPlatform.isDarwin then
[
OpenGL
]
else
[
libGL
libGLU
]
);
++ lib.optionals (!stdenv.hostPlatform.isDarwin) [
libGL
libGLU
];
# Tests for SDL_net.h for modem & IPX support, not automatically picked up due to being in SDL subdirectory
env.NIX_CFLAGS_COMPILE = "-I${lib.getDev SDL_net}/include/SDL";

View File

@ -32,12 +32,8 @@
which,
writeScript,
zlib,
darwin,
}:
let
inherit (darwin.apple_sdk.frameworks) CoreAudioKit ForceFeedback;
in
stdenv.mkDerivation rec {
pname = "mame";
version = "0.276";
@ -105,8 +101,6 @@ stdenv.mkDerivation rec {
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
libpcap
CoreAudioKit
ForceFeedback
];
nativeBuildInputs = [

View File

@ -1,6 +1,5 @@
{
lib,
stdenv,
cmake,
rustPlatform,
pkg-config,
@ -9,7 +8,6 @@
gtk3,
glib,
openssl,
Security,
nix-update-script,
}:
@ -32,7 +30,7 @@ rustPlatform.buildRustPackage rec {
glib
gtk3
openssl
] ++ lib.optional stdenv.hostPlatform.isDarwin Security;
];
nativeBuildInputs = [
cmake

View File

@ -45,8 +45,6 @@
libde265Support ? true,
libde265,
fftw,
ApplicationServices,
Foundation,
testers,
}:
@ -124,11 +122,7 @@ stdenv.mkDerivation (finalAttrs: {
++ lib.optional djvulibreSupport djvulibre
++ lib.optional openexrSupport openexr
++ lib.optional librsvgSupport librsvg
++ lib.optional openjpegSupport openjpeg
++ lib.optionals stdenv.hostPlatform.isDarwin [
ApplicationServices
Foundation
];
++ lib.optional openjpegSupport openjpeg;
propagatedBuildInputs =
[ fftw ]

View File

@ -52,8 +52,6 @@
potrace,
coreutils,
curl,
ApplicationServices,
Foundation,
testers,
nixos-icons,
perlPackages,
@ -147,11 +145,7 @@ stdenv.mkDerivation (finalAttrs: {
librsvg
pango
]
++ lib.optional openjpegSupport openjpeg
++ lib.optionals stdenv.hostPlatform.isDarwin [
ApplicationServices
Foundation
];
++ lib.optional openjpegSupport openjpeg;
propagatedBuildInputs =
[ curl ]

View File

@ -4,7 +4,6 @@
fetchFromGitHub,
cmake,
itk,
Cocoa,
}:
stdenv.mkDerivation {
@ -19,7 +18,7 @@ stdenv.mkDerivation {
};
nativeBuildInputs = [ cmake ];
buildInputs = [ itk ] ++ lib.optional stdenv.hostPlatform.isDarwin Cocoa;
buildInputs = [ itk ];
cmakeFlags = [ "-DCONVERT3D_USE_ITK_REMOTE_MODULES=OFF" ];

View File

@ -12,7 +12,6 @@
qttools,
xorg,
libtiff,
darwin,
}:
mkDerivation rec {
@ -42,7 +41,7 @@ mkDerivation rec {
qtbase
xorg.libXt
libtiff
] ++ lib.optional stdenv.hostPlatform.isDarwin darwin.apple_sdk.frameworks.AGL;
];
preConfigure = ''
NOCONFIGURE=1 ./autogen.sh

View File

@ -16,11 +16,6 @@
libxcb,
libxkbcommon,
wayland,
AppKit,
CoreGraphics,
CoreServices,
Foundation,
OpenGL,
}:
let
rpathLibs =
@ -59,15 +54,7 @@ rustPlatform.buildRustPackage rec {
python3
];
buildInputs =
rpathLibs
++ lib.optionals stdenv.hostPlatform.isDarwin [
AppKit
CoreGraphics
CoreServices
Foundation
OpenGL
];
buildInputs = rpathLibs;
postFixup = lib.optionalString stdenv.hostPlatform.isLinux ''
patchelf --set-rpath "${lib.makeLibraryPath rpathLibs}" $out/bin/emulsion

View File

@ -9,7 +9,6 @@
freetype,
libGL,
xorg,
AppKit,
}:
rustPlatform.buildRustPackage rec {
@ -31,19 +30,15 @@ rustPlatform.buildRustPackage rec {
pkg-config
];
buildInputs =
lib.optionals stdenv.hostPlatform.isLinux [
expat
fontconfig
freetype
xorg.libX11
xorg.libXcursor
xorg.libXi
xorg.libXrandr
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
AppKit
];
buildInputs = lib.optionals stdenv.hostPlatform.isLinux [
expat
fontconfig
freetype
xorg.libX11
xorg.libXcursor
xorg.libXi
xorg.libXrandr
];
postInstall = ''
install -Dm444 assets/epick.desktop -t $out/share/applications

View File

@ -11,8 +11,6 @@
# See https://github.com/NixOS/nixpkgs/pull/324022. This may change later.
vtk_9,
autoPatchelfHook,
Cocoa,
OpenGL,
python3Packages,
opencascade-occt,
assimp,
@ -64,10 +62,6 @@ stdenv.mkDerivation rec {
assimp
fontconfig
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Cocoa
OpenGL
]
++ lib.optionals withPythonBinding [
python3Packages.python
# Using C++ header files, not Python import

View File

@ -17,12 +17,6 @@
libGLU,
alsa-lib,
fontconfig,
AVFoundation,
Carbon,
Cocoa,
CoreAudio,
Kernel,
OpenGL,
}:
stdenv.mkDerivation rec {
@ -75,14 +69,6 @@ stdenv.mkDerivation rec {
alsa-lib
fontconfig
libGLU
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
AVFoundation
Carbon
Cocoa
CoreAudio
Kernel
OpenGL
];
env.NIX_CFLAGS_COMPILE = toString [

View File

@ -47,8 +47,6 @@
libgudev,
openexr,
desktopToDarwinBundle,
AppKit,
Cocoa,
gtk-mac-integration-gtk2,
withPython ? false,
python2,
@ -144,8 +142,6 @@ stdenv.mkDerivation (finalAttrs: {
mypaint-brushes1
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
AppKit
Cocoa
gtk-mac-integration-gtk2
]
++ lib.optionals stdenv.hostPlatform.isLinux [

View File

@ -66,8 +66,6 @@
adwaita-icon-theme,
alsa-lib,
desktopToDarwinBundle,
AppKit,
Cocoa,
}:
let
@ -202,8 +200,6 @@ stdenv.mkDerivation (finalAttrs: {
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
llvmPackages.openmp
AppKit
Cocoa
]
++ lib.optionals stdenv.hostPlatform.isLinux [
libgudev

View File

@ -17,7 +17,6 @@
gtk4,
gdk-pixbuf,
libadwaita,
Foundation,
nix-update-script,
}:
@ -60,16 +59,12 @@ stdenv.mkDerivation (finalAttrs: {
desktop-file-utils
];
buildInputs =
[
glib
gtk4
gdk-pixbuf
libadwaita
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Foundation
];
buildInputs = [
glib
gtk4
gdk-pixbuf
libadwaita
];
passthru = {
updateScript = nix-update-script { };

View File

@ -8,7 +8,6 @@
withSixel ? false,
libsixel,
xorg,
AppKit,
withSki ? true,
}:
@ -36,8 +35,7 @@ rustPlatform.buildRustPackage rec {
libX11
libXrandr
]
)
++ lib.optional stdenv.hostPlatform.isDarwin AppKit;
);
buildNoDefaultFeatures = !withSki;
buildFeatures = lib.optional withSixel "sixel";

View File

@ -18,7 +18,6 @@
qtbase,
qtsvg,
qttools,
VideoDecodeAcceleration,
wrapQtAppsHook,
copyDesktopItems,
# needed to run natively on wayland
@ -44,26 +43,22 @@ stdenv.mkDerivation rec {
copyDesktopItems
];
buildInputs =
[
boost
bzip2
ffmpeg
fftwSinglePrec
hdf5
muparser
netcdf
openssl
python3
qscintilla
qtbase
qtsvg
qttools
qtwayland
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
VideoDecodeAcceleration
];
buildInputs = [
boost
bzip2
ffmpeg
fftwSinglePrec
hdf5
muparser
netcdf
openssl
python3
qscintilla
qtbase
qtsvg
qttools
qtwayland
];
# manually create a desktop file
desktopItems = [

View File

@ -1,17 +1,12 @@
{
callPackage,
lowPrio,
Accelerate,
CoreGraphics,
CoreVideo,
}:
let
base3 = callPackage ./tesseract3.nix { };
base4 = callPackage ./tesseract4.nix { };
base5 = callPackage ./tesseract5.nix {
inherit Accelerate CoreGraphics CoreVideo;
};
base5 = callPackage ./tesseract5.nix { };
languages = callPackage ./languages.nix { };
in
{

View File

@ -12,9 +12,6 @@
libtiff,
icu,
pango,
Accelerate,
CoreGraphics,
CoreVideo,
}:
stdenv.mkDerivation rec {
@ -35,21 +32,15 @@ stdenv.mkDerivation rec {
autoreconfHook
];
buildInputs =
[
curl
leptonica
libarchive
libpng
libtiff
icu
pango
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Accelerate
CoreGraphics
CoreVideo
];
buildInputs = [
curl
leptonica
libarchive
libpng
libtiff
icu
pango
];
passthru.updateScript = nix-update-script { };
meta = {

View File

@ -6,7 +6,6 @@
autoreconfHook,
autoconf-archive,
pkg-config,
CoreAudio,
enableAlsa ? true,
alsa-lib,
enableLibao ? true,
@ -84,8 +83,7 @@ stdenv.mkDerivation {
amrnb
amrwb
]
++ lib.optional enableLibpulseaudio libpulseaudio
++ lib.optional stdenv.hostPlatform.isDarwin CoreAudio;
++ lib.optional enableLibpulseaudio libpulseaudio;
enableParallelBuilding = true;

View File

@ -4,13 +4,6 @@
rustPlatform,
fetchFromGitHub,
makeWrapper,
AppKit,
CoreFoundation,
CoreGraphics,
CoreVideo,
Foundation,
Metal,
QuartzCore,
xorg,
vulkan-loader,
}:
@ -33,16 +26,6 @@ rustPlatform.buildRustPackage rec {
makeWrapper
];
buildInputs = lib.optionals stdenv.hostPlatform.isDarwin [
AppKit
CoreFoundation
CoreGraphics
CoreVideo
Foundation
Metal
QuartzCore
];
postInstall = lib.optionalString (!stdenv.hostPlatform.isDarwin) ''
wrapProgram $out/bin/binocle \
--suffix LD_LIBRARY_PATH : ${

View File

@ -8,7 +8,6 @@
cairo,
poppler,
wxGTK,
Cocoa,
}:
stdenv.mkDerivation rec {
@ -31,7 +30,7 @@ stdenv.mkDerivation rec {
cairo
poppler
wxGTK
] ++ lib.optionals stdenv.hostPlatform.isDarwin [ Cocoa ];
];
preConfigure = "./bootstrap";

View File

@ -5,8 +5,6 @@
pkg-config,
libgit2,
openssl,
stdenv,
Security,
}:
rustPlatform.buildRustPackage rec {
@ -25,14 +23,10 @@ rustPlatform.buildRustPackage rec {
nativeBuildInputs = [ pkg-config ];
buildInputs =
[
libgit2
openssl
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
Security
];
buildInputs = [
libgit2
openssl
];
useNextest = true;

View File

@ -17,12 +17,6 @@
gnutar,
p7zip,
xz,
IOKit,
Carbon,
Cocoa,
AudioToolbox,
OpenGL,
System,
withTTYX ? true,
libX11,
withGUI ? true,
@ -94,15 +88,7 @@ stdenv.mkDerivation rec {
debugpy
pcpp
]
)
++ lib.optionals stdenv.hostPlatform.isDarwin [
IOKit
Carbon
Cocoa
AudioToolbox
OpenGL
System
];
);
postPatch = ''
patchShebangs python/src/prebuild.sh

View File

@ -4,9 +4,6 @@
rustPlatform,
pkg-config,
openssl,
stdenv,
CoreServices,
Security,
}:
rustPlatform.buildRustPackage rec {
@ -25,14 +22,9 @@ rustPlatform.buildRustPackage rec {
nativeBuildInputs = [ pkg-config ];
buildInputs =
[
openssl
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
CoreServices
Security
];
buildInputs = [
openssl
];
meta = with lib; {
description = "Knowledge management meta-framework for geeks";

View File

@ -11,7 +11,6 @@
wayland,
libGL,
openssl,
darwin,
}:
rustPlatform.buildRustPackage rec {
@ -36,20 +35,16 @@ rustPlatform.buildRustPackage rec {
pkg-config
];
buildInputs =
lib.optionals stdenv.hostPlatform.isLinux [
fontconfig
xorg.libXcursor
xorg.libXi
xorg.libXrandr
xorg.libxcb
wayland
libxkbcommon
openssl
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
darwin.apple_sdk_11_0.frameworks.AppKit
];
buildInputs = lib.optionals stdenv.hostPlatform.isLinux [
fontconfig
xorg.libXcursor
xorg.libXi
xorg.libXrandr
xorg.libxcb
wayland
libxkbcommon
openssl
];
checkFlags = lib.optionals stdenv.hostPlatform.isDarwin [
# time out on darwin

View File

@ -6,8 +6,6 @@
pkg-config,
openssl,
libgit2,
Security,
SystemConfiguration,
}:
rustPlatform.buildRustPackage rec {
@ -30,8 +28,6 @@ rustPlatform.buildRustPackage rec {
[ openssl ]
++ lib.optionals stdenv.hostPlatform.isDarwin [
libgit2
Security
SystemConfiguration
];
nativeBuildInputs = [ pkg-config ];

View File

@ -3,7 +3,6 @@
stdenv,
fetchFromGitHub,
rustPlatform,
CoreServices,
}:
rustPlatform.buildRustPackage rec {
@ -25,8 +24,6 @@ rustPlatform.buildRustPackage rec {
useFetchCargoVendor = true;
cargoHash = "sha256-+x4pOtszvdzI/zR55ezcxlS52GrWQTuBn7vbnqDTVac=";
buildInputs = lib.optionals stdenv.hostPlatform.isDarwin [ CoreServices ];
meta = with lib; {
description = "Plain text Zettelkasten based on mdBook";
homepage = "https://github.com/mdzk-rs/mdzk/";

Some files were not shown because too many files have changed in this diff Show More