Merge branch 'master' into staging-next
commit 535b720589

@@ -567,7 +572,12 @@ If you have any problems with formatting, please ping the [formatting team](http
That is, write

```nix
{ stdenv, fetchurl, perl }: <...>
{
  stdenv,
  fetchurl,
  perl,
}:
<...>
```

instead of
@@ -579,17 +584,25 @@ If you have any problems with formatting, please ping the [formatting team](http
or

```nix
{ stdenv, fetchurl, perl, ... }: <...>
{
  stdenv,
  fetchurl,
  perl,
  ...
}:
<...>
```

For functions that are truly generic in the number of arguments, but have some required arguments, you should write them using an `@`-pattern:

```nix
{ stdenv, doCoverageAnalysis ? false, ... } @ args:
{
  stdenv,
  doCoverageAnalysis ? false,
  ...
}@args:

stdenv.mkDerivation (args // {
  foo = if doCoverageAnalysis then "bla" else "";
})
stdenv.mkDerivation (args // { foo = if doCoverageAnalysis then "bla" else ""; })
```

instead of
@@ -597,42 +610,37 @@ If you have any problems with formatting, please ping the [formatting team](http
```nix
args:

args.stdenv.mkDerivation (args // {
  foo = if args ? doCoverageAnalysis && args.doCoverageAnalysis then "bla" else "";
})
args.stdenv.mkDerivation (
  args
  // {
    foo = if args ? doCoverageAnalysis && args.doCoverageAnalysis then "bla" else "";
  }
)
```

- Unnecessary string conversions should be avoided.
  Do

  ```nix
  {
    rev = version;
  }
  { rev = version; }
  ```

  instead of

  ```nix
  {
    rev = "${version}";
  }
  { rev = "${version}"; }
  ```

- Building lists conditionally _should_ be done with `lib.optional(s)` instead of using `if cond then [ ... ] else null` or `if cond then [ ... ] else [ ]`.

  ```nix
  {
    buildInputs = lib.optional stdenv.hostPlatform.isDarwin iconv;
  }
  { buildInputs = lib.optional stdenv.hostPlatform.isDarwin iconv; }
  ```

  instead of

  ```nix
  {
    buildInputs = if stdenv.hostPlatform.isDarwin then [ iconv ] else null;
  }
  { buildInputs = if stdenv.hostPlatform.isDarwin then [ iconv ] else null; }
  ```

  As an exception, an explicit conditional expression with `null` can be used when fixing an important bug without triggering a mass rebuild.
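  For more than one element, the plural `lib.optionals` takes a list instead; a minimal sketch, assuming `gettext` is another dependency wanted only on Darwin:

  ```nix
  {
    # `gettext` here is illustrative; any additional Darwin-only dependency works the same way.
    buildInputs = lib.optionals stdenv.hostPlatform.isDarwin [
      iconv
      gettext
    ];
  }
  ```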
@ -110,7 +110,8 @@ rec {
|
||||
parse = pkgs.lib.recurseIntoAttrs {
|
||||
latest = pkgs.callPackage ./parse.nix { nix = pkgs.nixVersions.latest; };
|
||||
lix = pkgs.callPackage ./parse.nix { nix = pkgs.lix; };
|
||||
minimum = pkgs.callPackage ./parse.nix { nix = pkgs.nixVersions.minimum; };
|
||||
# TODO: Raise nixVersions.minimum to 2.24 and flip back to it.
|
||||
minimum = pkgs.callPackage ./parse.nix { nix = pkgs.nixVersions.nix_2_24; };
|
||||
};
|
||||
shell = import ../shell.nix { inherit nixpkgs system; };
|
||||
tarball = import ../pkgs/top-level/make-tarball.nix {
|
||||
|
@ -9,9 +9,9 @@
|
||||
},
|
||||
"branch": "nixpkgs-unstable",
|
||||
"submodules": false,
|
||||
"revision": "6afe187897bef7933475e6af374c893f4c84a293",
|
||||
"url": "https://github.com/NixOS/nixpkgs/archive/6afe187897bef7933475e6af374c893f4c84a293.tar.gz",
|
||||
"hash": "1x3yas2aingswrw7hpn43d9anlb08bpyk42dqg6v8f3p3yk83p1b"
|
||||
"revision": "2baf8e1658cba84a032c3a8befb1e7b06629242a",
|
||||
"url": "https://github.com/NixOS/nixpkgs/archive/2baf8e1658cba84a032c3a8befb1e7b06629242a.tar.gz",
|
||||
"hash": "0l48zkf2zs7r53fjq46j770vpb5avxihyfypra3fv429akqnsmm1"
|
||||
},
|
||||
"treefmt-nix": {
|
||||
"type": "Git",
|
||||
@ -22,9 +22,9 @@
|
||||
},
|
||||
"branch": "main",
|
||||
"submodules": false,
|
||||
"revision": "a05be418a1af1198ca0f63facb13c985db4cb3c5",
|
||||
"url": "https://github.com/numtide/treefmt-nix/archive/a05be418a1af1198ca0f63facb13c985db4cb3c5.tar.gz",
|
||||
"hash": "1yadm9disc59an4a6c1zidq82530rd7i7idzzsirv6dlwirbqk3q"
|
||||
"revision": "421b56313c65a0815a52b424777f55acf0b56ddf",
|
||||
"url": "https://github.com/numtide/treefmt-nix/archive/421b56313c65a0815a52b424777f55acf0b56ddf.tar.gz",
|
||||
"hash": "1l57hzz704s7izkkcl3xsg77xjfza57cl0fchs24rdpdhmry2dmp"
|
||||
}
|
||||
},
|
||||
"version": 5
|
||||
|
@ -60,10 +60,7 @@ lib.extendMkDerivation {
|
||||
}@args:
|
||||
{
|
||||
# Arguments to pass
|
||||
inherit
|
||||
preferLocalBuild
|
||||
allowSubstitute
|
||||
;
|
||||
inherit preferLocalBuild allowSubstitute;
|
||||
# Some expressions involving specialArg
|
||||
greeting = if specialArg "hi" then "hi" else "hello";
|
||||
};
|
||||
|
@ -37,9 +37,7 @@ let
|
||||
hash = "sha256-he1uGC1M/nFcKpMM9JKY4oeexJcnzV0ZRxhTjtJz6xw=";
|
||||
};
|
||||
in
|
||||
appimageTools.wrapType2 {
|
||||
inherit pname version src;
|
||||
}
|
||||
appimageTools.wrapType2 { inherit pname version src; }
|
||||
```
|
||||
|
||||
:::
|
||||
@ -104,9 +102,7 @@ let
|
||||
hash = "sha256-/hMPvYdnVB1XjKgU2v47HnVvW4+uC3rhRjbucqin4iI=";
|
||||
};
|
||||
|
||||
appimageContents = appimageTools.extract {
|
||||
inherit pname version src;
|
||||
};
|
||||
appimageContents = appimageTools.extract { inherit pname version src; };
|
||||
in
|
||||
appimageTools.wrapType2 {
|
||||
inherit pname version src;
|
||||
|
@ -33,10 +33,7 @@ You may also want to consider [dockerTools](#sec-pkgs-dockerTools) for your cont
|
||||
The following derivation will construct a flat-file binary cache containing the closure of `hello`.
|
||||
|
||||
```nix
|
||||
{ mkBinaryCache, hello }:
|
||||
mkBinaryCache {
|
||||
rootPaths = [ hello ];
|
||||
}
|
||||
{ mkBinaryCache, hello }: mkBinaryCache { rootPaths = [ hello ]; }
|
||||
```
|
||||
|
||||
Build the cache on a machine.
|
||||
|
@ -1577,9 +1577,7 @@ This example uses [](#ex-dockerTools-streamNixShellImage-hello) as a starting po
|
||||
dockerTools.streamNixShellImage {
|
||||
tag = "latest";
|
||||
drv = hello.overrideAttrs (old: {
|
||||
nativeBuildInputs = old.nativeBuildInputs or [ ] ++ [
|
||||
cowsay
|
||||
];
|
||||
nativeBuildInputs = old.nativeBuildInputs or [ ] ++ [ cowsay ];
|
||||
});
|
||||
}
|
||||
```
|
||||
|
@ -82,9 +82,7 @@ This example uses `ociTools.buildContainer` to create a simple container that ru
|
||||
bash,
|
||||
}:
|
||||
ociTools.buildContainer {
|
||||
args = [
|
||||
(lib.getExe bash)
|
||||
];
|
||||
args = [ (lib.getExe bash) ];
|
||||
|
||||
readonly = false;
|
||||
}
|
||||
|
@ -7,20 +7,18 @@ For hermeticity, Nix derivations do not allow any state to be carried over betwe
|
||||
However, we can tell Nix explicitly what the previous build state was, by representing that previous state as a derivation output. This allows the passed build state to be used for an incremental build.
|
||||
|
||||
To change a normal derivation to a checkpoint based build, these steps must be taken:
|
||||
- apply `prepareCheckpointBuild` on the desired derivation, e.g.
|
||||
```nix
|
||||
{
|
||||
checkpointArtifacts = (pkgs.checkpointBuildTools.prepareCheckpointBuild pkgs.virtualbox);
|
||||
}
|
||||
```
|
||||
- change something you want in the sources of the package, e.g. use a source override:
|
||||
```nix
|
||||
{
|
||||
changedVBox = pkgs.virtualbox.overrideAttrs (old: {
|
||||
src = path/to/vbox/sources;
|
||||
});
|
||||
}
|
||||
```
|
||||
```nix
|
||||
{
|
||||
checkpointArtifacts = (pkgs.checkpointBuildTools.prepareCheckpointBuild pkgs.virtualbox);
|
||||
}
|
||||
```
|
||||
```nix
|
||||
{
|
||||
changedVBox = pkgs.virtualbox.overrideAttrs (old: {
|
||||
src = path/to/vbox/sources;
|
||||
});
|
||||
}
|
||||
```
|
||||
- use `mkCheckpointBuild changedVBox checkpointArtifacts`
|
||||
- enjoy shorter build times
|
||||
|
||||
@ -30,10 +28,7 @@ To change a normal derivation to a checkpoint based build, these steps must be t
|
||||
pkgs ? import <nixpkgs> { },
|
||||
}:
|
||||
let
|
||||
inherit (pkgs.checkpointBuildTools)
|
||||
prepareCheckpointBuild
|
||||
mkCheckpointBuild
|
||||
;
|
||||
inherit (pkgs.checkpointBuildTools) prepareCheckpointBuild mkCheckpointBuild;
|
||||
helloCheckpoint = prepareCheckpointBuild pkgs.hello;
|
||||
changedHello = pkgs.hello.overrideAttrs (_: {
|
||||
doCheck = false;
|
||||
|
@ -15,9 +15,7 @@ If the `moduleNames` argument is omitted, `hasPkgConfigModules` will use `meta.p
|
||||
|
||||
```nix
|
||||
{
|
||||
passthru.tests.pkg-config = testers.hasPkgConfigModules {
|
||||
package = finalAttrs.finalPackage;
|
||||
};
|
||||
passthru.tests.pkg-config = testers.hasPkgConfigModules { package = finalAttrs.finalPackage; };
|
||||
|
||||
meta.pkgConfigModules = [ "libfoo" ];
|
||||
}
|
||||
@ -74,9 +72,7 @@ If you have a static site that can be built with Nix, you can use `lycheeLinkChe
|
||||
# Check hyperlinks in the `nix` documentation
|
||||
|
||||
```nix
|
||||
testers.lycheeLinkCheck {
|
||||
site = nix.doc + "/share/doc/nix/manual";
|
||||
}
|
||||
testers.lycheeLinkCheck { site = nix.doc + "/share/doc/nix/manual"; }
|
||||
```
|
||||
|
||||
:::
|
||||
@ -269,9 +265,7 @@ The default argument to the command is `--version`, and the version to be checke
|
||||
This example will run the command `hello --version`, and then check that the version of the `hello` package is in the output of the command.
|
||||
|
||||
```nix
|
||||
{
|
||||
passthru.tests.version = testers.testVersion { package = hello; };
|
||||
}
|
||||
{ passthru.tests.version = testers.testVersion { package = hello; }; }
|
||||
```
|
||||
|
||||
:::
|
||||
|
@ -152,9 +152,7 @@ runCommandWith {
|
||||
|
||||
Likewise, `runCommandCC name derivationArgs buildCommand` is equivalent to
|
||||
```nix
|
||||
runCommandWith {
|
||||
inherit name derivationArgs;
|
||||
} buildCommand
|
||||
runCommandWith { inherit name derivationArgs; } buildCommand
|
||||
```
|
||||
:::
|
||||
|
||||
@ -713,7 +711,10 @@ concatTextFile
|
||||
# Writes contents of files to /nix/store/<store path>
|
||||
concatText
|
||||
"my-file"
|
||||
[ file1 file2 ]
|
||||
[
|
||||
file1
|
||||
file2
|
||||
]
|
||||
|
||||
# Writes contents of files to /nix/store/<store path>
|
||||
concatScript
|
||||
@ -790,7 +791,7 @@ The result is equivalent to the output of `nix-store -q --requisites`.
|
||||
For example,
|
||||
|
||||
```nix
|
||||
writeClosure [ (writeScriptBin "hi" ''${hello}/bin/hello'') ]
|
||||
writeClosure [ (writeScriptBin "hi" "${hello}/bin/hello") ]
|
||||
```
|
||||
|
||||
produces an output path `/nix/store/<hash>-runtime-deps` containing
|
||||
@ -816,7 +817,7 @@ This produces the equivalent of `nix-store -q --references`.
|
||||
For example,
|
||||
|
||||
```nix
|
||||
writeDirectReferencesToFile (writeScriptBin "hi" ''${hello}/bin/hello'')
|
||||
writeDirectReferencesToFile (writeScriptBin "hi" "${hello}/bin/hello")
|
||||
```
|
||||
|
||||
produces an output path `/nix/store/<hash>-runtime-references` containing
|
||||
|
@ -27,8 +27,8 @@ let
|
||||
} ":";
|
||||
};
|
||||
|
||||
# the INI file can now be given as plain old nix values
|
||||
in
|
||||
# the INI file can now be given as plain old nix values
|
||||
customToINI {
|
||||
main = {
|
||||
pushinfo = true;
|
||||
|
@ -15,13 +15,24 @@
|
||||
src = nix-gitignore.gitignoreSource [ ] ./source;
|
||||
# Simplest version
|
||||
|
||||
src = nix-gitignore.gitignoreSource "supplemental-ignores\n" ./source;
|
||||
src = nix-gitignore.gitignoreSource ''
|
||||
supplemental-ignores
|
||||
'' ./source;
|
||||
# This one reads the ./source/.gitignore and concats the auxiliary ignores
|
||||
|
||||
src = nix-gitignore.gitignoreSourcePure "ignore-this\nignore-that\n" ./source;
|
||||
src = nix-gitignore.gitignoreSourcePure ''
|
||||
ignore-this
|
||||
ignore-that
|
||||
'' ./source;
|
||||
# Use this string as gitignore, don't read ./source/.gitignore.
|
||||
|
||||
src = nix-gitignore.gitignoreSourcePure [ "ignore-this\nignore-that\n" ~/.gitignore ] ./source;
|
||||
src = nix-gitignore.gitignoreSourcePure [
|
||||
''
|
||||
ignore-this
|
||||
ignore-that
|
||||
''
|
||||
~/.gitignore
|
||||
] ./source;
|
||||
# It also accepts a list (of strings and paths) that will be concatenated
|
||||
# once the paths are turned to strings via readFile.
|
||||
}
|
||||
@ -41,9 +52,7 @@ Those filter functions accept the same arguments the `builtins.filterSource` fun
|
||||
If you want to make your own filter from scratch, you may use
|
||||
|
||||
```nix
|
||||
{
|
||||
gitignoreFilter = ign: root: filterPattern (gitignoreToPatterns ign) root;
|
||||
}
|
||||
{ gitignoreFilter = ign: root: filterPattern (gitignoreToPatterns ign) root; }
|
||||
```
|
||||
|
||||
## gitignore files in subdirectories {#sec-pkgs-nix-gitignore-usage-recursive}
|
||||
|
@@ -3,9 +3,7 @@
This hook makes a build pause instead of stopping when a failure occurs. It prevents Nix from cleaning up the build environment immediately and allows the user to attach to the build environment. Upon a build error, it will print instructions that can be used to enter the environment for debugging. breakpointHook is only available on Linux. To use it, add `breakpointHook` to `nativeBuildInputs` in the package to be inspected.

```nix
{
  nativeBuildInputs = [ breakpointHook ];
}
{ nativeBuildInputs = [ breakpointHook ]; }
```
|
||||
|
||||
When a build failure occurs, an instruction will be printed showing how to attach to the build sandbox.
|
||||
|
@ -4,17 +4,12 @@
|
||||
This hook starts a Memcached server during `checkPhase`. Example:
|
||||
|
||||
```nix
|
||||
{
|
||||
stdenv,
|
||||
memcachedTestHook,
|
||||
}:
|
||||
{ stdenv, memcachedTestHook }:
|
||||
stdenv.mkDerivation {
|
||||
|
||||
# ...
|
||||
|
||||
nativeCheckInputs = [
|
||||
memcachedTestHook
|
||||
];
|
||||
nativeCheckInputs = [ memcachedTestHook ];
|
||||
}
|
||||
```
|
||||
|
||||
@ -45,11 +40,10 @@ stdenv.mkDerivation {
|
||||
|
||||
# ...
|
||||
|
||||
nativeCheckInputs = [
|
||||
memcachedTestHook
|
||||
];
|
||||
nativeCheckInputs = [ memcachedTestHook ];
|
||||
|
||||
preCheck = ''
|
||||
memcachedTestPort=1234;
|
||||
'';
|
||||
}
|
||||
```
|
||||
|
@ -38,9 +38,7 @@ stdenv.mkDerivation {
|
||||
|
||||
# ...
|
||||
|
||||
nativeBuildInputs = [
|
||||
patchRcPathFish
|
||||
];
|
||||
nativeBuildInputs = [ patchRcPathFish ];
|
||||
|
||||
postFixup = ''
|
||||
patchRcPathFish $out/bin/this-foo.fish ${
|
||||
|
@ -13,9 +13,7 @@ stdenv.mkDerivation {
|
||||
|
||||
# ...
|
||||
|
||||
nativeCheckInputs = [
|
||||
redisTestHook
|
||||
];
|
||||
nativeCheckInputs = [ redisTestHook ];
|
||||
}
|
||||
```
|
||||
|
||||
@ -56,9 +54,7 @@ stdenv.mkDerivation {
|
||||
|
||||
# ...
|
||||
|
||||
nativeCheckInputs = [
|
||||
redisTestHook
|
||||
];
|
||||
nativeCheckInputs = [ redisTestHook ];
|
||||
|
||||
preCheck = ''
|
||||
redisTestPort=6390;
|
||||
|
@ -35,21 +35,18 @@ rustPlatform.buildRustPackage (finalAttrs: {
|
||||
hash = "...";
|
||||
};
|
||||
|
||||
nativeBuildInputs =
|
||||
[
|
||||
# Pull in our main hook
|
||||
cargo-tauri.hook
|
||||
nativeBuildInputs = [
|
||||
# Pull in our main hook
|
||||
cargo-tauri.hook
|
||||
|
||||
# Setup npm
|
||||
nodejs
|
||||
npmHooks.npmConfigHook
|
||||
# Setup npm
|
||||
nodejs
|
||||
npmHooks.npmConfigHook
|
||||
|
||||
# Make sure we can find our libraries
|
||||
pkg-config
|
||||
]
|
||||
++ lib.optionals stdenv.hostPlatform.isLinux [
|
||||
wrapGAppsHook4
|
||||
];
|
||||
# Make sure we can find our libraries
|
||||
pkg-config
|
||||
]
|
||||
++ lib.optionals stdenv.hostPlatform.isLinux [ wrapGAppsHook4 ];
|
||||
|
||||
buildInputs = lib.optionals stdenv.hostPlatform.isLinux [
|
||||
glib-networking # Most Tauri apps need networking
|
||||
|
@ -16,9 +16,7 @@ The hook runs in `installCheckPhase`, requiring `doInstallCheck` is enabled for
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
# ...
|
||||
|
||||
nativeInstallCheckInputs = [
|
||||
udevCheckHook
|
||||
];
|
||||
nativeInstallCheckInputs = [ udevCheckHook ];
|
||||
doInstallCheck = true;
|
||||
|
||||
# ...
|
||||
|
@ -15,9 +15,7 @@ You use it like this:
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
# ...
|
||||
|
||||
nativeInstallCheckInputs = [
|
||||
versionCheckHook
|
||||
];
|
||||
nativeInstallCheckInputs = [ versionCheckHook ];
|
||||
doInstallCheck = true;
|
||||
|
||||
# ...
|
||||
|
@ -16,9 +16,7 @@ In Nixpkgs, `zig.hook` overrides the default build, check and install phases.
|
||||
stdenv.mkDerivation {
|
||||
# . . .
|
||||
|
||||
nativeBuildInputs = [
|
||||
zig.hook
|
||||
];
|
||||
nativeBuildInputs = [ zig.hook ];
|
||||
|
||||
zigBuildFlags = [ "-Dman-pages=true" ];
|
||||
|
||||
|
@ -79,9 +79,7 @@ let
|
||||
sha256,
|
||||
...
|
||||
}:
|
||||
pkgs.fetchzip {
|
||||
inherit name url sha256;
|
||||
};
|
||||
pkgs.fetchzip { inherit name url sha256; };
|
||||
};
|
||||
|
||||
in
|
||||
|
@ -144,9 +144,7 @@ agdaPackages.mkDerivation {
|
||||
version = "1.0";
|
||||
pname = "my-agda-lib";
|
||||
src = ./.;
|
||||
buildInputs = [
|
||||
agdaPackages.standard-library
|
||||
];
|
||||
buildInputs = [ agdaPackages.standard-library ];
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -8,17 +8,13 @@ supporting features.
|
||||
Use the `android-studio-full` attribute for a very complete Android SDK, including system images:
|
||||
|
||||
```nix
|
||||
{
|
||||
buildInputs = [ android-studio-full ];
|
||||
}
|
||||
{ buildInputs = [ android-studio-full ]; }
|
||||
```
|
||||
|
||||
This is identical to:
|
||||
|
||||
```nix
|
||||
{
|
||||
buildInputs = [ androidStudioPackages.stable.full ];
|
||||
}
|
||||
{ buildInputs = [ androidStudioPackages.stable.full ]; }
|
||||
```
|
||||
|
||||
Alternatively, you can pass composeAndroidPackages to the `withSdk` passthru:
|
||||
@ -26,11 +22,7 @@ Alternatively, you can pass composeAndroidPackages to the `withSdk` passthru:
|
||||
```nix
|
||||
{
|
||||
buildInputs = [
|
||||
(android-studio.withSdk
|
||||
(androidenv.composeAndroidPackages {
|
||||
includeNDK = true;
|
||||
}).androidsdk
|
||||
)
|
||||
(android-studio.withSdk (androidenv.composeAndroidPackages { includeNDK = true; }).androidsdk)
|
||||
];
|
||||
}
|
||||
```
|
||||
@ -58,9 +50,7 @@ let
|
||||
"arm64-v8a"
|
||||
];
|
||||
includeNDK = true;
|
||||
includeExtras = [
|
||||
"extras;google;auto"
|
||||
];
|
||||
includeExtras = [ "extras;google;auto" ];
|
||||
};
|
||||
in
|
||||
androidComposition.androidsdk
|
||||
|
@ -65,9 +65,7 @@ let
|
||||
overlays = [ ];
|
||||
};
|
||||
in
|
||||
pkgs.mkShell {
|
||||
packages = [ pkgs.beamPackages.rebar3 ];
|
||||
}
|
||||
pkgs.mkShell { packages = [ pkgs.beamPackages.rebar3 ]; }
|
||||
```
|
||||
:::
|
||||
|
||||
@ -324,9 +322,7 @@ with pkgs;
|
||||
let
|
||||
elixir = beam.packages.erlang_27.elixir_1_18;
|
||||
in
|
||||
mkShell {
|
||||
buildInputs = [ elixir ];
|
||||
}
|
||||
mkShell { buildInputs = [ elixir ]; }
|
||||
```
|
||||
|
||||
### Using an overlay {#beam-using-overlays}
|
||||
@ -348,11 +344,7 @@ let
|
||||
pkgs = import <nixpkgs> { overlays = [ elixir_1_18_1_overlay ]; };
|
||||
in
|
||||
with pkgs;
|
||||
mkShell {
|
||||
buildInputs = [
|
||||
elixir_1_18
|
||||
];
|
||||
}
|
||||
mkShell { buildInputs = [ elixir_1_18 ]; }
|
||||
```
|
||||
|
||||
#### Elixir - Phoenix project {#elixir---phoenix-project}
|
||||
|
@ -77,8 +77,8 @@ let
|
||||
);
|
||||
}
|
||||
);
|
||||
# Here, `myChickenPackages.chickenEggs.json-rpc`, which depends on `srfi-180` will use
|
||||
# the local copy of `srfi-180`.
|
||||
in
|
||||
# Here, `myChickenPackages.chickenEggs.json-rpc`, which depends on `srfi-180` will use
|
||||
# the local copy of `srfi-180`.
|
||||
<...>
|
||||
```
|
||||
|
@ -145,17 +145,13 @@ There are three distinct ways of changing a Coq package by overriding one of its
|
||||
For example, assuming you have a special `mathcomp` dependency you want to use, here is how you could override the `mathcomp` dependency:
|
||||
|
||||
```nix
|
||||
multinomials.override {
|
||||
mathcomp = my-special-mathcomp;
|
||||
}
|
||||
multinomials.override { mathcomp = my-special-mathcomp; }
|
||||
```
|
||||
|
||||
In Nixpkgs, all Coq derivations take a `version` argument. This can be overridden in order to easily use a different version:
|
||||
|
||||
```nix
|
||||
coqPackages.multinomials.override {
|
||||
version = "1.5.1";
|
||||
}
|
||||
coqPackages.multinomials.override { version = "1.5.1"; }
|
||||
```
|
||||
|
||||
Refer to [](#coq-packages-attribute-sets-coqpackages) for all the different formats that you can potentially pass to `version`, as well as the restrictions.
|
||||
@ -181,10 +177,8 @@ For instance, here is how you could add some code to be performed in the derivat
|
||||
|
||||
```nix
|
||||
coqPackages.multinomials.overrideAttrs (oldAttrs: {
|
||||
postInstall =
|
||||
oldAttrs.postInstall or ""
|
||||
+ ''
|
||||
echo "you can do anything you want here"
|
||||
'';
|
||||
postInstall = oldAttrs.postInstall or "" + ''
|
||||
echo "you can do anything you want here"
|
||||
'';
|
||||
})
|
||||
```
|
||||
|
@ -146,9 +146,7 @@ These settings ensure that the CUDA setup hooks function as intended.
|
||||
When using `callPackage`, you can choose to pass in a different variant, e.g. when a package requires a specific version of CUDA:
|
||||
|
||||
```nix
|
||||
{
|
||||
mypkg = callPackage { cudaPackages = cudaPackages_12_2; };
|
||||
}
|
||||
{ mypkg = callPackage { cudaPackages = cudaPackages_12_2; }; }
|
||||
```
|
||||
|
||||
::: {.caution}
|
||||
@ -208,9 +206,7 @@ It is possible to run Docker or Podman containers with CUDA support. The recomme
|
||||
The NVIDIA Container Toolkit can be enabled in NixOS like follows:
|
||||
|
||||
```nix
|
||||
{
|
||||
hardware.nvidia-container-toolkit.enable = true;
|
||||
}
|
||||
{ hardware.nvidia-container-toolkit.enable = true; }
|
||||
```
|
||||
|
||||
This will automatically enable a service that generates a CDI specification (located at `/var/run/cdi/nvidia-container-toolkit.json`) based on the auto-detected hardware of your machine. You can check this service by running:
|
||||
|
@ -94,9 +94,7 @@ let
|
||||
hash = "sha256-B4Q3c6IvTLg3Q92qYa8y+i4uTaphtFdjp+Ir3QQjdN0=";
|
||||
};
|
||||
|
||||
dhallOverlay = self: super: {
|
||||
true = self.callPackage ./true.nix { };
|
||||
};
|
||||
dhallOverlay = self: super: { true = self.callPackage ./true.nix { }; };
|
||||
|
||||
overlay = self: super: {
|
||||
dhallPackages = super.dhallPackages.override (old: {
|
||||
|
@ -10,9 +10,7 @@ with import <nixpkgs> { };
|
||||
|
||||
mkShell {
|
||||
name = "dotnet-env";
|
||||
packages = [
|
||||
dotnet-sdk
|
||||
];
|
||||
packages = [ dotnet-sdk ];
|
||||
}
|
||||
```
|
||||
|
||||
@ -161,7 +159,9 @@ buildDotnetModule rec {
|
||||
projectFile = "src/project.sln";
|
||||
nugetDeps = ./deps.json; # see "Generating and updating NuGet dependencies" section for details
|
||||
|
||||
buildInputs = [ referencedProject ]; # `referencedProject` must contain `nupkg` in the folder structure.
|
||||
buildInputs = [
|
||||
referencedProject
|
||||
]; # `referencedProject` must contain `nupkg` in the folder structure.
|
||||
|
||||
dotnet-sdk = dotnetCorePackages.sdk_8_0;
|
||||
dotnet-runtime = dotnetCorePackages.runtime_8_0;
|
||||
|
@ -38,78 +38,75 @@ One advantage is that when `pkgs.zlib` is updated, it will automatically update
|
||||
|
||||
|
||||
```nix
|
||||
(pkgs.zlib.override {
|
||||
stdenv = pkgs.emscriptenStdenv;
|
||||
}).overrideAttrs
|
||||
(old: {
|
||||
buildInputs = old.buildInputs ++ [ pkg-config ];
|
||||
# we need to reset this setting!
|
||||
env = (old.env or { }) // {
|
||||
NIX_CFLAGS_COMPILE = "";
|
||||
};
|
||||
(pkgs.zlib.override { stdenv = pkgs.emscriptenStdenv; }).overrideAttrs (old: {
|
||||
buildInputs = old.buildInputs ++ [ pkg-config ];
|
||||
# we need to reset this setting!
|
||||
env = (old.env or { }) // {
|
||||
NIX_CFLAGS_COMPILE = "";
|
||||
};
|
||||
|
||||
configurePhase = ''
|
||||
# FIXME: Some tests require writing at $HOME
|
||||
HOME=$TMPDIR
|
||||
runHook preConfigure
|
||||
configurePhase = ''
|
||||
# FIXME: Some tests require writing at $HOME
|
||||
HOME=$TMPDIR
|
||||
runHook preConfigure
|
||||
|
||||
#export EMCC_DEBUG=2
|
||||
emconfigure ./configure --prefix=$out --shared
|
||||
#export EMCC_DEBUG=2
|
||||
emconfigure ./configure --prefix=$out --shared
|
||||
|
||||
runHook postConfigure
|
||||
'';
|
||||
runHook postConfigure
|
||||
'';
|
||||
|
||||
dontStrip = true;
|
||||
outputs = [ "out" ];
|
||||
dontStrip = true;
|
||||
outputs = [ "out" ];
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
|
||||
emmake make
|
||||
emmake make
|
||||
|
||||
runHook postBuild
|
||||
'';
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
|
||||
emmake make install
|
||||
emmake make install
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
checkPhase = ''
|
||||
runHook preCheck
|
||||
checkPhase = ''
|
||||
runHook preCheck
|
||||
|
||||
echo "================= testing zlib using node ================="
|
||||
echo "================= testing zlib using node ================="
|
||||
|
||||
echo "Compiling a custom test"
|
||||
set -x
|
||||
emcc -O2 -s EMULATE_FUNCTION_POINTER_CASTS=1 test/example.c -DZ_SOLO \
|
||||
libz.so.${old.version} -I . -o example.js
|
||||
echo "Compiling a custom test"
|
||||
set -x
|
||||
emcc -O2 -s EMULATE_FUNCTION_POINTER_CASTS=1 test/example.c -DZ_SOLO \
|
||||
libz.so.${old.version} -I . -o example.js
|
||||
|
||||
echo "Using node to execute the test"
|
||||
${pkgs.nodejs}/bin/node ./example.js
|
||||
echo "Using node to execute the test"
|
||||
${pkgs.nodejs}/bin/node ./example.js
|
||||
|
||||
set +x
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "test failed for some reason"
|
||||
exit 1;
|
||||
else
|
||||
echo "it seems to work! very good."
|
||||
fi
|
||||
echo "================= /testing zlib using node ================="
|
||||
set +x
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "test failed for some reason"
|
||||
exit 1;
|
||||
else
|
||||
echo "it seems to work! very good."
|
||||
fi
|
||||
echo "================= /testing zlib using node ================="
|
||||
|
||||
runHook postCheck
|
||||
'';
|
||||
runHook postCheck
|
||||
'';
|
||||
|
||||
postPatch = pkgs.lib.optionalString pkgs.stdenv.hostPlatform.isDarwin ''
|
||||
substituteInPlace configure \
|
||||
--replace-fail '/usr/bin/libtool' 'ar' \
|
||||
--replace-fail 'AR="libtool"' 'AR="ar"' \
|
||||
--replace-fail 'ARFLAGS="-o"' 'ARFLAGS="-r"'
|
||||
'';
|
||||
})
|
||||
postPatch = pkgs.lib.optionalString pkgs.stdenv.hostPlatform.isDarwin ''
|
||||
substituteInPlace configure \
|
||||
--replace-fail '/usr/bin/libtool' 'ar' \
|
||||
--replace-fail 'AR="libtool"' 'AR="ar"' \
|
||||
--replace-fail 'ARFLAGS="-o"' 'ARFLAGS="-r"'
|
||||
'';
|
||||
})
|
||||
```
|
||||
|
||||
:::{.example #usage-2-pkgs.buildemscriptenpackage}
|
||||
|
@ -81,10 +81,7 @@ The function understands several forms of source directory trees:
|
||||
|
||||
For instance, packaging the Bresenham algorithm for line interpolation looks like this, see `pkgs/development/compilers/factor-lang/vocabs/bresenham` for the complete file:
|
||||
```nix
|
||||
{
|
||||
factorPackages,
|
||||
fetchFromGitHub,
|
||||
}:
|
||||
{ factorPackages, fetchFromGitHub }:
|
||||
|
||||
factorPackages.buildFactorVocab {
|
||||
pname = "bresenham";
|
||||
|
@ -48,9 +48,7 @@ In the rare case you need to use icons from dependencies (e.g. when an app force
|
||||
|
||||
```nix
|
||||
{
|
||||
buildInputs = [
|
||||
pantheon.elementary-icon-theme
|
||||
];
|
||||
buildInputs = [ pantheon.elementary-icon-theme ];
|
||||
preFixup = ''
|
||||
gappsWrapperArgs+=(
|
||||
# The icon theme is hardcoded.
|
||||
|
@ -147,9 +147,7 @@ A string list of [Go build tags (also called build constraints)](https://pkg.go.
|
||||
Tags can also be set conditionally:
|
||||
|
||||
```nix
|
||||
{
|
||||
tags = [ "production" ] ++ lib.optionals withSqlite [ "sqlite" ];
|
||||
}
|
||||
{ tags = [ "production" ] ++ lib.optionals withSqlite [ "sqlite" ]; }
|
||||
```
|
||||
|
||||
### `deleteVendor` {#var-go-deleteVendor}
|
||||
@ -283,9 +281,7 @@ For example, only a selection of tests could be run with:
|
||||
```nix
|
||||
{
|
||||
# -run and -skip accept regular expressions
|
||||
checkFlags = [
|
||||
"-run=^Test(Simple|Fast)$"
|
||||
];
|
||||
checkFlags = [ "-run=^Test(Simple|Fast)$" ];
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -783,9 +783,7 @@ need to build `nix-tree` with a more recent version of `brick` than the default
|
||||
one provided by `haskellPackages`:
|
||||
|
||||
```nix
|
||||
haskellPackages.nix-tree.override {
|
||||
brick = haskellPackages.brick_0_67;
|
||||
}
|
||||
haskellPackages.nix-tree.override { brick = haskellPackages.brick_0_67; }
|
||||
```
|
||||
|
||||
<!-- TODO(@sternenseemann): This belongs in the next section
|
||||
@ -841,8 +839,8 @@ let
|
||||
install -Dm644 man/${drv.pname}.1 -t "$out/share/man/man1"
|
||||
'';
|
||||
});
|
||||
in
|
||||
|
||||
in
|
||||
installManPage haskellPackages.pnbackup
|
||||
```
|
||||
|
||||
@ -1310,8 +1308,8 @@ let
|
||||
ghcName = "ghc92";
|
||||
# Desired new setting
|
||||
enableProfiling = true;
|
||||
in
|
||||
|
||||
in
|
||||
[
|
||||
# The first overlay modifies the GHC derivation so that it does or does not
|
||||
# build profiling versions of the core libraries bundled with it. It is
|
||||
@ -1322,8 +1320,8 @@ in
|
||||
final: prev:
|
||||
let
|
||||
inherit (final) lib;
|
||||
in
|
||||
|
||||
in
|
||||
{
|
||||
haskell = prev.haskell // {
|
||||
compiler = prev.haskell.compiler // {
|
||||
@ -1341,8 +1339,8 @@ in
|
||||
let
|
||||
inherit (final) lib;
|
||||
haskellLib = final.haskell.lib.compose;
|
||||
in
|
||||
|
||||
in
|
||||
{
|
||||
haskell = prev.haskell // {
|
||||
packages = prev.haskell.packages // {
|
||||
|
@ -31,9 +31,7 @@ Xcode.
|
||||
let
|
||||
pkgs = import <nixpkgs> { };
|
||||
|
||||
xcodeenv = import ./xcodeenv {
|
||||
inherit (pkgs) stdenv;
|
||||
};
|
||||
xcodeenv = import ./xcodeenv { inherit (pkgs) stdenv; };
|
||||
in
|
||||
xcodeenv.composeXcodeWrapper {
|
||||
version = "9.2";
|
||||
@ -65,9 +63,7 @@ executing the `xcodeenv.buildApp {}` function:
|
||||
let
|
||||
pkgs = import <nixpkgs> { };
|
||||
|
||||
xcodeenv = import ./xcodeenv {
|
||||
inherit (pkgs) stdenv;
|
||||
};
|
||||
xcodeenv = import ./xcodeenv { inherit (pkgs) stdenv; };
|
||||
in
|
||||
xcodeenv.buildApp {
|
||||
name = "MyApp";
|
||||
@ -161,9 +157,7 @@ instances:
|
||||
let
|
||||
pkgs = import <nixpkgs> { };
|
||||
|
||||
xcodeenv = import ./xcodeenv {
|
||||
inherit (pkgs) stdenv;
|
||||
};
|
||||
xcodeenv = import ./xcodeenv { inherit (pkgs) stdenv; };
|
||||
in
|
||||
xcode.simulateApp {
|
||||
name = "simulate";
|
||||
@ -195,9 +189,7 @@ app in the requested simulator instance:
|
||||
let
|
||||
pkgs = import <nixpkgs> { };
|
||||
|
||||
xcodeenv = import ./xcodeenv {
|
||||
inherit (pkgs) stdenv;
|
||||
};
|
||||
xcodeenv = import ./xcodeenv { inherit (pkgs) stdenv; };
|
||||
in
|
||||
xcode.simulateApp {
|
||||
name = "simulate";
|
||||
|
@ -108,11 +108,7 @@ You can also specify what JDK your JRE should be based on, for example
|
||||
selecting a 'headless' build to avoid including a link to GTK+:
|
||||
|
||||
```nix
|
||||
{
|
||||
my_jre = pkgs.jre_minimal.override {
|
||||
jdk = jdk11_headless;
|
||||
};
|
||||
}
|
||||
{ my_jre = pkgs.jre_minimal.override { jdk = jdk11_headless; }; }
|
||||
```
|
||||
|
||||
Note all JDKs passthru `home`, so if your application requires
|
||||
|
@ -303,9 +303,7 @@ buildNpmPackage {
|
||||
version = "0.1.0";
|
||||
src = ./.;
|
||||
|
||||
npmDeps = importNpmLock {
|
||||
npmRoot = ./.;
|
||||
};
|
||||
npmDeps = importNpmLock { npmRoot = ./.; };
|
||||
|
||||
npmConfigHook = importNpmLock.npmConfigHook;
|
||||
}
|
||||
@ -456,9 +454,7 @@ In case you are patching `package.json` or `pnpm-lock.yaml`, make sure to pass `
|
||||
`pnpm.configHook` supports adding additional `pnpm install` flags via `pnpmInstallFlags` which can be set to a Nix string array:
|
||||
|
||||
```nix
|
||||
{
|
||||
pnpm,
|
||||
}:
|
||||
{ pnpm }:
|
||||
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "foo";
|
||||
@ -470,9 +466,7 @@ stdenv.mkDerivation (finalAttrs: {
|
||||
|
||||
pnpmInstallFlags = [ "--shamefully-hoist" ];
|
||||
|
||||
pnpmDeps = pnpm.fetchDeps {
|
||||
inherit (finalAttrs) pnpmInstallFlags;
|
||||
};
|
||||
pnpmDeps = pnpm.fetchDeps { inherit (finalAttrs) pnpmInstallFlags; };
|
||||
})
|
||||
```
|
||||
|
||||
@ -699,9 +693,7 @@ It's important to use the `--offline` flag. For example if you script is `"build
|
||||
|
||||
```nix
|
||||
{
|
||||
nativeBuildInputs = [
|
||||
writableTmpDirAsHomeHook
|
||||
];
|
||||
nativeBuildInputs = [ writableTmpDirAsHomeHook ];
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
@ -716,9 +708,7 @@ It's important to use the `--offline` flag. For example if you script is `"build
|
||||
The `distPhase` is packing the package's dependencies in a tarball using `yarn pack`. You can disable it using:
|
||||
|
||||
```nix
|
||||
{
|
||||
doDist = false;
|
||||
}
|
||||
{ doDist = false; }
|
||||
```
|
||||
|
||||
The configure phase can sometimes fail because it makes many assumptions which may not always apply. One common override is:
|
||||
@ -837,8 +827,8 @@ It's recommended to ensure you're explicitly pinning the major version used, for
|
||||
|
||||
let
|
||||
yarn-berry = yarn-berry_4;
|
||||
in
|
||||
|
||||
in
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "foo";
|
||||
version = "0-unstable-1980-01-01";
|
||||
@ -892,8 +882,8 @@ To compensate for this, the `yarn-berry-fetcher missing-hashes` subcommand can b
|
||||
|
||||
let
|
||||
yarn-berry = yarn-berry_4;
|
||||
in
|
||||
|
||||
in
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "foo";
|
||||
version = "0-unstable-1980-01-01";
|
||||
|
@ -49,9 +49,7 @@ Also one can create a `pkgs.mkShell` environment in `shell.nix`/`flake.nix`:
|
||||
let
|
||||
sbcl' = sbcl.withPackages (ps: [ ps.alexandria ]);
|
||||
in
|
||||
mkShell {
|
||||
packages = [ sbcl' ];
|
||||
}
|
||||
mkShell { packages = [ sbcl' ]; }
|
||||
```
|
||||
|
||||
Such a Lisp can be now used e.g. to compile your sources:
|
||||
@ -192,11 +190,7 @@ let
|
||||
hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
|
||||
};
|
||||
};
|
||||
sbcl' = sbcl.withOverrides (
|
||||
self: super: {
|
||||
inherit alexandria;
|
||||
}
|
||||
);
|
||||
sbcl' = sbcl.withOverrides (self: super: { inherit alexandria; });
|
||||
in
|
||||
sbcl'.pkgs.alexandria
|
||||
```
|
||||
|
@ -117,9 +117,7 @@ top-level while luarocks installs them in various subfolders by default.
|
||||
For instance:
|
||||
```nix
|
||||
{
|
||||
rtp-nvim = neovimUtils.buildNeovimPlugin {
|
||||
luaAttr = luaPackages.rtp-nvim;
|
||||
};
|
||||
rtp-nvim = neovimUtils.buildNeovimPlugin { luaAttr = luaPackages.rtp-nvim; };
|
||||
}
|
||||
```
|
||||
To update these packages, you should use the lua updater rather than vim's.
|
||||
|
@ -26,9 +26,7 @@ buildNimPackage (finalAttrs: {
|
||||
|
||||
lockFile = ./lock.json;
|
||||
|
||||
nimFlags = [
|
||||
"-d:NimblePkgVersion=${finalAttrs.version}"
|
||||
];
|
||||
nimFlags = [ "-d:NimblePkgVersion=${finalAttrs.version}" ];
|
||||
})
|
||||
```
|
||||
|
||||
|
@ -44,9 +44,7 @@ This will also work in a `shell.nix` file.
|
||||
}:
|
||||
|
||||
pkgs.mkShell {
|
||||
nativeBuildInputs = with pkgs; [
|
||||
(octave.withPackages (opkgs: with opkgs; [ symbolic ]))
|
||||
];
|
||||
nativeBuildInputs = with pkgs; [ (octave.withPackages (opkgs: with opkgs; [ symbolic ])) ];
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -267,13 +267,7 @@ php.buildComposerProject2 (finalAttrs: {
|
||||
|
||||
# PHP version containing the `ast` extension enabled
|
||||
php = php.buildEnv {
|
||||
extensions = (
|
||||
{ enabled, all }:
|
||||
enabled
|
||||
++ (with all; [
|
||||
ast
|
||||
])
|
||||
);
|
||||
extensions = ({ enabled, all }: enabled ++ (with all; [ ast ]));
|
||||
};
|
||||
|
||||
# The composer vendor hash
|
||||
|
@ -126,9 +126,7 @@ buildPythonPackage rec {
|
||||
pluggy
|
||||
];
|
||||
|
||||
nativeCheckInputs = [
|
||||
hypothesis
|
||||
];
|
||||
nativeCheckInputs = [ hypothesis ];
|
||||
|
||||
meta = {
|
||||
changelog = "https://github.com/pytest-dev/pytest/releases/tag/${version}";
|
||||
@ -271,16 +269,8 @@ be used through out all of the Python package set:
|
||||
python3MyBlas = pkgs.python3.override {
|
||||
packageOverrides = self: super: {
|
||||
# We need toPythonModule for the package set to evaluate this
|
||||
blas = super.toPythonModule (
|
||||
super.pkgs.blas.override {
|
||||
blasProvider = super.pkgs.mkl;
|
||||
}
|
||||
);
|
||||
lapack = super.toPythonModule (
|
||||
super.pkgs.lapack.override {
|
||||
lapackProvider = super.pkgs.mkl;
|
||||
}
|
||||
);
|
||||
blas = super.toPythonModule (super.pkgs.blas.override { blasProvider = super.pkgs.mkl; });
|
||||
lapack = super.toPythonModule (super.pkgs.lapack.override { lapackProvider = super.pkgs.mkl; });
|
||||
};
|
||||
};
|
||||
}
|
||||
@ -323,9 +313,7 @@ python3Packages.buildPythonApplication rec {
|
||||
hash = "sha256-Pe229rT0aHwA98s+nTHQMEFKZPo/yw6sot8MivFDvAw=";
|
||||
};
|
||||
|
||||
build-system = with python3Packages; [
|
||||
setuptools
|
||||
];
|
||||
build-system = with python3Packages; [ setuptools ];
|
||||
|
||||
dependencies = with python3Packages; [
|
||||
tornado
|
||||
@ -357,9 +345,7 @@ the attribute in `python-packages.nix`, and the `toPythonApplication` shall be
|
||||
applied to the reference:
|
||||
|
||||
```nix
|
||||
{
|
||||
python3Packages,
|
||||
}:
|
||||
{ python3Packages }:
|
||||
|
||||
python3Packages.toPythonApplication python3Packages.youtube-dl
|
||||
```
|
||||
@ -395,9 +381,7 @@ mkPythonMetaPackage {
|
||||
pname = "psycopg2-binary";
|
||||
inherit (psycopg2) optional-dependencies version;
|
||||
dependencies = [ psycopg2 ];
|
||||
meta = {
|
||||
inherit (psycopg2.meta) description homepage;
|
||||
};
|
||||
meta = { inherit (psycopg2.meta) description homepage; };
|
||||
}
|
||||
```
|
||||
|
||||
@ -443,9 +427,7 @@ let
|
||||
pythonEnv = myPython.withPackages (ps: [ ps.my-editable ]);
|
||||
|
||||
in
|
||||
pkgs.mkShell {
|
||||
packages = [ pythonEnv ];
|
||||
}
|
||||
pkgs.mkShell { packages = [ pythonEnv ]; }
|
||||
```
|
||||
|
||||
#### `python.buildEnv` function {#python.buildenv-function}
|
||||
@ -942,9 +924,7 @@ buildPythonPackage rec {
|
||||
hash = "sha256-CP3V73yWSArRHBLUct4hrNMjWZlvaaUlkpm1QP66RWA=";
|
||||
};
|
||||
|
||||
build-system = [
|
||||
setuptools
|
||||
];
|
||||
build-system = [ setuptools ];
|
||||
|
||||
# has no tests
|
||||
doCheck = false;
|
||||
@ -1001,9 +981,7 @@ with import <nixpkgs> { };
|
||||
hash = "sha256-CP3V73yWSArRHBLUct4hrNMjWZlvaaUlkpm1QP66RWA=";
|
||||
};
|
||||
|
||||
build-system = [
|
||||
python313.pkgs.setuptools
|
||||
];
|
||||
build-system = [ python313.pkgs.setuptools ];
|
||||
|
||||
# has no tests
|
||||
doCheck = false;
|
||||
@ -1080,9 +1058,7 @@ buildPythonPackage rec {
|
||||
hash = "sha256-FLLvdm1MllKrgTGC6Gb0k0deZeVYvtCCLji/B7uhong=";
|
||||
};
|
||||
|
||||
build-system = [
|
||||
setuptools
|
||||
];
|
||||
build-system = [ setuptools ];
|
||||
|
||||
dependencies = [
|
||||
multipledispatch
|
||||
@ -1090,9 +1066,7 @@ buildPythonPackage rec {
|
||||
python-dateutil
|
||||
];
|
||||
|
||||
nativeCheckInputs = [
|
||||
pytestCheckHook
|
||||
];
|
||||
nativeCheckInputs = [ pytestCheckHook ];
|
||||
|
||||
meta = {
|
||||
changelog = "https://github.com/blaze/datashape/releases/tag/${version}";
|
||||
@ -1133,9 +1107,7 @@ buildPythonPackage rec {
|
||||
hash = "sha256-s9NiusRxFydHzaNRMjjxFcvWxfi45jGb9ql6eJJyQJk=";
|
||||
};
|
||||
|
||||
build-system = [
|
||||
setuptools
|
||||
];
|
||||
build-system = [ setuptools ];
|
||||
|
||||
buildInputs = [
|
||||
libxml2
|
||||
@ -1197,9 +1169,7 @@ buildPythonPackage rec {
|
||||
hash = "sha256-9ru2r6kwhUCaskiFoaPNuJCfCVoUL01J40byvRt4kHQ=";
|
||||
};
|
||||
|
||||
build-system = [
|
||||
setuptools
|
||||
];
|
||||
build-system = [ setuptools ];
|
||||
|
||||
buildInputs = [
|
||||
fftw
|
||||
@ -1307,11 +1277,7 @@ To use `pytestCheckHook`, add it to `nativeCheckInputs`.
|
||||
Adding `pytest` is not required, since it is included with `pytestCheckHook`.
|
||||
|
||||
```nix
|
||||
{
|
||||
nativeCheckInputs = [
|
||||
pytestCheckHook
|
||||
];
|
||||
}
|
||||
{ nativeCheckInputs = [ pytestCheckHook ]; }
|
||||
```
|
||||
|
||||
`pytestCheckHook` recognizes the following attributes:
|
||||
@ -1340,9 +1306,7 @@ The following example demonstrates usage of various `pytestCheckHook` attributes
|
||||
|
||||
```nix
|
||||
{
|
||||
nativeCheckInputs = [
|
||||
pytestCheckHook
|
||||
];
|
||||
nativeCheckInputs = [ pytestCheckHook ];
|
||||
|
||||
# Allow running the following test paths and test objects.
|
||||
enabledTestPaths = [
|
||||
@ -1402,9 +1366,7 @@ by disabling tests that match both `"Foo"` **and** `"bar"`:
|
||||
{
|
||||
__structuredAttrs = true;
|
||||
|
||||
disabledTests = [
|
||||
"Foo and bar"
|
||||
];
|
||||
disabledTests = [ "Foo and bar" ];
|
||||
}
|
||||
```
|
||||
|
||||
@ -1414,20 +1376,19 @@ This is especially helpful to select tests or specify flags conditionally:
|
||||
|
||||
```nix
|
||||
{
|
||||
disabledTests =
|
||||
[
|
||||
# touches network
|
||||
"download"
|
||||
"update"
|
||||
]
|
||||
++ lib.optionals (pythonAtLeast "3.8") [
|
||||
# broken due to python3.8 async changes
|
||||
"async"
|
||||
]
|
||||
++ lib.optionals stdenv.buildPlatform.isDarwin [
|
||||
# can fail when building with other packages
|
||||
"socket"
|
||||
];
|
||||
disabledTests = [
|
||||
# touches network
|
||||
"download"
|
||||
"update"
|
||||
]
|
||||
++ lib.optionals (pythonAtLeast "3.8") [
|
||||
# broken due to python3.8 async changes
|
||||
"async"
|
||||
]
|
||||
++ lib.optionals stdenv.buildPlatform.isDarwin [
|
||||
# can fail when building with other packages
|
||||
"socket"
|
||||
];
|
||||
}
|
||||
```
|
||||
|
||||
@ -1492,9 +1453,7 @@ we can do:
|
||||
"pkg1"
|
||||
"pkg3"
|
||||
];
|
||||
pythonRemoveDeps = [
|
||||
"pkg2"
|
||||
];
|
||||
pythonRemoveDeps = [ "pkg2" ];
|
||||
}
|
||||
```
|
||||
|
||||
@ -1509,9 +1468,7 @@ Another option is to pass `true`, that will relax/remove all dependencies, for
|
||||
example:
|
||||
|
||||
```nix
|
||||
{
|
||||
pythonRelaxDeps = true;
|
||||
}
|
||||
{ pythonRelaxDeps = true; }
|
||||
```
|
||||
|
||||
which would result in the following `requirements.txt` file:
|
||||
@ -1547,9 +1504,7 @@ automatically add `pythonRelaxDepsHook` if either `pythonRelaxDeps` or
|
||||
|
||||
```nix
|
||||
{
|
||||
nativeCheckInputs = [
|
||||
unittestCheckHook
|
||||
];
|
||||
nativeCheckInputs = [ unittestCheckHook ];
|
||||
|
||||
unittestFlags = [
|
||||
"-s"
|
||||
@ -1575,9 +1530,7 @@ render them using the default `html` style.
|
||||
"doc"
|
||||
];
|
||||
|
||||
nativeBuildInputs = [
|
||||
sphinxHook
|
||||
];
|
||||
nativeBuildInputs = [ sphinxHook ];
|
||||
}
|
||||
```
|
||||
|
||||
@ -1652,9 +1605,7 @@ buildPythonPackage rec {
|
||||
hash = "sha256-CP3V73yWSArRHBLUct4hrNMjWZlvaaUlkpm1QP66RWA=";
|
||||
};
|
||||
|
||||
build-system = [
|
||||
setuptools
|
||||
];
|
||||
build-system = [ setuptools ];
|
||||
|
||||
meta = {
|
||||
changelog = "https://github.com/pytoolz/toolz/releases/tag/${version}";
|
||||
@ -1717,14 +1668,10 @@ with import <nixpkgs> { };
|
||||
});
|
||||
};
|
||||
in
|
||||
pkgs.python310.override {
|
||||
inherit packageOverrides;
|
||||
};
|
||||
pkgs.python310.override { inherit packageOverrides; };
|
||||
|
||||
in
|
||||
python.withPackages (ps: [
|
||||
ps.pandas
|
||||
])
|
||||
python.withPackages (ps: [ ps.pandas ])
|
||||
).env
|
||||
```
|
||||
|
||||
@ -1743,16 +1690,9 @@ with import <nixpkgs> { };
|
||||
|
||||
(
|
||||
let
|
||||
packageOverrides = self: super: {
|
||||
scipy = super.scipy_0_17;
|
||||
};
|
||||
packageOverrides = self: super: { scipy = super.scipy_0_17; };
|
||||
in
|
||||
(pkgs.python310.override {
|
||||
inherit packageOverrides;
|
||||
}).withPackages
|
||||
(ps: [
|
||||
ps.blaze
|
||||
])
|
||||
(pkgs.python310.override { inherit packageOverrides; }).withPackages (ps: [ ps.blaze ])
|
||||
).env
|
||||
```
|
||||
|
||||
@ -2000,11 +1940,7 @@ this snippet:
|
||||
|
||||
```nix
|
||||
{
|
||||
myPythonPackages = python3Packages.override {
|
||||
overrides = self: super: {
|
||||
twisted = <...>;
|
||||
};
|
||||
};
|
||||
myPythonPackages = python3Packages.override { overrides = self: super: { twisted = <...>; }; };
|
||||
}
|
||||
```
|
||||
|
||||
@ -2098,7 +2034,8 @@ and letting the package requiring the extra add the list to its dependencies
|
||||
{
|
||||
dependencies = [
|
||||
# ...
|
||||
] ++ dask.optional-dependencies.complete;
|
||||
]
|
||||
++ dask.optional-dependencies.complete;
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -52,7 +52,7 @@ Add entries to `qtWrapperArgs` are to modify the wrappers created by
|
||||
stdenv.mkDerivation {
|
||||
# ...
|
||||
nativeBuildInputs = [ qt6.wrapQtAppsHook ];
|
||||
qtWrapperArgs = [ ''--prefix PATH : /path/to/bin'' ];
|
||||
qtWrapperArgs = [ "--prefix PATH : /path/to/bin" ];
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -172,7 +172,9 @@ let
|
||||
myRuby = pkgs.ruby.override {
|
||||
defaultGemConfig = pkgs.defaultGemConfig // {
|
||||
pg = attrs: {
|
||||
buildFlags = [ "--with-pg-config=${pkgs."postgresql_${pg_version}".pg_config}/bin/pg_config" ];
|
||||
buildFlags = [
|
||||
"--with-pg-config=${pkgs."postgresql_${pg_version}".pg_config}/bin/pg_config"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
@ -193,7 +195,9 @@ let
|
||||
gemdir = ./.;
|
||||
gemConfig = pkgs.defaultGemConfig // {
|
||||
pg = attrs: {
|
||||
buildFlags = [ "--with-pg-config=${pkgs."postgresql_${pg_version}".pg_config}/bin/pg_config" ];
|
||||
buildFlags = [
|
||||
"--with-pg-config=${pkgs."postgresql_${pg_version}".pg_config}/bin/pg_config"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -62,9 +62,7 @@ hash using `nix-hash --to-sri --type sha256 "<original sha256>"`.
|
||||
:::
|
||||
|
||||
```nix
|
||||
{
|
||||
cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8=";
|
||||
}
|
||||
{ cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8="; }
|
||||
```
|
||||
|
||||
If this method does not work, you can resort to copying the `Cargo.lock` file into nixpkgs
|
||||
@ -77,9 +75,7 @@ then be taken from the failed build. A fake hash can be used for
|
||||
`cargoHash` as follows:
|
||||
|
||||
```nix
|
||||
{
|
||||
cargoHash = lib.fakeHash;
|
||||
}
|
||||
{ cargoHash = lib.fakeHash; }
|
||||
```
|
||||
|
||||
Per the instructions in the [Cargo Book](https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html)
|
||||
@ -478,11 +474,7 @@ and fetches every dependency as a separate fixed-output derivation.
|
||||
`importCargoLock` can be used as follows:
|
||||
|
||||
```nix
|
||||
{
|
||||
cargoDeps = rustPlatform.importCargoLock {
|
||||
lockFile = ./Cargo.lock;
|
||||
};
|
||||
}
|
||||
{ cargoDeps = rustPlatform.importCargoLock { lockFile = ./Cargo.lock; }; }
|
||||
```
|
||||
|
||||
If the `Cargo.lock` file includes git dependencies, then their output
|
||||
@ -1000,8 +992,8 @@ let
|
||||
cargo = rust-bin.selectLatestNightlyWith (toolchain: toolchain.default);
|
||||
rustc = rust-bin.selectLatestNightlyWith (toolchain: toolchain.default);
|
||||
};
|
||||
in
|
||||
|
||||
in
|
||||
rustPlatform.buildRustPackage (finalAttrs: {
|
||||
pname = "ripgrep";
|
||||
version = "14.1.1";
|
||||
|
@ -25,7 +25,6 @@ primarily for Chez Scheme in a derivation, one might write:
|
||||
akkuPackages.chez-srfi
|
||||
];
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The package index is located in `pkgs/tools/package-management/akku`
|
||||
|
@ -80,8 +80,8 @@ expression. The next step is to write that expression:
|
||||
let
|
||||
# Pass the generated files to the helper.
|
||||
generated = swiftpm2nix.helpers ./nix;
|
||||
in
|
||||
|
||||
in
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "myproject";
|
||||
version = "0.0.0";
|
||||
@ -131,17 +131,13 @@ stdenv.mkDerivation (finalAttrs: {
|
||||
If you'd like to build a different configuration than `release`:
|
||||
|
||||
```nix
|
||||
{
|
||||
swiftpmBuildConfig = "debug";
|
||||
}
|
||||
{ swiftpmBuildConfig = "debug"; }
|
||||
```
|
||||
|
||||
It is also possible to provide additional flags to `swift build`:
|
||||
|
||||
```nix
|
||||
{
|
||||
swiftpmFlags = [ "--disable-dead-strip" ];
|
||||
}
|
||||
{ swiftpmFlags = [ "--disable-dead-strip" ]; }
|
||||
```
|
||||
|
||||
The default `buildPhase` already passes `-j` for parallel building.
|
||||
@ -155,9 +151,7 @@ Including `swiftpm` in your `nativeBuildInputs` also provides a default
|
||||
`checkPhase`, but it must be enabled with:
|
||||
|
||||
```nix
|
||||
{
|
||||
doCheck = true;
|
||||
}
|
||||
{ doCheck = true; }
|
||||
```
|
||||
|
||||
This essentially runs: `swift test -c release`
|
||||
|
@ -214,24 +214,20 @@ let
|
||||
|
||||
latex_with_foiltex = texliveSmall.withPackages (_: [ foiltex ]);
|
||||
in
|
||||
runCommand "test.pdf"
|
||||
{
|
||||
nativeBuildInputs = [ latex_with_foiltex ];
|
||||
}
|
||||
''
|
||||
cat >test.tex <<EOF
|
||||
\documentclass{foils}
|
||||
runCommand "test.pdf" { nativeBuildInputs = [ latex_with_foiltex ]; } ''
|
||||
cat >test.tex <<EOF
|
||||
\documentclass{foils}
|
||||
|
||||
\title{Presentation title}
|
||||
\date{}
|
||||
\title{Presentation title}
|
||||
\date{}
|
||||
|
||||
\begin{document}
|
||||
\maketitle
|
||||
\end{document}
|
||||
EOF
|
||||
pdflatex test.tex
|
||||
cp test.pdf $out
|
||||
''
|
||||
\begin{document}
|
||||
\maketitle
|
||||
\end{document}
|
||||
EOF
|
||||
pdflatex test.tex
|
||||
cp test.pdf $out
|
||||
''
|
||||
```
|
||||
|
||||
## LuaLaTeX font cache {#sec-language-texlive-lualatex-font-cache}
|
||||
@ -239,15 +235,11 @@ runCommand "test.pdf"
|
||||
The font cache for LuaLaTeX is written to `$HOME`.
|
||||
Therefore, it is necessary to set `$HOME` to a writable path, e.g. [before using LuaLaTeX in nix derivations](https://github.com/NixOS/nixpkgs/issues/180639):
|
||||
```nix
|
||||
runCommandNoCC "lualatex-hello-world"
|
||||
{
|
||||
buildInputs = [ texliveFull ];
|
||||
}
|
||||
''
|
||||
mkdir $out
|
||||
echo '\documentclass{article} \begin{document} Hello world \end{document}' > main.tex
|
||||
env HOME=$(mktemp -d) lualatex -interaction=nonstopmode -output-format=pdf -output-directory=$out ./main.tex
|
||||
''
|
||||
runCommandNoCC "lualatex-hello-world" { buildInputs = [ texliveFull ]; } ''
|
||||
mkdir $out
|
||||
echo '\documentclass{article} \begin{document} Hello world \end{document}' > main.tex
|
||||
env HOME=$(mktemp -d) lualatex -interaction=nonstopmode -output-format=pdf -output-directory=$out ./main.tex
|
||||
''
|
||||
```
|
||||
|
||||
Additionally, [the cache of a user can diverge from the nix store](https://github.com/NixOS/nixpkgs/issues/278718).
|
||||
|
@ -25,9 +25,7 @@ typst.withPackages.override
|
||||
typstPackages = old.typstPackages.extend (
|
||||
_: previous: {
|
||||
polylux_0_4_0 = previous.polylux_0_4_0.overrideAttrs (oldPolylux: {
|
||||
src = oldPolylux.src.overrideAttrs {
|
||||
outputHash = YourUpToDatePolyluxHash;
|
||||
};
|
||||
src = oldPolylux.src.overrideAttrs { outputHash = YourUpToDatePolyluxHash; };
|
||||
});
|
||||
}
|
||||
);
|
||||
@ -47,10 +45,7 @@ typst.withPackages.override
|
||||
Here's how to define a custom Typst package:
|
||||
|
||||
```nix
|
||||
{
|
||||
buildTypstPackage,
|
||||
typstPackages,
|
||||
}:
|
||||
{ buildTypstPackage, typstPackages }:
|
||||
|
||||
buildTypstPackage (finalAttrs: {
|
||||
pname = "my-typst-package";
|
||||
|
@ -29,9 +29,7 @@ The default configuration directory is `~/.cataclysm-dda`. If you prefer
|
||||
`$XDG_CONFIG_HOME/cataclysm-dda`, override the derivation:
|
||||
|
||||
```nix
|
||||
cataclysm-dda.override {
|
||||
useXdgDir = true;
|
||||
}
|
||||
cataclysm-dda.override { useXdgDir = true; }
|
||||
```
|
||||
|
||||
## Important note for overriding packages {#important-note-for-overriding-packages}
|
||||
@ -62,10 +60,10 @@ let
|
||||
|
||||
# or by using a helper function `attachPkgs`.
|
||||
goodExample2 = attachPkgs pkgs myCDDA;
|
||||
in
|
||||
|
||||
# badExample # parallel building disabled
|
||||
# goodExample1.withMods (_: []) # parallel building enabled
|
||||
# badExample # parallel building disabled
|
||||
# goodExample1.withMods (_: []) # parallel building enabled
|
||||
in
|
||||
goodExample2.withMods (_: [ ]) # parallel building enabled
|
||||
```
|
||||
|
||||
@ -75,11 +73,7 @@ To install Cataclysm DDA with mods of your choice, you can use `withMods`
|
||||
attribute:
|
||||
|
||||
```nix
|
||||
cataclysm-dda.withMods (
|
||||
mods: with mods; [
|
||||
tileset.UndeadPeople
|
||||
]
|
||||
)
|
||||
cataclysm-dda.withMods (mods: with mods; [ tileset.UndeadPeople ])
|
||||
```
|
||||
|
||||
All mods, soundpacks, and tilesets available in nixpkgs are found in
|
||||
|
@ -8,9 +8,7 @@ To enable them, use an override on `inkscape-with-extensions`:
|
||||
|
||||
```nix
|
||||
inkscape-with-extensions.override {
|
||||
inkscapeExtensions = with inkscape-extensions; [
|
||||
inkstitch
|
||||
];
|
||||
inkscapeExtensions = with inkscape-extensions; [ inkstitch ];
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -3,7 +3,5 @@
|
||||
Kakoune can be built to autoload plugins:
|
||||
|
||||
```nix
|
||||
(kakoune.override {
|
||||
plugins = with pkgs.kakounePlugins; [ parinfer-rust ];
|
||||
})
|
||||
(kakoune.override { plugins = with pkgs.kakounePlugins; [ parinfer-rust ]; })
|
||||
```
|
||||
|
@ -77,9 +77,7 @@ A plugin can be any kind of derivation, the only requirement is that it should a
|
||||
If the plugin is itself a Perl package that needs to be imported from other plugins or scripts, add the following passthrough:
|
||||
|
||||
```nix
|
||||
{
|
||||
passthru.perlPackages = [ "self" ];
|
||||
}
|
||||
{ passthru.perlPackages = [ "self" ]; }
|
||||
```
|
||||
|
||||
This will make the urxvt wrapper pick up the dependency and set up the Perl path accordingly.
|
||||
|
@ -496,8 +496,8 @@
|
||||
```nix
|
||||
let
|
||||
pkgs = import <nixpkgs> { };
|
||||
in
|
||||
|
||||
in
|
||||
pkgs.caddy.withPlugins {
|
||||
plugins = [
|
||||
# tagged upstream
|
||||
|
@ -135,9 +135,7 @@ Some frequently encountered problems when packaging for cross-compilation should
|
||||
Many packages assume that an unprefixed binutils (`cc`/`ar`/`ld` etc.) is available, but Nix doesn't provide one. It only provides a prefixed one, just as it only does for all the other binutils programs. It may be necessary to patch the package to fix the build system to use a prefix. For instance, instead of `cc`, use `${stdenv.cc.targetPrefix}cc`.
|
||||
|
||||
```nix
|
||||
{
|
||||
makeFlags = [ "CC=${stdenv.cc.targetPrefix}cc" ];
|
||||
}
|
||||
{ makeFlags = [ "CC=${stdenv.cc.targetPrefix}cc" ]; }
|
||||
```
|
||||
|
||||
#### How do I avoid compiling a GCC cross-compiler from source? {#cross-qa-avoid-compiling-gcc-cross-compiler}
|
||||
@ -152,9 +150,7 @@ $ nix-build '<nixpkgs>' -A pkgsCross.raspberryPi.hello
|
||||
Add the following to your `mkDerivation` invocation.
|
||||
|
||||
```nix
|
||||
{
|
||||
depsBuildBuild = [ buildPackages.stdenv.cc ];
|
||||
}
|
||||
{ depsBuildBuild = [ buildPackages.stdenv.cc ]; }
|
||||
```
|
||||
|
||||
#### My package’s testsuite needs to run host platform code. {#cross-testsuite-runs-host-code}
|
||||
@ -162,9 +158,7 @@ Add the following to your `mkDerivation` invocation.
|
||||
Add the following to your `mkDerivation` invocation.
|
||||
|
||||
```nix
|
||||
{
|
||||
doCheck = stdenv.buildPlatform.canExecute stdenv.hostPlatform;
|
||||
}
|
||||
{ doCheck = stdenv.buildPlatform.canExecute stdenv.hostPlatform; }
|
||||
```
|
||||
|
||||
#### Package using Meson needs to run binaries for the host platform during build. {#cross-meson-runs-host-code}
|
||||
@ -175,13 +169,10 @@ e.g.
|
||||
|
||||
```nix
|
||||
{
|
||||
nativeBuildInputs =
|
||||
[
|
||||
meson
|
||||
]
|
||||
++ lib.optionals (!stdenv.buildPlatform.canExecute stdenv.hostPlatform) [
|
||||
mesonEmulatorHook
|
||||
];
|
||||
nativeBuildInputs = [
|
||||
meson
|
||||
]
|
||||
++ lib.optionals (!stdenv.buildPlatform.canExecute stdenv.hostPlatform) [ mesonEmulatorHook ];
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -108,9 +108,7 @@ The *priority* of the package, used by `nix-env` to resolve file name conflicts
The list of Nix platform types on which the package is supported. Hydra builds packages according to the platform specified. If no platform is specified, the package does not have prebuilt binaries. An example is:

```nix
{
meta.platforms = lib.platforms.linux;
}
{ meta.platforms = lib.platforms.linux; }
```

Attribute Set `lib.platforms` defines [various common lists](https://github.com/NixOS/nixpkgs/blob/master/lib/systems/doubles.nix) of platforms types.
@ -164,9 +162,7 @@ This means that `broken` can be used to express constraints, for example:
- Does not cross compile

```nix
{
meta.broken = !(stdenv.buildPlatform.canExecute stdenv.hostPlatform);
}
{ meta.broken = !(stdenv.buildPlatform.canExecute stdenv.hostPlatform); }
```

- Broken if all of a certain set of its dependencies are broken

@ -541,11 +541,7 @@ let
# An example of an attribute containing a function
passthru.appendPackages =
packages':
finalAttrs.finalPackage.overrideAttrs (
newSelf: super: {
packages = super.packages ++ packages';
}
);
finalAttrs.finalPackage.overrideAttrs (newSelf: super: { packages = super.packages ++ packages'; });

# For illustration purposes; referenced as
# `(pkg.overrideAttrs(x)).finalAttrs` etc in the text below.
@ -787,9 +783,7 @@ The file name of the Makefile.
A list of strings passed as additional flags to `make`. These flags are also used by the default install and check phase. For setting make flags specific to the build phase, use `buildFlags` (see below).

```nix
{
makeFlags = [ "PREFIX=$(out)" ];
}
{ makeFlags = [ "PREFIX=$(out)" ]; }
```

::: {.note}
@ -839,9 +833,7 @@ It is highly recommended, for packages' sources that are not distributed with an
Controls whether the check phase is executed. By default it is skipped, but if `doCheck` is set to true, the check phase is usually executed. Thus you should set

```nix
{
doCheck = true;
}
{ doCheck = true; }
```

in the derivation to enable checks. The exception is cross compilation. Cross compiled builds never run tests, no matter how `doCheck` is set, as the newly-built program won’t run on the platform used to build it.
@ -894,9 +886,7 @@ See the [build phase](#var-stdenv-makeFlags) for details.
The make targets that perform the installation. Defaults to `install`. Example:

```nix
{
installTargets = "install-bin install-doc";
}
{ installTargets = "install-bin install-doc"; }
```

##### `installFlags` / `installFlagsArray` {#var-stdenv-installFlags}
@ -1085,9 +1075,7 @@ It is often better to add tests that are not part of the source distribution to
Controls whether the installCheck phase is executed. By default it is skipped, but if `doInstallCheck` is set to true, the installCheck phase is usually executed. Thus you should set

```nix
{
doInstallCheck = true;
}
{ doInstallCheck = true; }
```

in the derivation to enable install checks. The exception is cross compilation. Cross compiled builds never run tests, no matter how `doInstallCheck` is set, as the newly-built program won’t run on the platform used to build it.

@ -21,9 +21,7 @@ In particular, all build-time dependencies are checked.
A user's Nixpkgs configuration is stored in a user-specific configuration file located at `~/.config/nixpkgs/config.nix`. For example:

```nix
{
allowUnfree = true;
}
{ allowUnfree = true; }
```

:::{.caution}
@ -44,9 +42,7 @@ There are two ways to try compiling a package which has been marked as broken.
- For permanently allowing broken packages to be built, you may add `allowBroken = true;` to your user's configuration file, like this:

```nix
{
allowBroken = true;
}
{ allowBroken = true; }
```

@ -63,9 +59,7 @@ There are also two ways to try compiling a package which has been marked as unsu
- For permanently allowing unsupported packages to be built, you may add `allowUnsupportedSystem = true;` to your user's configuration file, like this:

```nix
{
allowUnsupportedSystem = true;
}
{ allowUnsupportedSystem = true; }
```

The difference between a package being unsupported on some system and being broken is admittedly a bit fuzzy. If a program *ought* to work on a certain platform, but doesn't, the platform should be included in `meta.platforms`, but marked as broken with e.g. `meta.broken = !hostPlatform.isWindows`. Of course, this begs the question of what "ought" means exactly. That is left to the package maintainer.
@ -90,9 +84,7 @@ There are several ways to tweak how Nix handles a package which has been marked
This option is a function which accepts a package as a parameter, and returns a boolean. The following example configuration accepts a package and always returns false:

```nix
{
allowUnfreePredicate = (pkg: false);
}
{ allowUnfreePredicate = (pkg: false); }
```

For a more useful example, try the following. This configuration only allows unfree packages named roon-server and visual studio code:
@ -151,11 +143,7 @@ There are several ways to tweak how Nix handles a package which has been marked
The following example configuration permits the installation of the hypothetically insecure package `hello`, version `1.2.3`:

```nix
{
permittedInsecurePackages = [
"hello-1.2.3"
];
}
{ permittedInsecurePackages = [ "hello-1.2.3" ]; }
```

- It is also possible to create a custom policy around which insecure packages to allow and deny, by overriding the `allowInsecurePredicate` configuration option.
@ -165,13 +153,7 @@ There are several ways to tweak how Nix handles a package which has been marked
The following configuration example allows any version of the `ovftool` package:

```nix
{
allowInsecurePredicate =
pkg:
builtins.elem (lib.getName pkg) [
"ovftool"
];
}
{ allowInsecurePredicate = pkg: builtins.elem (lib.getName pkg) [ "ovftool" ]; }
```

Note that `permittedInsecurePackages` is only checked if `allowInsecurePredicate` is not specified.

@ -48,12 +48,8 @@ Overlays are Nix functions which accept two arguments, conventionally called `se
self: super:

{
boost = super.boost.override {
python = self.python3;
};
rr = super.callPackage ./pkgs/rr {
stdenv = self.stdenv_32bit;
};
boost = super.boost.override { python = self.python3; };
rr = super.callPackage ./pkgs/rr { stdenv = self.stdenv_32bit; };
}
```

@ -99,13 +95,9 @@ Introduced in [PR #83888](https://github.com/NixOS/nixpkgs/pull/83888), we are a
self: super:

{
blas = super.blas.override {
blasProvider = self.mkl;
};
blas = super.blas.override { blasProvider = self.mkl; };

lapack = super.lapack.override {
lapackProvider = self.mkl;
};
lapack = super.lapack.override { lapackProvider = self.mkl; };
}
```

@ -123,13 +115,9 @@ To override `blas` and `lapack` with its reference implementations (i.e. for dev
self: super:

{
blas = super.blas.override {
blasProvider = self.lapack-reference;
};
blas = super.blas.override { blasProvider = self.lapack-reference; };

lapack = super.lapack.override {
lapackProvider = self.lapack-reference;
};
lapack = super.lapack.override { lapackProvider = self.lapack-reference; };
}
```

@ -31,11 +31,7 @@ pkgs.foo.override (previous: {

```nix
import pkgs.path {
overlays = [
(self: super: {
foo = super.foo.override { barSupport = true; };
})
];
overlays = [ (self: super: { foo = super.foo.override { barSupport = true; }; }) ];
}
```

@ -67,9 +63,7 @@ Example usages:
```nix
{
helloBar = pkgs.hello.overrideAttrs (
finalAttrs: previousAttrs: {
pname = previousAttrs.pname + "-bar";
}
finalAttrs: previousAttrs: { pname = previousAttrs.pname + "-bar"; }
);
}
```
@ -85,11 +79,7 @@ If only a one-argument function is written, the argument has the meaning of `pre
Function arguments can be omitted entirely if there is no need to access `previousAttrs` or `finalAttrs`.

```nix
{
helloWithDebug = pkgs.hello.overrideAttrs {
separateDebugInfo = true;
};
}
{ helloWithDebug = pkgs.hello.overrideAttrs { separateDebugInfo = true; }; }
```

In the above example, the `separateDebugInfo` attribute is overridden to be true, thus building debug info for `helloWithDebug`.

@ -446,16 +446,15 @@ rec {
|
||||
let
|
||||
outputs = drv.outputs or [ "out" ];
|
||||
|
||||
commonAttrs =
|
||||
{
|
||||
inherit (drv) name system meta;
|
||||
inherit outputs;
|
||||
}
|
||||
// optionalAttrs (drv._hydraAggregate or false) {
|
||||
_hydraAggregate = true;
|
||||
constituents = map hydraJob (flatten drv.constituents);
|
||||
}
|
||||
// (listToAttrs outputsList);
|
||||
commonAttrs = {
|
||||
inherit (drv) name system meta;
|
||||
inherit outputs;
|
||||
}
|
||||
// optionalAttrs (drv._hydraAggregate or false) {
|
||||
_hydraAggregate = true;
|
||||
constituents = map hydraJob (flatten drv.constituents);
|
||||
}
|
||||
// (listToAttrs outputsList);
|
||||
|
||||
makeOutput =
|
||||
outputName:
|
||||
|
@ -355,8 +355,11 @@ let
|
||||
mergeAttrByFunc =
|
||||
x: y:
|
||||
let
|
||||
mergeAttrBy2 =
|
||||
{ mergeAttrBy = mergeAttrs; } // (maybeAttr "mergeAttrBy" { } x) // (maybeAttr "mergeAttrBy" { } y);
|
||||
mergeAttrBy2 = {
|
||||
mergeAttrBy = mergeAttrs;
|
||||
}
|
||||
// (maybeAttr "mergeAttrBy" { } x)
|
||||
// (maybeAttr "mergeAttrBy" { } y);
|
||||
in
|
||||
foldr mergeAttrs { } [
|
||||
x
|
||||
|
@ -886,19 +886,18 @@ let
|
||||
path = showOption loc;
|
||||
depth = length loc;
|
||||
|
||||
paragraphs =
|
||||
[
|
||||
"In module ${file}: expected an option declaration at option path `${path}` but got an attribute set with type ${actualTag}"
|
||||
]
|
||||
++ optional (actualTag == "option-type") ''
|
||||
When declaring an option, you must wrap the type in a `mkOption` call. It should look somewhat like:
|
||||
${comment}
|
||||
${name} = lib.mkOption {
|
||||
description = ...;
|
||||
type = <the type you wrote for ${name}>;
|
||||
...
|
||||
};
|
||||
'';
|
||||
paragraphs = [
|
||||
"In module ${file}: expected an option declaration at option path `${path}` but got an attribute set with type ${actualTag}"
|
||||
]
|
||||
++ optional (actualTag == "option-type") ''
|
||||
When declaring an option, you must wrap the type in a `mkOption` call. It should look somewhat like:
|
||||
${comment}
|
||||
${name} = lib.mkOption {
|
||||
description = ...;
|
||||
type = <the type you wrote for ${name}>;
|
||||
...
|
||||
};
|
||||
'';
|
||||
|
||||
# Ideally we'd know the exact syntax they used, but short of that,
|
||||
# we can only reliably repeat the last. However, we repeat the
|
||||
@ -1627,25 +1626,24 @@ let
|
||||
) from
|
||||
);
|
||||
|
||||
config =
|
||||
{
|
||||
warnings = filter (x: x != "") (
|
||||
map (
|
||||
f:
|
||||
let
|
||||
val = getAttrFromPath f config;
|
||||
opt = getAttrFromPath f options;
|
||||
in
|
||||
optionalString (val != "_mkMergedOptionModule")
|
||||
"The option `${showOption f}' defined in ${showFiles opt.files} has been changed to `${showOption to}' that has a different type. Please read `${showOption to}' documentation and update your configuration accordingly."
|
||||
) from
|
||||
);
|
||||
}
|
||||
// setAttrByPath to (
|
||||
mkMerge (
|
||||
optional (any (f: (getAttrFromPath f config) != "_mkMergedOptionModule") from) (mergeFn config)
|
||||
)
|
||||
config = {
|
||||
warnings = filter (x: x != "") (
|
||||
map (
|
||||
f:
|
||||
let
|
||||
val = getAttrFromPath f config;
|
||||
opt = getAttrFromPath f options;
|
||||
in
|
||||
optionalString (val != "_mkMergedOptionModule")
|
||||
"The option `${showOption f}' defined in ${showFiles opt.files} has been changed to `${showOption to}' that has a different type. Please read `${showOption to}' documentation and update your configuration accordingly."
|
||||
) from
|
||||
);
|
||||
}
|
||||
// setAttrByPath to (
|
||||
mkMerge (
|
||||
optional (any (f: (getAttrFromPath f config) != "_mkMergedOptionModule") from) (mergeFn config)
|
||||
)
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -572,30 +572,29 @@ rec {
|
||||
opt:
|
||||
let
|
||||
name = showOption opt.loc;
|
||||
docOption =
|
||||
{
|
||||
loc = opt.loc;
|
||||
inherit name;
|
||||
description = opt.description or null;
|
||||
declarations = filter (x: x != unknownModule) opt.declarations;
|
||||
internal = opt.internal or false;
|
||||
visible = if (opt ? visible && opt.visible == "shallow") then true else opt.visible or true;
|
||||
readOnly = opt.readOnly or false;
|
||||
type = opt.type.description or "unspecified";
|
||||
}
|
||||
// optionalAttrs (opt ? example) {
|
||||
example = builtins.addErrorContext "while evaluating the example of option `${name}`" (
|
||||
renderOptionValue opt.example
|
||||
);
|
||||
}
|
||||
// optionalAttrs (opt ? defaultText || opt ? default) {
|
||||
default = builtins.addErrorContext "while evaluating the ${
|
||||
if opt ? defaultText then "defaultText" else "default value"
|
||||
} of option `${name}`" (renderOptionValue (opt.defaultText or opt.default));
|
||||
}
|
||||
// optionalAttrs (opt ? relatedPackages && opt.relatedPackages != null) {
|
||||
inherit (opt) relatedPackages;
|
||||
};
|
||||
docOption = {
|
||||
loc = opt.loc;
|
||||
inherit name;
|
||||
description = opt.description or null;
|
||||
declarations = filter (x: x != unknownModule) opt.declarations;
|
||||
internal = opt.internal or false;
|
||||
visible = if (opt ? visible && opt.visible == "shallow") then true else opt.visible or true;
|
||||
readOnly = opt.readOnly or false;
|
||||
type = opt.type.description or "unspecified";
|
||||
}
|
||||
// optionalAttrs (opt ? example) {
|
||||
example = builtins.addErrorContext "while evaluating the example of option `${name}`" (
|
||||
renderOptionValue opt.example
|
||||
);
|
||||
}
|
||||
// optionalAttrs (opt ? defaultText || opt ? default) {
|
||||
default = builtins.addErrorContext "while evaluating the ${
|
||||
if opt ? defaultText then "defaultText" else "default value"
|
||||
} of option `${name}`" (renderOptionValue (opt.defaultText or opt.default));
|
||||
}
|
||||
// optionalAttrs (opt ? relatedPackages && opt.relatedPackages != null) {
|
||||
inherit (opt) relatedPackages;
|
||||
};
|
||||
|
||||
subOptions =
|
||||
let
|
||||
|
@ -14,14 +14,13 @@
|
||||
|
||||
pkgs.runCommand "lib-path-tests"
|
||||
{
|
||||
nativeBuildInputs =
|
||||
[
|
||||
nixVersions.stable
|
||||
]
|
||||
++ (with pkgs; [
|
||||
jq
|
||||
bc
|
||||
]);
|
||||
nativeBuildInputs = [
|
||||
nixVersions.stable
|
||||
]
|
||||
++ (with pkgs; [
|
||||
jq
|
||||
bc
|
||||
]);
|
||||
}
|
||||
''
|
||||
# Needed to make Nix evaluation work
|
||||
|
@ -477,7 +477,8 @@ rec {
|
||||
"armv9.1-a" = [
|
||||
"armv9-a"
|
||||
"armv8.6-a"
|
||||
] ++ inferiors."armv8.6-a";
|
||||
]
|
||||
++ inferiors."armv8.6-a";
|
||||
"armv9.2-a" = lib.unique (
|
||||
[
|
||||
"armv9.1-a"
|
||||
@ -503,12 +504,14 @@ rec {
|
||||
"armv8.2-a"
|
||||
"cortex-a53"
|
||||
"cortex-a72"
|
||||
] ++ inferiors."armv8.2-a";
|
||||
]
|
||||
++ inferiors."armv8.2-a";
|
||||
cortex-a76 = [
|
||||
"armv8.2-a"
|
||||
"cortex-a53"
|
||||
"cortex-a72"
|
||||
] ++ inferiors."armv8.2-a";
|
||||
]
|
||||
++ inferiors."armv8.2-a";
|
||||
|
||||
# Ampere
|
||||
ampere1 = withInferiors [
|
||||
|
@ -83,486 +83,485 @@ let
|
||||
# TODO: deprecate args.rustc in favour of args.rust after 23.05 is EOL.
|
||||
rust = args.rust or args.rustc or { };
|
||||
|
||||
final =
|
||||
{
|
||||
# Prefer to parse `config` as it is strictly more informative.
|
||||
parsed = parse.mkSystemFromString (args.config or allArgs.system);
|
||||
# This can be losslessly-extracted from `parsed` iff parsing succeeds.
|
||||
system = parse.doubleFromSystem final.parsed;
|
||||
# TODO: This currently can't be losslessly-extracted from `parsed`, for example
|
||||
# because of -mingw32.
|
||||
config = parse.tripleFromSystem final.parsed;
|
||||
# Determine whether we can execute binaries built for the provided platform.
|
||||
canExecute =
|
||||
platform:
|
||||
final.isAndroid == platform.isAndroid
|
||||
&& parse.isCompatible final.parsed.cpu platform.parsed.cpu
|
||||
&& final.parsed.kernel == platform.parsed.kernel
|
||||
&& (
|
||||
# Only perform this check when cpus have the same type;
|
||||
# assume compatible cpu have all the instructions included
|
||||
final.parsed.cpu == platform.parsed.cpu
|
||||
->
|
||||
# if both have gcc.arch defined, check whether final can execute the given platform
|
||||
(
|
||||
(final ? gcc.arch && platform ? gcc.arch)
|
||||
-> architectures.canExecute final.gcc.arch platform.gcc.arch
|
||||
)
|
||||
# if platform has gcc.arch defined but final doesn't, don't assume it can be executed
|
||||
|| (platform ? gcc.arch -> !(final ? gcc.arch))
|
||||
);
|
||||
final = {
|
||||
# Prefer to parse `config` as it is strictly more informative.
|
||||
parsed = parse.mkSystemFromString (args.config or allArgs.system);
|
||||
# This can be losslessly-extracted from `parsed` iff parsing succeeds.
|
||||
system = parse.doubleFromSystem final.parsed;
|
||||
# TODO: This currently can't be losslessly-extracted from `parsed`, for example
|
||||
# because of -mingw32.
|
||||
config = parse.tripleFromSystem final.parsed;
|
||||
# Determine whether we can execute binaries built for the provided platform.
|
||||
canExecute =
|
||||
platform:
|
||||
final.isAndroid == platform.isAndroid
|
||||
&& parse.isCompatible final.parsed.cpu platform.parsed.cpu
|
||||
&& final.parsed.kernel == platform.parsed.kernel
|
||||
&& (
|
||||
# Only perform this check when cpus have the same type;
|
||||
# assume compatible cpu have all the instructions included
|
||||
final.parsed.cpu == platform.parsed.cpu
|
||||
->
|
||||
# if both have gcc.arch defined, check whether final can execute the given platform
|
||||
(
|
||||
(final ? gcc.arch && platform ? gcc.arch)
|
||||
-> architectures.canExecute final.gcc.arch platform.gcc.arch
|
||||
)
|
||||
# if platform has gcc.arch defined but final doesn't, don't assume it can be executed
|
||||
|| (platform ? gcc.arch -> !(final ? gcc.arch))
|
||||
);
|
||||
|
||||
isCompatible =
|
||||
_:
|
||||
throw "2022-05-23: isCompatible has been removed in favor of canExecute, refer to the 22.11 changelog for details";
|
||||
# Derived meta-data
|
||||
useLLVM = final.isFreeBSD || final.isOpenBSD;
|
||||
isCompatible =
|
||||
_:
|
||||
throw "2022-05-23: isCompatible has been removed in favor of canExecute, refer to the 22.11 changelog for details";
|
||||
# Derived meta-data
|
||||
useLLVM = final.isFreeBSD || final.isOpenBSD;
|
||||
|
||||
libc =
|
||||
if final.isDarwin then
|
||||
"libSystem"
|
||||
else if final.isMinGW then
|
||||
"msvcrt"
|
||||
else if final.isWasi then
|
||||
"wasilibc"
|
||||
else if final.isWasm && !final.isWasi then
|
||||
null
|
||||
else if final.isRedox then
|
||||
"relibc"
|
||||
else if final.isMusl then
|
||||
"musl"
|
||||
else if final.isUClibc then
|
||||
"uclibc"
|
||||
else if final.isAndroid then
|
||||
"bionic"
|
||||
else if
|
||||
final.isLinux # default
|
||||
then
|
||||
"glibc"
|
||||
else if final.isFreeBSD then
|
||||
"fblibc"
|
||||
else if final.isOpenBSD then
|
||||
"oblibc"
|
||||
else if final.isNetBSD then
|
||||
"nblibc"
|
||||
else if final.isAvr then
|
||||
"avrlibc"
|
||||
else if final.isGhcjs then
|
||||
null
|
||||
else if final.isNone then
|
||||
"newlib"
|
||||
# TODO(@Ericson2314) think more about other operating systems
|
||||
else
|
||||
"native/impure";
|
||||
# Choose what linker we wish to use by default. Someday we might also
|
||||
# choose the C compiler, runtime library, C++ standard library, etc. in
|
||||
# this way, nice and orthogonally, and deprecate `useLLVM`. But due to
|
||||
# the monolithic GCC build we cannot actually make those choices
|
||||
# independently, so we are just doing `linker` and keeping `useLLVM` for
|
||||
# now.
|
||||
linker =
|
||||
if final.useLLVM or false then
|
||||
"lld"
|
||||
libc =
|
||||
if final.isDarwin then
|
||||
"libSystem"
|
||||
else if final.isMinGW then
|
||||
"msvcrt"
|
||||
else if final.isWasi then
|
||||
"wasilibc"
|
||||
else if final.isWasm && !final.isWasi then
|
||||
null
|
||||
else if final.isRedox then
|
||||
"relibc"
|
||||
else if final.isMusl then
|
||||
"musl"
|
||||
else if final.isUClibc then
|
||||
"uclibc"
|
||||
else if final.isAndroid then
|
||||
"bionic"
|
||||
else if
|
||||
final.isLinux # default
|
||||
then
|
||||
"glibc"
|
||||
else if final.isFreeBSD then
|
||||
"fblibc"
|
||||
else if final.isOpenBSD then
|
||||
"oblibc"
|
||||
else if final.isNetBSD then
|
||||
"nblibc"
|
||||
else if final.isAvr then
|
||||
"avrlibc"
|
||||
else if final.isGhcjs then
|
||||
null
|
||||
else if final.isNone then
|
||||
"newlib"
|
||||
# TODO(@Ericson2314) think more about other operating systems
|
||||
else
|
||||
"native/impure";
|
||||
# Choose what linker we wish to use by default. Someday we might also
|
||||
# choose the C compiler, runtime library, C++ standard library, etc. in
|
||||
# this way, nice and orthogonally, and deprecate `useLLVM`. But due to
|
||||
# the monolithic GCC build we cannot actually make those choices
|
||||
# independently, so we are just doing `linker` and keeping `useLLVM` for
|
||||
# now.
|
||||
linker =
|
||||
if final.useLLVM or false then
|
||||
"lld"
|
||||
else if final.isDarwin then
|
||||
"cctools"
|
||||
# "bfd" and "gold" both come from GNU binutils. The existence of Gold
|
||||
# is why we use the more obscure "bfd" and not "binutils" for this
|
||||
# choice.
|
||||
else
|
||||
"bfd";
|
||||
# The standard lib directory name that non-nixpkgs binaries distributed
|
||||
# for this platform normally assume.
|
||||
libDir =
|
||||
if final.isLinux then
|
||||
if final.isx86_64 || final.isMips64 || final.isPower64 then "lib64" else "lib"
|
||||
else
|
||||
null;
|
||||
extensions =
|
||||
optionalAttrs final.hasSharedLibraries {
|
||||
sharedLibrary =
|
||||
if final.isDarwin then
|
||||
".dylib"
|
||||
else if final.isWindows then
|
||||
".dll"
|
||||
else
|
||||
".so";
|
||||
}
|
||||
// {
|
||||
staticLibrary = if final.isWindows then ".lib" else ".a";
|
||||
library = if final.isStatic then final.extensions.staticLibrary else final.extensions.sharedLibrary;
|
||||
executable = if final.isWindows then ".exe" else "";
|
||||
};
|
||||
# Misc boolean options
|
||||
useAndroidPrebuilt = false;
|
||||
useiOSPrebuilt = false;
|
||||
|
||||
# Output from uname
|
||||
uname = {
|
||||
# uname -s
|
||||
system =
|
||||
{
|
||||
linux = "Linux";
|
||||
windows = "Windows";
|
||||
darwin = "Darwin";
|
||||
netbsd = "NetBSD";
|
||||
freebsd = "FreeBSD";
|
||||
openbsd = "OpenBSD";
|
||||
wasi = "Wasi";
|
||||
redox = "Redox";
|
||||
genode = "Genode";
|
||||
}
|
||||
.${final.parsed.kernel.name} or null;
|
||||
|
||||
# uname -m
|
||||
processor =
|
||||
if final.isPower64 then
|
||||
"ppc64${optionalString final.isLittleEndian "le"}"
|
||||
else if final.isPower then
|
||||
"ppc${optionalString final.isLittleEndian "le"}"
|
||||
else if final.isMips64 then
|
||||
"mips64" # endianness is *not* included on mips64
|
||||
else if final.isDarwin then
|
||||
"cctools"
|
||||
# "bfd" and "gold" both come from GNU binutils. The existence of Gold
|
||||
# is why we use the more obscure "bfd" and not "binutils" for this
|
||||
# choice.
|
||||
final.darwinArch
|
||||
else
|
||||
"bfd";
|
||||
# The standard lib directory name that non-nixpkgs binaries distributed
|
||||
# for this platform normally assume.
|
||||
libDir =
|
||||
if final.isLinux then
|
||||
if final.isx86_64 || final.isMips64 || final.isPower64 then "lib64" else "lib"
|
||||
final.parsed.cpu.name;
|
||||
|
||||
# uname -r
|
||||
release = null;
|
||||
};
|
||||
|
||||
# It is important that hasSharedLibraries==false when the platform has no
|
||||
# dynamic library loader. Various tools (including the gcc build system)
|
||||
# have knowledge of which platforms are incapable of dynamic linking, and
|
||||
# will still build on/for those platforms with --enable-shared, but simply
|
||||
# omit any `.so` build products such as libgcc_s.so. When that happens,
|
||||
# it causes hard-to-troubleshoot build failures.
|
||||
hasSharedLibraries =
|
||||
with final;
|
||||
(
|
||||
isAndroid
|
||||
|| isGnu
|
||||
|| isMusl # Linux (allows multiple libcs)
|
||||
|| isDarwin
|
||||
|| isSunOS
|
||||
|| isOpenBSD
|
||||
|| isFreeBSD
|
||||
|| isNetBSD # BSDs
|
||||
|| isCygwin
|
||||
|| isMinGW
|
||||
|| isWindows # Windows
|
||||
|| isWasm # WASM
|
||||
)
|
||||
&& !isStatic;
|
||||
|
||||
# The difference between `isStatic` and `hasSharedLibraries` is mainly the
|
||||
# addition of the `staticMarker` (see make-derivation.nix). Some
|
||||
# platforms, like embedded machines without a libc (e.g. arm-none-eabi)
|
||||
# don't support dynamic linking, but don't get the `staticMarker`.
|
||||
# `pkgsStatic` sets `isStatic=true`, so `pkgsStatic.hostPlatform` always
|
||||
# has the `staticMarker`.
|
||||
isStatic = final.isWasi || final.isRedox;
|
||||
|
||||
# Just a guess, based on `system`
|
||||
inherit
|
||||
(
|
||||
{
|
||||
linux-kernel = args.linux-kernel or { };
|
||||
gcc = args.gcc or { };
|
||||
}
|
||||
// platforms.select final
|
||||
)
|
||||
linux-kernel
|
||||
gcc
|
||||
;
|
||||
|
||||
# TODO: remove after 23.05 is EOL, with an error pointing to the rust.* attrs.
|
||||
rustc = args.rustc or { };
|
||||
|
||||
linuxArch =
|
||||
if final.isAarch32 then
|
||||
"arm"
|
||||
else if final.isAarch64 then
|
||||
"arm64"
|
||||
else if final.isx86_32 then
|
||||
"i386"
|
||||
else if final.isx86_64 then
|
||||
"x86_64"
|
||||
# linux kernel does not distinguish microblaze/microblazeel
|
||||
else if final.isMicroBlaze then
|
||||
"microblaze"
|
||||
else if final.isMips32 then
|
||||
"mips"
|
||||
else if final.isMips64 then
|
||||
"mips" # linux kernel does not distinguish mips32/mips64
|
||||
else if final.isPower then
|
||||
"powerpc"
|
||||
else if final.isRiscV then
|
||||
"riscv"
|
||||
else if final.isS390 then
|
||||
"s390"
|
||||
else if final.isLoongArch64 then
|
||||
"loongarch"
|
||||
else
|
||||
final.parsed.cpu.name;
|
||||
|
||||
# https://source.denx.de/u-boot/u-boot/-/blob/9bfb567e5f1bfe7de8eb41f8c6d00f49d2b9a426/common/image.c#L81-106
|
||||
ubootArch =
|
||||
if final.isx86_32 then
|
||||
"x86" # not i386
|
||||
else if final.isMips64 then
|
||||
"mips64" # uboot *does* distinguish between mips32/mips64
|
||||
else
|
||||
final.linuxArch; # other cases appear to agree with linuxArch
|
||||
|
||||
qemuArch =
|
||||
if final.isAarch32 then
|
||||
"arm"
|
||||
else if final.isAarch64 then
|
||||
"aarch64"
|
||||
else if final.isS390 && !final.isS390x then
|
||||
null
|
||||
else if final.isx86_64 then
|
||||
"x86_64"
|
||||
else if final.isx86 then
|
||||
"i386"
|
||||
else if final.isMips64n32 then
|
||||
"mipsn32${optionalString final.isLittleEndian "el"}"
|
||||
else if final.isMips64 then
|
||||
"mips64${optionalString final.isLittleEndian "el"}"
|
||||
else
|
||||
final.uname.processor;
|
||||
|
||||
# Name used by UEFI for architectures.
|
||||
efiArch =
|
||||
if final.isx86_32 then
|
||||
"ia32"
|
||||
else if final.isx86_64 then
|
||||
"x64"
|
||||
else if final.isAarch32 then
|
||||
"arm"
|
||||
else if final.isAarch64 then
|
||||
"aa64"
|
||||
else
|
||||
final.parsed.cpu.name;
|
||||
|
||||
darwinArch = parse.darwinArch final.parsed.cpu;
|
||||
|
||||
darwinPlatform =
|
||||
if final.isMacOS then
|
||||
"macos"
|
||||
else if final.isiOS then
|
||||
"ios"
|
||||
else
|
||||
null;
|
||||
# The canonical name for this attribute is darwinSdkVersion, but some
|
||||
# platforms define the old name "sdkVer".
|
||||
darwinSdkVersion = final.sdkVer or "11.3";
|
||||
darwinMinVersion = final.darwinSdkVersion;
|
||||
darwinMinVersionVariable =
|
||||
if final.isMacOS then
|
||||
"MACOSX_DEPLOYMENT_TARGET"
|
||||
else if final.isiOS then
|
||||
"IPHONEOS_DEPLOYMENT_TARGET"
|
||||
else
|
||||
null;
|
||||
|
||||
# Handle Android SDK and NDK versions.
|
||||
androidSdkVersion = args.androidSdkVersion or null;
|
||||
androidNdkVersion = args.androidNdkVersion or null;
|
||||
}
|
||||
// (
|
||||
let
|
||||
selectEmulator =
|
||||
pkgs:
|
||||
let
|
||||
wine = (pkgs.winePackagesFor "wine${toString final.parsed.cpu.bits}").minimal;
|
||||
in
|
||||
# Note: we guarantee that the return value is either `null` or a path
|
||||
# to an emulator program. That is, if an emulator requires additional
|
||||
# arguments, a wrapper should be used.
|
||||
if pkgs.stdenv.hostPlatform.canExecute final then
|
||||
lib.getExe (pkgs.writeShellScriptBin "exec" ''exec "$@"'')
|
||||
else if final.isWindows then
|
||||
"${wine}/bin/wine${optionalString (final.parsed.cpu.bits == 64) "64"}"
|
||||
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux && final.qemuArch != null then
|
||||
"${pkgs.qemu-user}/bin/qemu-${final.qemuArch}"
|
||||
else if final.isWasi then
|
||||
"${pkgs.wasmtime}/bin/wasmtime"
|
||||
else if final.isMmix then
|
||||
"${pkgs.mmixware}/bin/mmix"
|
||||
else
|
||||
null;
|
||||
extensions =
|
||||
optionalAttrs final.hasSharedLibraries {
|
||||
sharedLibrary =
|
||||
if final.isDarwin then
|
||||
".dylib"
|
||||
else if final.isWindows then
|
||||
".dll"
|
||||
else
|
||||
".so";
|
||||
}
|
||||
// {
|
||||
staticLibrary = if final.isWindows then ".lib" else ".a";
|
||||
library = if final.isStatic then final.extensions.staticLibrary else final.extensions.sharedLibrary;
|
||||
executable = if final.isWindows then ".exe" else "";
|
||||
};
|
||||
# Misc boolean options
|
||||
useAndroidPrebuilt = false;
|
||||
useiOSPrebuilt = false;
|
||||
in
|
||||
{
|
||||
emulatorAvailable = pkgs: (selectEmulator pkgs) != null;
|
||||
|
||||
# Output from uname
|
||||
uname = {
|
||||
# uname -s
|
||||
system =
|
||||
{
|
||||
linux = "Linux";
|
||||
windows = "Windows";
|
||||
darwin = "Darwin";
|
||||
netbsd = "NetBSD";
|
||||
freebsd = "FreeBSD";
|
||||
openbsd = "OpenBSD";
|
||||
wasi = "Wasi";
|
||||
redox = "Redox";
|
||||
genode = "Genode";
|
||||
}
|
||||
.${final.parsed.kernel.name} or null;
|
||||
# whether final.emulator pkgs.pkgsStatic works
|
||||
staticEmulatorAvailable =
|
||||
pkgs: final.emulatorAvailable pkgs && (final.isLinux || final.isWasi || final.isMmix);
|
||||
|
||||
# uname -m
|
||||
processor =
|
||||
if final.isPower64 then
|
||||
"ppc64${optionalString final.isLittleEndian "le"}"
|
||||
else if final.isPower then
|
||||
"ppc${optionalString final.isLittleEndian "le"}"
|
||||
emulator =
|
||||
pkgs:
|
||||
if (final.emulatorAvailable pkgs) then
|
||||
selectEmulator pkgs
|
||||
else
|
||||
throw "Don't know how to run ${final.config} executables.";
|
||||
|
||||
}
|
||||
)
|
||||
// mapAttrs (n: v: v final.parsed) inspect.predicates
|
||||
// mapAttrs (n: v: v final.gcc.arch or "default") architectures.predicates
|
||||
// args
|
||||
// {
|
||||
rust = rust // {
|
||||
# Once args.rustc.platform.target-family is deprecated and
|
||||
# removed, there will no longer be any need to modify any
|
||||
# values from args.rust.platform, so we can drop all the
|
||||
# "args ? rust" etc. checks, and merge args.rust.platform in
|
||||
# /after/.
|
||||
platform = rust.platform or { } // {
|
||||
# https://doc.rust-lang.org/reference/conditional-compilation.html#target_arch
|
||||
arch =
|
||||
if rust ? platform then
|
||||
rust.platform.arch
|
||||
else if final.isAarch32 then
|
||||
"arm"
|
||||
else if final.isMips64 then
|
||||
"mips64" # endianness is *not* included on mips64
|
||||
else if final.isDarwin then
|
||||
final.darwinArch
|
||||
"mips64" # never add "el" suffix
|
||||
else if final.isPower64 then
|
||||
"powerpc64" # never add "le" suffix
|
||||
else
|
||||
final.parsed.cpu.name;
|
||||
|
||||
# uname -r
|
||||
release = null;
|
||||
# https://doc.rust-lang.org/reference/conditional-compilation.html#target_os
|
||||
os =
|
||||
if rust ? platform then
|
||||
rust.platform.os or "none"
|
||||
else if final.isDarwin then
|
||||
"macos"
|
||||
else if final.isWasm && !final.isWasi then
|
||||
"unknown" # Needed for {wasm32,wasm64}-unknown-unknown.
|
||||
else
|
||||
final.parsed.kernel.name;
|
||||
|
||||
# https://doc.rust-lang.org/reference/conditional-compilation.html#target_family
|
||||
target-family =
|
||||
if args ? rust.platform.target-family then
|
||||
args.rust.platform.target-family
|
||||
else if args ? rustc.platform.target-family then
|
||||
(
|
||||
# Since https://github.com/rust-lang/rust/pull/84072
|
||||
# `target-family` is a list instead of single value.
|
||||
let
|
||||
f = args.rustc.platform.target-family;
|
||||
in
|
||||
if isList f then f else [ f ]
|
||||
)
|
||||
else
|
||||
optional final.isUnix "unix" ++ optional final.isWindows "windows" ++ optional final.isWasm "wasm";
|
||||
|
||||
# https://doc.rust-lang.org/reference/conditional-compilation.html#target_vendor
|
||||
vendor =
|
||||
let
|
||||
inherit (final.parsed) vendor;
|
||||
in
|
||||
rust.platform.vendor or {
|
||||
"w64" = "pc";
|
||||
}
|
||||
.${vendor.name} or vendor.name;
|
||||
};
|
||||
|
||||
# It is important that hasSharedLibraries==false when the platform has no
|
||||
# dynamic library loader. Various tools (including the gcc build system)
|
||||
# have knowledge of which platforms are incapable of dynamic linking, and
|
||||
# will still build on/for those platforms with --enable-shared, but simply
|
||||
# omit any `.so` build products such as libgcc_s.so. When that happens,
|
||||
# it causes hard-to-troubleshoot build failures.
|
||||
hasSharedLibraries =
|
||||
with final;
|
||||
(
|
||||
isAndroid
|
||||
|| isGnu
|
||||
|| isMusl # Linux (allows multiple libcs)
|
||||
|| isDarwin
|
||||
|| isSunOS
|
||||
|| isOpenBSD
|
||||
|| isFreeBSD
|
||||
|| isNetBSD # BSDs
|
||||
|| isCygwin
|
||||
|| isMinGW
|
||||
|| isWindows # Windows
|
||||
|| isWasm # WASM
|
||||
)
|
||||
&& !isStatic;
|
||||
|
||||
# The difference between `isStatic` and `hasSharedLibraries` is mainly the
|
||||
# addition of the `staticMarker` (see make-derivation.nix). Some
|
||||
# platforms, like embedded machines without a libc (e.g. arm-none-eabi)
|
||||
# don't support dynamic linking, but don't get the `staticMarker`.
|
||||
# `pkgsStatic` sets `isStatic=true`, so `pkgsStatic.hostPlatform` always
|
||||
# has the `staticMarker`.
|
||||
isStatic = final.isWasi || final.isRedox;
|
||||
|
||||
# Just a guess, based on `system`
|
||||
inherit
|
||||
(
|
||||
{
|
||||
linux-kernel = args.linux-kernel or { };
|
||||
gcc = args.gcc or { };
|
||||
}
|
||||
// platforms.select final
|
||||
)
|
||||
linux-kernel
|
||||
gcc
|
||||
;
|
||||
|
||||
# TODO: remove after 23.05 is EOL, with an error pointing to the rust.* attrs.
|
||||
rustc = args.rustc or { };
|
||||
|
||||
linuxArch =
|
||||
if final.isAarch32 then
|
||||
"arm"
|
||||
else if final.isAarch64 then
|
||||
"arm64"
|
||||
else if final.isx86_32 then
|
||||
"i386"
|
||||
else if final.isx86_64 then
|
||||
"x86_64"
|
||||
# linux kernel does not distinguish microblaze/microblazeel
|
||||
else if final.isMicroBlaze then
|
||||
"microblaze"
|
||||
else if final.isMips32 then
|
||||
"mips"
|
||||
else if final.isMips64 then
|
||||
"mips" # linux kernel does not distinguish mips32/mips64
|
||||
else if final.isPower then
|
||||
"powerpc"
|
||||
else if final.isRiscV then
|
||||
"riscv"
|
||||
else if final.isS390 then
|
||||
"s390"
|
||||
else if final.isLoongArch64 then
|
||||
"loongarch"
|
||||
else
|
||||
final.parsed.cpu.name;
|
||||
|
||||
# https://source.denx.de/u-boot/u-boot/-/blob/9bfb567e5f1bfe7de8eb41f8c6d00f49d2b9a426/common/image.c#L81-106
|
||||
ubootArch =
|
||||
if final.isx86_32 then
|
||||
"x86" # not i386
|
||||
else if final.isMips64 then
|
||||
"mips64" # uboot *does* distinguish between mips32/mips64
|
||||
else
|
||||
final.linuxArch; # other cases appear to agree with linuxArch
|
||||
|
||||
qemuArch =
|
||||
if final.isAarch32 then
|
||||
"arm"
|
||||
else if final.isAarch64 then
|
||||
"aarch64"
|
||||
else if final.isS390 && !final.isS390x then
|
||||
null
|
||||
else if final.isx86_64 then
|
||||
"x86_64"
|
||||
else if final.isx86 then
|
||||
"i386"
|
||||
else if final.isMips64n32 then
|
||||
"mipsn32${optionalString final.isLittleEndian "el"}"
|
||||
else if final.isMips64 then
|
||||
"mips64${optionalString final.isLittleEndian "el"}"
|
||||
else
|
||||
final.uname.processor;
|
||||
|
||||
# Name used by UEFI for architectures.
|
||||
efiArch =
|
||||
if final.isx86_32 then
|
||||
"ia32"
|
||||
else if final.isx86_64 then
|
||||
"x64"
|
||||
else if final.isAarch32 then
|
||||
"arm"
|
||||
else if final.isAarch64 then
|
||||
"aa64"
|
||||
else
|
||||
final.parsed.cpu.name;
|
||||
|
||||
darwinArch = parse.darwinArch final.parsed.cpu;
|
||||
|
||||
darwinPlatform =
|
||||
if final.isMacOS then
|
||||
"macos"
|
||||
else if final.isiOS then
|
||||
"ios"
|
||||
else
|
||||
null;
|
||||
# The canonical name for this attribute is darwinSdkVersion, but some
|
||||
# platforms define the old name "sdkVer".
|
||||
darwinSdkVersion = final.sdkVer or "11.3";
|
||||
darwinMinVersion = final.darwinSdkVersion;
|
||||
darwinMinVersionVariable =
|
||||
if final.isMacOS then
|
||||
"MACOSX_DEPLOYMENT_TARGET"
|
||||
else if final.isiOS then
|
||||
"IPHONEOS_DEPLOYMENT_TARGET"
|
||||
else
|
||||
null;
|
||||
|
||||
# Handle Android SDK and NDK versions.
|
||||
androidSdkVersion = args.androidSdkVersion or null;
|
||||
androidNdkVersion = args.androidNdkVersion or null;
|
||||
}
|
||||
// (
|
||||
let
|
||||
selectEmulator =
|
||||
pkgs:
|
||||
let
|
||||
wine = (pkgs.winePackagesFor "wine${toString final.parsed.cpu.bits}").minimal;
|
||||
in
|
||||
# Note: we guarantee that the return value is either `null` or a path
|
||||
# to an emulator program. That is, if an emulator requires additional
|
||||
# arguments, a wrapper should be used.
|
||||
if pkgs.stdenv.hostPlatform.canExecute final then
|
||||
lib.getExe (pkgs.writeShellScriptBin "exec" ''exec "$@"'')
|
||||
else if final.isWindows then
|
||||
"${wine}/bin/wine${optionalString (final.parsed.cpu.bits == 64) "64"}"
|
||||
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux && final.qemuArch != null then
|
||||
"${pkgs.qemu-user}/bin/qemu-${final.qemuArch}"
|
||||
else if final.isWasi then
|
||||
"${pkgs.wasmtime}/bin/wasmtime"
|
||||
else if final.isMmix then
|
||||
"${pkgs.mmixware}/bin/mmix"
|
||||
else
|
||||
null;
|
||||
in
|
||||
{
|
||||
emulatorAvailable = pkgs: (selectEmulator pkgs) != null;
|
||||
|
||||
# whether final.emulator pkgs.pkgsStatic works
|
||||
staticEmulatorAvailable =
|
||||
pkgs: final.emulatorAvailable pkgs && (final.isLinux || final.isWasi || final.isMmix);
|
||||
|
||||
emulator =
|
||||
pkgs:
|
||||
if (final.emulatorAvailable pkgs) then
|
||||
selectEmulator pkgs
|
||||
else
|
||||
throw "Don't know how to run ${final.config} executables.";
|
||||
|
||||
}
|
||||
)
|
||||
// mapAttrs (n: v: v final.parsed) inspect.predicates
|
||||
// mapAttrs (n: v: v final.gcc.arch or "default") architectures.predicates
|
||||
// args
|
||||
// {
|
||||
rust = rust // {
|
||||
# Once args.rustc.platform.target-family is deprecated and
|
||||
# removed, there will no longer be any need to modify any
|
||||
# values from args.rust.platform, so we can drop all the
|
||||
# "args ? rust" etc. checks, and merge args.rust.platform in
|
||||
# /after/.
|
||||
platform = rust.platform or { } // {
|
||||
# https://doc.rust-lang.org/reference/conditional-compilation.html#target_arch
|
||||
arch =
|
||||
if rust ? platform then
|
||||
rust.platform.arch
|
||||
else if final.isAarch32 then
|
||||
"arm"
|
||||
else if final.isMips64 then
|
||||
"mips64" # never add "el" suffix
|
||||
else if final.isPower64 then
|
||||
"powerpc64" # never add "le" suffix
|
||||
else
|
||||
final.parsed.cpu.name;
|
||||
|
||||
# https://doc.rust-lang.org/reference/conditional-compilation.html#target_os
|
||||
os =
|
||||
if rust ? platform then
|
||||
rust.platform.os or "none"
|
||||
else if final.isDarwin then
|
||||
"macos"
|
||||
else if final.isWasm && !final.isWasi then
|
||||
"unknown" # Needed for {wasm32,wasm64}-unknown-unknown.
|
||||
else
|
||||
final.parsed.kernel.name;
|
||||
|
||||
# https://doc.rust-lang.org/reference/conditional-compilation.html#target_family
|
||||
target-family =
|
||||
if args ? rust.platform.target-family then
|
||||
args.rust.platform.target-family
|
||||
else if args ? rustc.platform.target-family then
|
||||
(
|
||||
# Since https://github.com/rust-lang/rust/pull/84072
|
||||
# `target-family` is a list instead of single value.
|
||||
let
|
||||
f = args.rustc.platform.target-family;
|
||||
in
|
||||
if isList f then f else [ f ]
|
||||
)
|
||||
else
|
||||
optional final.isUnix "unix" ++ optional final.isWindows "windows" ++ optional final.isWasm "wasm";
|
||||
|
||||
# https://doc.rust-lang.org/reference/conditional-compilation.html#target_vendor
|
||||
vendor =
|
||||
let
|
||||
inherit (final.parsed) vendor;
|
||||
in
|
||||
rust.platform.vendor or {
|
||||
"w64" = "pc";
|
||||
# The name of the rust target, even if it is custom. Adjustments are
|
||||
# because rust has slightly different naming conventions than we do.
|
||||
rustcTarget =
|
||||
let
|
||||
inherit (final.parsed) cpu kernel abi;
|
||||
cpu_ =
|
||||
rust.platform.arch or {
|
||||
"armv7a" = "armv7";
|
||||
"armv7l" = "armv7";
|
||||
"armv6l" = "arm";
|
||||
"armv5tel" = "armv5te";
|
||||
"riscv32" = "riscv32gc";
|
||||
"riscv64" = "riscv64gc";
|
||||
}
|
||||
.${vendor.name} or vendor.name;
|
||||
};
|
||||
.${cpu.name} or cpu.name;
|
||||
vendor_ = final.rust.platform.vendor;
|
||||
in
|
||||
# TODO: deprecate args.rustc in favour of args.rust after 23.05 is EOL.
|
||||
args.rust.rustcTarget or args.rustc.config or (
|
||||
# Rust uses `wasm32-wasip?` rather than `wasm32-unknown-wasi`.
|
||||
# We cannot know which subversion does the user want, and
|
||||
# currently use WASI 0.1 as default for compatibility. Custom
|
||||
# users can set `rust.rustcTarget` to override it.
|
||||
if final.isWasi then
|
||||
"${cpu_}-wasip1"
|
||||
else
|
||||
"${cpu_}-${vendor_}-${kernel.name}${optionalString (abi.name != "unknown") "-${abi.name}"}"
|
||||
);
|
||||
|
||||
# The name of the rust target, even if it is custom. Adjustments are
|
||||
# because rust has slightly different naming conventions than we do.
|
||||
rustcTarget =
|
||||
let
|
||||
inherit (final.parsed) cpu kernel abi;
|
||||
cpu_ =
|
||||
rust.platform.arch or {
|
||||
"armv7a" = "armv7";
|
||||
"armv7l" = "armv7";
|
||||
"armv6l" = "arm";
|
||||
"armv5tel" = "armv5te";
|
||||
"riscv32" = "riscv32gc";
|
||||
"riscv64" = "riscv64gc";
|
||||
}
|
||||
.${cpu.name} or cpu.name;
|
||||
vendor_ = final.rust.platform.vendor;
|
||||
in
|
||||
# TODO: deprecate args.rustc in favour of args.rust after 23.05 is EOL.
|
||||
args.rust.rustcTarget or args.rustc.config or (
|
||||
# Rust uses `wasm32-wasip?` rather than `wasm32-unknown-wasi`.
|
||||
# We cannot know which subversion does the user want, and
|
||||
# currently use WASI 0.1 as default for compatibility. Custom
|
||||
# users can set `rust.rustcTarget` to override it.
|
||||
if final.isWasi then
|
||||
"${cpu_}-wasip1"
|
||||
else
|
||||
"${cpu_}-${vendor_}-${kernel.name}${optionalString (abi.name != "unknown") "-${abi.name}"}"
|
||||
);
|
||||
# The name of the rust target if it is standard, or the json file
|
||||
# containing the custom target spec.
|
||||
rustcTargetSpec =
|
||||
rust.rustcTargetSpec or (
|
||||
if rust ? platform then
|
||||
builtins.toFile (final.rust.rustcTarget + ".json") (toJSON rust.platform)
|
||||
else
|
||||
final.rust.rustcTarget
|
||||
);
|
||||
|
||||
# The name of the rust target if it is standard, or the json file
|
||||
# containing the custom target spec.
|
||||
rustcTargetSpec =
|
||||
rust.rustcTargetSpec or (
|
||||
if rust ? platform then
|
||||
builtins.toFile (final.rust.rustcTarget + ".json") (toJSON rust.platform)
|
||||
else
|
||||
final.rust.rustcTarget
|
||||
);
|
||||
# The name of the rust target if it is standard, or the
|
||||
# basename of the file containing the custom target spec,
|
||||
# without the .json extension.
|
||||
#
|
||||
# This is the name used by Cargo for target subdirectories.
|
||||
cargoShortTarget = removeSuffix ".json" (baseNameOf "${final.rust.rustcTargetSpec}");
|
||||
|
||||
# The name of the rust target if it is standard, or the
|
||||
# basename of the file containing the custom target spec,
|
||||
# without the .json extension.
|
||||
#
|
||||
# This is the name used by Cargo for target subdirectories.
|
||||
cargoShortTarget = removeSuffix ".json" (baseNameOf "${final.rust.rustcTargetSpec}");
|
||||
# When used as part of an environment variable name, triples are
|
||||
# uppercased and have all hyphens replaced by underscores:
|
||||
#
|
||||
# https://github.com/rust-lang/cargo/pull/9169
|
||||
# https://github.com/rust-lang/cargo/issues/8285#issuecomment-634202431
|
||||
cargoEnvVarTarget = replaceString "-" "_" (toUpper final.rust.cargoShortTarget);
|
||||
|
||||
# When used as part of an environment variable name, triples are
|
||||
# uppercased and have all hyphens replaced by underscores:
|
||||
#
|
||||
# https://github.com/rust-lang/cargo/pull/9169
|
||||
# https://github.com/rust-lang/cargo/issues/8285#issuecomment-634202431
|
||||
cargoEnvVarTarget = replaceString "-" "_" (toUpper final.rust.cargoShortTarget);
|
||||
|
||||
# True if the target is no_std
|
||||
# https://github.com/rust-lang/rust/blob/2e44c17c12cec45b6a682b1e53a04ac5b5fcc9d2/src/bootstrap/config.rs#L415-L421
|
||||
isNoStdTarget = any (t: hasInfix t final.rust.rustcTarget) [
|
||||
"-none"
|
||||
"nvptx"
|
||||
"switch"
|
||||
"-uefi"
|
||||
];
|
||||
};
|
||||
}
|
||||
// {
|
||||
go = {
|
||||
# See https://pkg.go.dev/internal/platform for a list of known platforms
|
||||
GOARCH =
|
||||
{
|
||||
"aarch64" = "arm64";
|
||||
"arm" = "arm";
|
||||
"armv5tel" = "arm";
|
||||
"armv6l" = "arm";
|
||||
"armv7l" = "arm";
|
||||
"i686" = "386";
|
||||
"loongarch64" = "loong64";
|
||||
"mips" = "mips";
|
||||
"mips64el" = "mips64le";
|
||||
"mipsel" = "mipsle";
|
||||
"powerpc64" = "ppc64";
|
||||
"powerpc64le" = "ppc64le";
|
||||
"riscv64" = "riscv64";
|
||||
"s390x" = "s390x";
|
||||
"x86_64" = "amd64";
|
||||
"wasm32" = "wasm";
|
||||
}
|
||||
.${final.parsed.cpu.name} or null;
|
||||
GOOS = if final.isWasi then "wasip1" else final.parsed.kernel.name;
|
||||
|
||||
# See https://go.dev/wiki/GoArm
|
||||
GOARM = toString (lib.intersectLists [ (final.parsed.cpu.version or "") ] [ "5" "6" "7" ]);
|
||||
};
|
||||
# True if the target is no_std
|
||||
# https://github.com/rust-lang/rust/blob/2e44c17c12cec45b6a682b1e53a04ac5b5fcc9d2/src/bootstrap/config.rs#L415-L421
|
||||
isNoStdTarget = any (t: hasInfix t final.rust.rustcTarget) [
|
||||
"-none"
|
||||
"nvptx"
|
||||
"switch"
|
||||
"-uefi"
|
||||
];
|
||||
};
|
||||
}
|
||||
// {
|
||||
go = {
|
||||
# See https://pkg.go.dev/internal/platform for a list of known platforms
|
||||
GOARCH =
|
||||
{
|
||||
"aarch64" = "arm64";
|
||||
"arm" = "arm";
|
||||
"armv5tel" = "arm";
|
||||
"armv6l" = "arm";
|
||||
"armv7l" = "arm";
|
||||
"i686" = "386";
|
||||
"loongarch64" = "loong64";
|
||||
"mips" = "mips";
|
||||
"mips64el" = "mips64le";
|
||||
"mipsel" = "mipsle";
|
||||
"powerpc64" = "ppc64";
|
||||
"powerpc64le" = "ppc64le";
|
||||
"riscv64" = "riscv64";
|
||||
"s390x" = "s390x";
|
||||
"x86_64" = "amd64";
|
||||
"wasm32" = "wasm";
|
||||
}
|
||||
.${final.parsed.cpu.name} or null;
|
||||
GOOS = if final.isWasi then "wasip1" else final.parsed.kernel.name;
|
||||
|
||||
# See https://go.dev/wiki/GoArm
|
||||
GOARM = toString (lib.intersectLists [ (final.parsed.cpu.version or "") ] [ "5" "6" "7" ]);
|
||||
};
|
||||
};
|
||||
in
|
||||
assert final.useAndroidPrebuilt -> final.isAndroid;
|
||||
assert foldl (pass: { assertion, message }: if assertion final then pass else throw message) true (
|
||||
|
@ -38,23 +38,28 @@ rec {
|
||||
|
||||
sheevaplug = {
|
||||
config = "armv5tel-unknown-linux-gnueabi";
|
||||
} // platforms.sheevaplug;
|
||||
}
|
||||
// platforms.sheevaplug;
|
||||
|
||||
raspberryPi = {
|
||||
config = "armv6l-unknown-linux-gnueabihf";
|
||||
} // platforms.raspberrypi;
|
||||
}
|
||||
// platforms.raspberrypi;
|
||||
|
||||
bluefield2 = {
|
||||
config = "aarch64-unknown-linux-gnu";
|
||||
} // platforms.bluefield2;
|
||||
}
|
||||
// platforms.bluefield2;
|
||||
|
||||
remarkable1 = {
|
||||
config = "armv7l-unknown-linux-gnueabihf";
|
||||
} // platforms.zero-gravitas;
|
||||
}
|
||||
// platforms.zero-gravitas;
|
||||
|
||||
remarkable2 = {
|
||||
config = "armv7l-unknown-linux-gnueabihf";
|
||||
} // platforms.zero-sugar;
|
||||
}
|
||||
// platforms.zero-sugar;
|
||||
|
||||
armv7l-hf-multiplatform = {
|
||||
config = "armv7l-unknown-linux-gnueabihf";
|
||||
@ -70,7 +75,8 @@ rec {
|
||||
androidSdkVersion = "33";
|
||||
androidNdkVersion = "26";
|
||||
useAndroidPrebuilt = true;
|
||||
} // platforms.armv7a-android;
|
||||
}
|
||||
// platforms.armv7a-android;
|
||||
|
||||
aarch64-android-prebuilt = {
|
||||
config = "aarch64-unknown-linux-android";
|
||||
@ -91,39 +97,48 @@ rec {
|
||||
|
||||
pogoplug4 = {
|
||||
config = "armv5tel-unknown-linux-gnueabi";
|
||||
} // platforms.pogoplug4;
|
||||
}
|
||||
// platforms.pogoplug4;
|
||||
|
||||
ben-nanonote = {
|
||||
config = "mipsel-unknown-linux-uclibc";
|
||||
} // platforms.ben_nanonote;
|
||||
}
|
||||
// platforms.ben_nanonote;
|
||||
|
||||
fuloongminipc = {
|
||||
config = "mipsel-unknown-linux-gnu";
|
||||
} // platforms.fuloong2f_n32;
|
||||
}
|
||||
// platforms.fuloong2f_n32;
|
||||
|
||||
# can execute on 32bit chip
|
||||
mips-linux-gnu = {
|
||||
config = "mips-unknown-linux-gnu";
|
||||
} // platforms.gcc_mips32r2_o32;
|
||||
}
|
||||
// platforms.gcc_mips32r2_o32;
|
||||
mipsel-linux-gnu = {
|
||||
config = "mipsel-unknown-linux-gnu";
|
||||
} // platforms.gcc_mips32r2_o32;
|
||||
}
|
||||
// platforms.gcc_mips32r2_o32;
|
||||
|
||||
# require 64bit chip (for more registers, 64-bit floating point, 64-bit "long long") but use 32bit pointers
|
||||
mips64-linux-gnuabin32 = {
|
||||
config = "mips64-unknown-linux-gnuabin32";
|
||||
} // platforms.gcc_mips64r2_n32;
|
||||
}
|
||||
// platforms.gcc_mips64r2_n32;
|
||||
mips64el-linux-gnuabin32 = {
|
||||
config = "mips64el-unknown-linux-gnuabin32";
|
||||
} // platforms.gcc_mips64r2_n32;
|
||||
}
|
||||
// platforms.gcc_mips64r2_n32;
|
||||
|
||||
# 64bit pointers
|
||||
mips64-linux-gnuabi64 = {
|
||||
config = "mips64-unknown-linux-gnuabi64";
|
||||
} // platforms.gcc_mips64r2_64;
|
||||
}
|
||||
// platforms.gcc_mips64r2_64;
|
||||
mips64el-linux-gnuabi64 = {
|
||||
config = "mips64el-unknown-linux-gnuabi64";
|
||||
} // platforms.gcc_mips64r2_64;
|
||||
}
|
||||
// platforms.gcc_mips64r2_64;
|
||||
|
||||
muslpi = raspberryPi // {
|
||||
config = "armv6l-unknown-linux-musleabihf";
|
||||
|
@ -260,16 +260,16 @@ rec {
|
||||
bits = 64;
|
||||
};
|
||||
};
|
||||
isILP32 =
|
||||
[
|
||||
{
|
||||
cpu = {
|
||||
family = "wasm";
|
||||
bits = 32;
|
||||
};
|
||||
}
|
||||
]
|
||||
++ map
|
||||
isILP32 = [
|
||||
{
|
||||
cpu = {
|
||||
family = "wasm";
|
||||
bits = 32;
|
||||
};
|
||||
}
|
||||
]
|
||||
++
|
||||
map
|
||||
(a: {
|
||||
abi = {
|
||||
abi = a;
|
||||
|
@ -5,13 +5,12 @@
|
||||
{
|
||||
|
||||
# Always defined, but the value depends on the presence of an option.
|
||||
config.set =
|
||||
{
|
||||
value = if options ? set.enable then 360 else 7;
|
||||
}
|
||||
# Only define if possible.
|
||||
// lib.optionalAttrs (options ? set.enable) {
|
||||
enable = true;
|
||||
};
|
||||
config.set = {
|
||||
value = if options ? set.enable then 360 else 7;
|
||||
}
|
||||
# Only define if possible.
|
||||
// lib.optionalAttrs (options ? set.enable) {
|
||||
enable = true;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -5,13 +5,12 @@
|
||||
{
|
||||
|
||||
# Always defined, but the value depends on the presence of an option.
|
||||
config =
|
||||
{
|
||||
value = if options ? enable then 360 else 7;
|
||||
}
|
||||
# Only define if possible.
|
||||
// lib.optionalAttrs (options ? enable) {
|
||||
enable = true;
|
||||
};
|
||||
config = {
|
||||
value = if options ? enable then 360 else 7;
|
||||
}
|
||||
# Only define if possible.
|
||||
// lib.optionalAttrs (options ? enable) {
|
||||
enable = true;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -27,7 +27,8 @@ pkgs.runCommand "nixpkgs-lib-tests-nix-${nix.version}"
|
||||
nativeBuildInputs = [
|
||||
nix
|
||||
pkgs.gitMinimal
|
||||
] ++ lib.optional pkgs.stdenv.hostPlatform.isLinux pkgs.inotify-tools;
|
||||
]
|
||||
++ lib.optional pkgs.stdenv.hostPlatform.isLinux pkgs.inotify-tools;
|
||||
strictDeps = true;
|
||||
}
|
||||
''
|
||||
|
@ -1210,7 +1210,8 @@ let
|
||||
# It shouldn't cause an issue since this is cosmetic for the manual.
|
||||
_module.args.name = lib.mkOptionDefault "‹name›";
|
||||
}
|
||||
] ++ modules;
|
||||
]
|
||||
++ modules;
|
||||
};
|
||||
|
||||
freeformType = base._module.freeformType;
|
||||
|
@ -86,14 +86,12 @@ When adding users to [`maintainer-list.nix`](./maintainer-list.nix), the followi

Given a maintainer entry like this:

``` nix
```nix
{
example = {
email = "user@example.com";
name = "Example User";
keys = [{
fingerprint = "0000 0000 2A70 6423 0AED 3C11 F04F 7A19 AAA6 3AFE";
}];
keys = [ { fingerprint = "0000 0000 2A70 6423 0AED 3C11 F04F 7A19 AAA6 3AFE"; } ];
};
}
```

@ -28,7 +28,7 @@ can be accomplished using the following configuration on the host:
```nix
{
networking.nat.enable = true;
networking.nat.internalInterfaces = ["ve-+"];
networking.nat.internalInterfaces = [ "ve-+" ];
networking.nat.externalInterface = "eth0";
}
```
@ -40,9 +40,7 @@ If you are using Network Manager, you need to explicitly prevent it from
managing container interfaces:

```nix
{
networking.networkmanager.unmanaged = [ "interface-name:ve-*" ];
}
{ networking.networkmanager.unmanaged = [ "interface-name:ve-*" ]; }
```

You may need to restart your system for the changes to take effect.

@ -39,9 +39,7 @@ they were in the same cgroup, then the PostgreSQL process would get
`configuration.nix`:

```nix
{
systemd.services.httpd.serviceConfig.CPUShares = 512;
}
{ systemd.services.httpd.serviceConfig.CPUShares = 512; }
```

By default, every cgroup has 1024 CPU shares, so this will halve the CPU
@ -54,9 +52,7 @@ limits can be specified in `configuration.nix`; for instance, to limit
`httpd.service` to 512 MiB of RAM (excluding swap):

```nix
{
systemd.services.httpd.serviceConfig.MemoryLimit = "512M";
}
{ systemd.services.httpd.serviceConfig.MemoryLimit = "512M"; }
```

The command `systemd-cgtop` shows a continuously updated list of all

@ -6,13 +6,14 @@ shall be a container named `database` running PostgreSQL:

```nix
{
containers.database =
{ config =
{ config, pkgs, ... }:
{ services.postgresql.enable = true;
containers.database = {
config =
{ config, pkgs, ... }:
{
services.postgresql.enable = true;
services.postgresql.package = pkgs.postgresql_14;
};
};
};
};
}
```

@ -82,9 +82,7 @@ In order to enable a systemd *system* service with provided upstream
package, use (e.g):

```nix
{
systemd.packages = [ pkgs.packagekit ];
}
{ systemd.packages = [ pkgs.packagekit ]; }
```

Usually NixOS modules written by the community do the above, plus take
@ -126,17 +124,19 @@ in turn will not make the service / timer start on login.
You can define services by adding them to `systemd.services`:

```nix
systemd.services.myservice = {
after = [ "network-online.target" ];
requires = [ "network-online.target" ];
{
systemd.services.myservice = {
after = [ "network-online.target" ];
requires = [ "network-online.target" ];

before = [ "multi-user.target" ];
wantedBy = [ "multi-user.target" ];
before = [ "multi-user.target" ];
wantedBy = [ "multi-user.target" ];

serviceConfig = {
ExecStart = "...";
serviceConfig = {
ExecStart = "...";
};
};
};
}
```

If you want to specify a multi-line script for `ExecStart`,

@ -4,37 +4,37 @@ If you find yourself repeating yourself over and over, it’s time to abstract.

```nix
{
services.httpd.virtualHosts =
{ "blog.example.org" = {
documentRoot = "/webroot/blog.example.org";
adminAddr = "alice@example.org";
forceSSL = true;
enableACME = true;
};
"wiki.example.org" = {
documentRoot = "/webroot/wiki.example.org";
adminAddr = "alice@example.org";
forceSSL = true;
enableACME = true;
};
services.httpd.virtualHosts = {
"blog.example.org" = {
documentRoot = "/webroot/blog.example.org";
adminAddr = "alice@example.org";
forceSSL = true;
enableACME = true;
};
"wiki.example.org" = {
documentRoot = "/webroot/wiki.example.org";
adminAddr = "alice@example.org";
forceSSL = true;
enableACME = true;
};
};
}
```

It defines two virtual hosts with nearly identical configuration; the only difference is the document root directories. To prevent this duplication, we can use a `let`:
```nix
let
commonConfig =
{ adminAddr = "alice@example.org";
forceSSL = true;
enableACME = true;
};
commonConfig = {
adminAddr = "alice@example.org";
forceSSL = true;
enableACME = true;
};
in
{
services.httpd.virtualHosts =
{ "blog.example.org" = (commonConfig // { documentRoot = "/webroot/blog.example.org"; });
"wiki.example.org" = (commonConfig // { documentRoot = "/webroot/wiki.example.org"; });
};
services.httpd.virtualHosts = {
"blog.example.org" = (commonConfig // { documentRoot = "/webroot/blog.example.org"; });
"wiki.example.org" = (commonConfig // { documentRoot = "/webroot/wiki.example.org"; });
};
}
```
@ -45,9 +45,24 @@ You can write a `let` wherever an expression is allowed. Thus, you also could ha
```nix
{
services.httpd.virtualHosts =
let commonConfig = { /* ... */ }; in
{ "blog.example.org" = (commonConfig // { /* ... */ });
"wiki.example.org" = (commonConfig // { /* ... */ });
let
commonConfig = {
# ...
};
in
{
"blog.example.org" = (
commonConfig
// {
# ...
}
);
"wiki.example.org" = (
commonConfig
// {
# ...
}
);
};
}
```
@ -60,18 +75,19 @@ but not `{ let commonConfig = ...; in ...; }` since attributes (as opposed to at
{
services.httpd.virtualHosts =
let
makeVirtualHost = webroot:
{ documentRoot = webroot;
adminAddr = "alice@example.org";
forceSSL = true;
enableACME = true;
};
in
{ "example.org" = (makeVirtualHost "/webroot/example.org");
"example.com" = (makeVirtualHost "/webroot/example.com");
"example.gov" = (makeVirtualHost "/webroot/example.gov");
"example.nl" = (makeVirtualHost "/webroot/example.nl");
makeVirtualHost = webroot: {
documentRoot = webroot;
adminAddr = "alice@example.org";
forceSSL = true;
enableACME = true;
};
in
{
"example.org" = (makeVirtualHost "/webroot/example.org");
"example.com" = (makeVirtualHost "/webroot/example.com");
"example.gov" = (makeVirtualHost "/webroot/example.gov");
"example.nl" = (makeVirtualHost "/webroot/example.nl");
};
}
```

@ -7,9 +7,8 @@ modules. For instance, to statically configure an IPv6 address:

```nix
{
networking.localCommands =
''
ip -6 addr add 2001:610:685:1::1/64 dev eth0
'';
networking.localCommands = ''
ip -6 addr add 2001:610:685:1::1/64 dev eth0
'';
}
```
@ -23,9 +23,7 @@ Then you write and test the package as described in the Nixpkgs manual.
Finally, you add it to [](#opt-environment.systemPackages), e.g.

```nix
{
environment.systemPackages = [ pkgs.my-package ];
}
{ environment.systemPackages = [ pkgs.my-package ]; }
```

and you run `nixos-rebuild`, specifying your own Nixpkgs tree:

@ -43,13 +41,15 @@ tree. For instance, here is how you specify a build of the
{
environment.systemPackages =
let
my-hello = with pkgs; stdenv.mkDerivation rec {
name = "hello-2.8";
src = fetchurl {
url = "mirror://gnu/hello/${name}.tar.gz";
hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
my-hello =
with pkgs;
stdenv.mkDerivation rec {
name = "hello-2.8";
src = fetchurl {
url = "mirror://gnu/hello/${name}.tar.gz";
hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
};
};
};
in
[ my-hello ];
}
@ -59,15 +59,13 @@ Of course, you can also move the definition of `my-hello` into a
separate Nix expression, e.g.

```nix
{
environment.systemPackages = [ (import ./my-hello.nix) ];
}
{ environment.systemPackages = [ (import ./my-hello.nix) ]; }
```

where `my-hello.nix` contains:

```nix
with import <nixpkgs> {}; # bring all of Nixpkgs into scope
with import <nixpkgs> { }; # bring all of Nixpkgs into scope

stdenv.mkDerivation rec {
name = "hello-2.8";
@ -111,7 +109,7 @@ If there are shared libraries missing add them with
extraPkgs = pkgs: [
# missing libraries here, e.g.: `pkgs.libepoxy`
];
}
};
}
```

@ -5,7 +5,8 @@ The NixOS configuration file generally looks like this:
```nix
{ config, pkgs, ... }:

{ /* option definitions */
{
# option definitions
}
```

@ -19,7 +20,8 @@ name of an option and `value` is its value. For example,
```nix
{ config, pkgs, ... }:

{ services.httpd.enable = true;
{
services.httpd.enable = true;
services.httpd.adminAddr = "alice@example.org";
services.httpd.virtualHosts.localhost.documentRoot = "/webroot";
}

@ -38,7 +40,8 @@ example above can also be written as:
```nix
{ config, pkgs, ... }:

{ services = {
{
services = {
httpd = {
enable = true;
adminAddr = "alice@example.org";
@ -41,14 +41,14 @@ You can use them like this:
{
environment.systemPackages = with pkgs; [
sl
(pass.withExtensions (subpkgs: with subpkgs; [
pass-audit
pass-otp
pass-genphrase
]))
(python3.withPackages (subpkgs: with subpkgs; [
requests
]))
(pass.withExtensions (
subpkgs: with subpkgs; [
pass-audit
pass-otp
pass-genphrase
]
))
(python3.withPackages (subpkgs: with subpkgs; [ requests ]))
cowsay
];
}

@ -62,9 +62,7 @@ dependency on GTK 2. If you want to build it against GTK 3, you can
specify that as follows:

```nix
{
environment.systemPackages = [ (pkgs.emacs.override { gtk = pkgs.gtk3; }) ];
}
{ environment.systemPackages = [ (pkgs.emacs.override { gtk = pkgs.gtk3; }) ]; }
```

The function `override` performs the call to the Nix function that
@ -109,9 +107,9 @@ your customised instance, you can apply a *global* override as follows:

```nix
{
nixpkgs.config.packageOverrides = pkgs:
{ emacs = pkgs.emacs.override { gtk = pkgs.gtk3; };
};
nixpkgs.config.packageOverrides = pkgs: {
emacs = pkgs.emacs.override { gtk = pkgs.gtk3; };
};
}
```

@ -7,9 +7,7 @@ following line to `configuration.nix` enables the Mozilla Thunderbird
email application:

```nix
{
environment.systemPackages = [ pkgs.thunderbird ];
}
{ environment.systemPackages = [ pkgs.thunderbird ]; }
```

The effect of this specification is that the Thunderbird package from

@ -7,10 +7,10 @@ point `/data`:

```nix
{
fileSystems."/data" =
{ device = "/dev/disk/by-label/data";
fsType = "ext4";
};
fileSystems."/data" = {
device = "/dev/disk/by-label/data";
fsType = "ext4";
};
}
```

@ -5,9 +5,7 @@ and other unexpected packets. The firewall applies to both IPv4 and IPv6
traffic. It is enabled by default. It can be disabled as follows:

```nix
{
networking.firewall.enable = false;
}
{ networking.firewall.enable = false; }
```

If the firewall is enabled, you can open specific TCP ports to the

@ -15,7 +13,10 @@ outside world:

```nix
{
networking.firewall.allowedTCPPorts = [ 80 443 ];
networking.firewall.allowedTCPPorts = [
80
443
];
}
```

@ -28,8 +29,14 @@ To open ranges of TCP ports:
```nix
{
networking.firewall.allowedTCPPortRanges = [
{ from = 4000; to = 4007; }
{ from = 8000; to = 8010; }
{
from = 4000;
to = 4007;
}
{
from = 8000;
to = 8010;
}
];
}
```
@ -55,11 +55,7 @@ supported through the rocmPackages.clr.icd package. Adding this package to
enables OpenCL support:

```nix
{
hardware.graphics.extraPackages = [
rocmPackages.clr.icd
];
}
{ hardware.graphics.extraPackages = [ rocmPackages.clr.icd ]; }
```

### Intel {#sec-gpu-accel-opencl-intel}

@ -75,11 +71,7 @@ to enable OpenCL support. For example, for Gen12 and later GPUs, the following
configuration can be used:

```nix
{
hardware.graphics.extraPackages = [
intel-compute-runtime
];
}
{ hardware.graphics.extraPackages = [ intel-compute-runtime ]; }
```

## Vulkan {#sec-gpu-accel-vulkan}

@ -145,20 +137,15 @@ A specific driver can be forced as follows:

```nix
{
hardware.graphics.extraPackages = [
pkgs.amdvlk
];
hardware.graphics.extraPackages = [ pkgs.amdvlk ];

# To enable Vulkan support for 32-bit applications, also add:
hardware.graphics.extraPackages32 = [
pkgs.driversi686Linux.amdvlk
];
hardware.graphics.extraPackages32 = [ pkgs.driversi686Linux.amdvlk ];

# Force radv
environment.variables.AMD_VULKAN_ICD = "RADV";
# Or
environment.variables.VK_ICD_FILENAMES =
"/run/opengl-driver/share/vulkan/icd.d/radeon_icd.x86_64.json";
environment.variables.VK_ICD_FILENAMES = "/run/opengl-driver/share/vulkan/icd.d/radeon_icd.x86_64.json";
}
```

@ -183,21 +170,13 @@ $ nix-shell -p libva-utils --run vainfo
Modern Intel GPUs use the iHD driver, which can be installed with:

```nix
{
hardware.graphics.extraPackages = [
intel-media-driver
];
}
{ hardware.graphics.extraPackages = [ intel-media-driver ]; }
```

Older Intel GPUs use the i965 driver, which can be installed with:

```nix
{
hardware.graphics.extraPackages = [
intel-vaapi-driver
];
}
{ hardware.graphics.extraPackages = [ intel-vaapi-driver ]; }
```

## Common issues {#sec-gpu-accel-common-issues}
@ -6,10 +6,12 @@ manually as follows:

```nix
{
networking.interfaces.eth0.ipv4.addresses = [ {
address = "192.168.1.2";
prefixLength = 24;
} ];
networking.interfaces.eth0.ipv4.addresses = [
{
address = "192.168.1.2";
prefixLength = 24;
}
];
}
```

@ -32,9 +34,7 @@ configuration is performed by `network-setup.service`.
The host name is set using [](#opt-networking.hostName):

```nix
{
networking.hostName = "cartman";
}
{ networking.hostName = "cartman"; }
```

The default host name is `nixos`. Set it to the empty string (`""`) to
@ -9,18 +9,14 @@ may be overridden on a per-interface basis by
IPv6 support globally by setting:

```nix
{
networking.enableIPv6 = false;
}
{ networking.enableIPv6 = false; }
```

You can disable IPv6 on a single interface using a normal sysctl (in
this example, we use interface `eth0`):

```nix
{
boot.kernel.sysctl."net.ipv6.conf.eth0.disable_ipv6" = true;
}
{ boot.kernel.sysctl."net.ipv6.conf.eth0.disable_ipv6" = true; }
```

As with IPv4 networking interfaces are automatically configured via

@ -28,10 +24,12 @@ DHCPv6. You can configure an interface manually:

```nix
{
networking.interfaces.eth0.ipv6.addresses = [ {
address = "fe00:aa:bb:cc::2";
prefixLength = 64;
} ];
networking.interfaces.eth0.ipv6.addresses = [
{
address = "fe00:aa:bb:cc::2";
prefixLength = 64;
}
];
}
```

@ -24,17 +24,13 @@ the host. This enables apiserver, controllerManager, scheduler,
addonManager, kube-proxy and etcd:

```nix
{
services.kubernetes.roles = [ "master" ];
}
{ services.kubernetes.roles = [ "master" ]; }
```

While this will enable the kubelet and kube-proxy only:

```nix
{
services.kubernetes.roles = [ "node" ];
}
{ services.kubernetes.roles = [ "node" ]; }
```

Assigning both the master and node roles is usable if you want a single

@ -42,7 +38,10 @@ node Kubernetes cluster for dev or testing purposes:

```nix
{
services.kubernetes.roles = [ "master" "node" ];
services.kubernetes.roles = [
"master"
"node"
];
}
```

@ -5,9 +5,7 @@ option `boot.kernelPackages`. For instance, this selects the Linux 3.10
kernel:

```nix
{
boot.kernelPackages = pkgs.linuxKernel.packages.linux_3_10;
}
{ boot.kernelPackages = pkgs.linuxKernel.packages.linux_3_10; }
```

Note that this not only replaces the kernel, but also packages that are

@ -43,13 +41,15 @@ instance, to enable support for the kernel debugger KGDB:

```nix
{
nixpkgs.config.packageOverrides = pkgs: pkgs.lib.recursiveUpdate pkgs {
linuxKernel.kernels.linux_5_10 = pkgs.linuxKernel.kernels.linux_5_10.override {
extraConfig = ''
KGDB y
'';
nixpkgs.config.packageOverrides =
pkgs:
pkgs.lib.recursiveUpdate pkgs {
linuxKernel.kernels.linux_5_10 = pkgs.linuxKernel.kernels.linux_5_10.override {
extraConfig = ''
KGDB y
'';
};
};
};
}
```

@ -64,7 +64,11 @@ by `udev`. You can force a module to be loaded via

```nix
{
boot.kernelModules = [ "fuse" "kvm-intel" "coretemp" ];
boot.kernelModules = [
"fuse"
"kvm-intel"
"coretemp"
];
}
```

@ -72,9 +76,7 @@ If the module is required early during the boot (e.g. to mount the root
file system), you can use [](#opt-boot.initrd.kernelModules):

```nix
{
boot.initrd.kernelModules = [ "cifs" ];
}
{ boot.initrd.kernelModules = [ "cifs" ]; }
```

This causes the specified modules and their dependencies to be added to

@ -84,9 +86,7 @@ Kernel runtime parameters can be set through
[](#opt-boot.kernel.sysctl), e.g.

```nix
{
boot.kernel.sysctl."net.ipv4.tcp_keepalive_time" = 120;
}
{ boot.kernel.sysctl."net.ipv4.tcp_keepalive_time" = 120; }
```

sets the kernel's TCP keepalive time to 120 seconds. To see the

@ -99,9 +99,7 @@ Please refer to the Nixpkgs manual for the various ways of [building a custom ke
To use your custom kernel package in your NixOS configuration, set

```nix
{
boot.kernelPackages = pkgs.linuxPackagesFor yourCustomKernel;
}
{ boot.kernelPackages = pkgs.linuxPackagesFor yourCustomKernel; }
```

## Rust {#sec-linux-rust}
@ -39,9 +39,7 @@ Should grub be used as bootloader, and `/boot` is located on an
encrypted partition, it is necessary to add the following grub option:

```nix
{
boot.loader.grub.enableCryptodisk = true;
}
{ boot.loader.grub.enableCryptodisk = true; }
```

## FIDO2 {#sec-luks-file-systems-fido2}

@ -74,7 +72,8 @@ key, add the following to `configuration.nix`:
```nix
{
boot.initrd.luks.fido2Support = true;
boot.initrd.luks.devices."/dev/sda2".fido2.credential = "f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7";
boot.initrd.luks.devices."/dev/sda2".fido2.credential =
"f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7";
}
```

@ -83,9 +82,7 @@ you might want to enable it only when your device is PIN protected, such
as [Trezor](https://trezor.io/).

```nix
{
boot.initrd.luks.devices."/dev/sda2".fido2.passwordLess = true;
}
{ boot.initrd.luks.devices."/dev/sda2".fido2.passwordLess = true; }
```

### systemd Stage 1 {#sec-luks-file-systems-fido2-systemd}
@ -33,7 +33,7 @@ To enable Mattermost using Postgres, use a config like this:

# For example, to disable auto-installation of prepackaged plugins.
settings.PluginSettings.AutomaticPrepackagedPlugins = false;
}
};
}
```

@ -71,11 +71,9 @@ Here is an example with a prebuilt plugin tarball:
{
services.mattermost = {
plugins = with pkgs; [
/*
* todo
* 0.7.1
* https://github.com/mattermost/mattermost-plugin-todo/releases/tag/v0.7.1
*/
# todo
# 0.7.1
# https://github.com/mattermost/mattermost-plugin-todo/releases/tag/v0.7.1
(fetchurl {
# Note: Don't unpack the tarball; the NixOS module will repack it for you.
url = "https://github.com/mattermost-community/mattermost-plugin-todo/releases/download/v0.7.1/com.mattermost.plugin-todo-0.7.1.tar.gz";
@ -13,7 +13,11 @@ including them from `configuration.nix`, e.g.:
```nix
{ config, pkgs, ... }:

{ imports = [ ./vpn.nix ./kde.nix ];
{
imports = [
./vpn.nix
./kde.nix
];
services.httpd.enable = true;
environment.systemPackages = [ pkgs.emacs ];
# ...

@ -26,7 +30,8 @@ Here, we include two modules from the same directory, `vpn.nix` and
```nix
{ config, pkgs, ... }:

{ services.xserver.enable = true;
{
services.xserver.enable = true;
services.displayManager.sddm.enable = true;
services.xserver.desktopManager.plasma5.enable = true;
environment.systemPackages = [ pkgs.vim ];

@ -42,9 +47,7 @@ merged last, so for list-type options, it will appear at the end of the
merged list. If you want it to appear first, you can use `mkBefore`:

```nix
{
boot.kernelModules = mkBefore [ "kvm-intel" ];
}
{ boot.kernelModules = mkBefore [ "kvm-intel" ]; }
```

This causes the `kvm-intel` kernel module to be loaded before any other

@ -62,9 +65,7 @@ When that happens, it's possible to force one definition take precedence
over the others:

```nix
{
services.httpd.adminAddr = pkgs.lib.mkForce "bob@example.org";
}
{ services.httpd.adminAddr = pkgs.lib.mkForce "bob@example.org"; }
```

When using multiple modules, you may need to access configuration values

@ -84,9 +85,11 @@ For example, here is a module that adds some packages to
```nix
{ config, pkgs, ... }:

{ environment.systemPackages =
{
environment.systemPackages =
if config.services.xserver.enable then
[ pkgs.firefox
[
pkgs.firefox
pkgs.thunderbird
]
else

@ -126,12 +129,14 @@ have the same effect as importing a file which sets those options.
```nix
{ config, pkgs, ... }:

let netConfig = hostName: {
networking.hostName = hostName;
networking.useDHCP = false;
};
let
netConfig = hostName: {
networking.hostName = hostName;
networking.useDHCP = false;
};

in

{ imports = [ (netConfig "nixos.localdomain") ]; }
{
imports = [ (netConfig "nixos.localdomain") ];
}
```
@ -4,9 +4,7 @@ To facilitate network configuration, some desktop environments use
NetworkManager. You can enable NetworkManager by setting:

```nix
{
networking.networkmanager.enable = true;
}
{ networking.networkmanager.enable = true; }
```

some desktop managers (e.g., GNOME) enable NetworkManager automatically

@ -16,9 +14,7 @@ All users that should have permission to change network settings must
belong to the `networkmanager` group:

```nix
{
users.users.alice.extraGroups = [ "networkmanager" ];
}
{ users.users.alice.extraGroups = [ "networkmanager" ]; }
```

NetworkManager is controlled using either `nmcli` or `nmtui`

@ -38,7 +34,9 @@ NetworkManager to ignore those interfaces like:
```nix
{
networking.networkmanager.unmanaged = [
"*" "except:type:wwan" "except:type:gsm"
"*"
"except:type:wwan"
"except:type:gsm"
];
}
```
@ -8,11 +8,7 @@ is to say, expected usage is to add them to the imports list of your
`/etc/configuration.nix` as such:

```nix
{
imports = [
<nixpkgs/nixos/modules/profiles/profile-name.nix>
];
}
{ imports = [ <nixpkgs/nixos/modules/profiles/profile-name.nix> ]; }
```

Even if some of these profiles seem only useful in the context of
@ -3,9 +3,7 @@
Secure shell (SSH) access to your machine can be enabled by setting:

```nix
{
services.openssh.enable = true;
}
{ services.openssh.enable = true; }
```

By default, root logins using a password are disallowed. They can be

@ -17,7 +15,6 @@ as follows:

```nix
{
users.users.alice.openssh.authorizedKeys.keys =
[ "ssh-ed25519 AAAAB3NzaC1kc3MAAACBAPIkGWVEt4..." ];
users.users.alice.openssh.authorizedKeys.keys = [ "ssh-ed25519 AAAAB3NzaC1kc3MAAACBAPIkGWVEt4..." ];
}
```
@ -41,37 +41,38 @@ Here's a typical setup:
fileSystems."/mnt/my-dir" = {
device = "my-user@example.com:/my-dir/";
fsType = "sshfs";
options =
[ # Filesystem options
"allow_other" # for non-root access
"_netdev" # this is a network fs
"x-systemd.automount" # mount on demand
options = [
# Filesystem options
"allow_other" # for non-root access
"_netdev" # this is a network fs
"x-systemd.automount" # mount on demand

# SSH options
"reconnect" # handle connection drops
"ServerAliveInterval=15" # keep connections alive
"IdentityFile=/var/secrets/example-key"
];
# SSH options
"reconnect" # handle connection drops
"ServerAliveInterval=15" # keep connections alive
"IdentityFile=/var/secrets/example-key"
];
};
}
```
More options from `ssh_config(5)` can be given as well, for example you can change the default SSH port or specify a jump proxy:
```nix
{
options =
[ "ProxyJump=bastion@example.com"
"Port=22"
];
options = [
"ProxyJump=bastion@example.com"
"Port=22"
];
}
```
It's also possible to change the `ssh` command used by SSHFS to connect to the server.
For example:
```nix
{
options =
[ (builtins.replaceStrings [" "] ["\\040"]
"ssh_command=${pkgs.openssh}/bin/ssh -v -L 8080:localhost:80")
];
options = [
(builtins.replaceStrings [ " " ] [ "\\040" ]
"ssh_command=${pkgs.openssh}/bin/ssh -v -L 8080:localhost:80"
)
];

}
```