Merge remote-tracking branch 'upstream/nixos-unstable' into nixos-unstable
@@ -310,3 +310,6 @@ c283f32d296564fd649ef3ed268c1f1f7b199c49 # !autorebase nix-shell --run treefmt
# treewide: clean up 'meta = with' pattern
567e8dfd8eddc5468e6380fc563ab8a27422ab1d
# nixfmt 1.2.0
28096cc5e3d8334fbe1845925f000f8c8c5e0aac # !autorebase nix-shell --run treefmt
44 .github/actions/checkout/action.yml vendored
@@ -20,6 +20,7 @@ runs:
PIN_BUMP_SHA: ${{ inputs.untrusted-pin-bump }}
with:
script: |
const { rm, writeFile } = require('node:fs/promises')
const { spawn } = require('node:child_process')
const { join } = require('node:path')
@@ -55,10 +56,19 @@ runs:
return pinned.pins.nixpkgs.revision
}
const pin_bump_sha = process.env.PIN_BUMP_SHA
// Getting the pin-bump diff via the API avoids issues with `git fetch`
// thin-packs not having enough base objects to be applied locally.
// Returns a unified diff suitable for `git apply`.
async function getPinBumpDiff(ref) {
const { data } = await github.rest.repos.getCommit({
mediaType: { format: 'diff' },
...context.repo,
ref,
})
return data
}
// When dealing with a pin bump commit, we need `--depth=2` to view & apply its diff
const depth = pin_bump_sha ? 2 : 1
const pin_bump_sha = process.env.PIN_BUMP_SHA
const commits = [
{
@@ -76,17 +86,14 @@ runs:
{
sha: await getPinnedSha(process.env.TARGET_SHA),
path: 'trusted-pinned'
},
{
sha: pin_bump_sha
}
].filter(({ sha }) => Boolean(sha))
console.log('Fetching the following commits:', commits)
console.log('Checking out the following commits:', commits)
// Fetching all commits at once is much faster than doing multiple checkouts.
// This would fail without --refetch, because we had a partial clone before, but changed it above.
await run('git', 'fetch', `--depth=${depth}`, '--refetch', 'origin', ...(commits.map(({ sha }) => sha)))
await run('git', 'fetch', '--depth=1', '--refetch', 'origin', ...(commits.map(({ sha }) => sha)))
// Checking out onto tmpfs takes 1s and is faster by at least factor 10x.
await run('mkdir', 'nixpkgs')
@@ -101,20 +108,21 @@ runs:
// Create all worktrees in parallel.
await Promise.all(
commits
.filter(({ path }) => Boolean(path))
.map(async ({ sha, path }) => {
await run('git', 'worktree', 'add', join('nixpkgs', path), sha, '--no-checkout')
await run('git', '-C', join('nixpkgs', path), 'sparse-checkout', 'disable')
await run('git', '-C', join('nixpkgs', path), 'checkout', '--progress')
})
commits.map(async ({ sha, path }) => {
await run('git', 'worktree', 'add', join('nixpkgs', path), sha, '--no-checkout')
await run('git', '-C', join('nixpkgs', path), 'sparse-checkout', 'disable')
await run('git', '-C', join('nixpkgs', path), 'checkout', '--progress')
})
)
// Apply pin bump to untrusted worktree
if (pin_bump_sha) {
console.log('Applying untrusted ci/pinned.json bump:', pin_bump_sha)
console.log('Fetching ci/pinned.json bump commit:', pin_bump_sha)
await writeFile('pin-bump.patch', await getPinBumpDiff(pin_bump_sha))
console.log('Applying untrusted ci/pinned.json bump to ./nixpkgs/untrusted')
try {
await run('git', '-C', join('nixpkgs', 'untrusted'), 'cherry-pick', '--no-commit', pin_bump_sha)
await run('git', '-C', join('nixpkgs', 'untrusted'), 'apply', '--3way', join('..', '..', 'pin-bump.patch'))
} catch {
core.setFailed([
`Failed to apply ci/pinned.json bump commit ${pin_bump_sha}.`,
@@ -122,5 +130,7 @@ runs:
`Please rebase the PR or ensure the pin bump is standalone.`
].join(' '))
return
} finally {
await rm('pin-bump.patch')
}
}
2 .github/labeler-no-sync.yml vendored
@@ -26,7 +26,7 @@
|
||||
- all:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file:
|
||||
- .github/actions/*
|
||||
- .github/actions/**/*
|
||||
- .github/workflows/*
|
||||
- .github/labeler*.yml
|
||||
- ci/**/*.*
|
||||
|
||||
@@ -434,7 +434,7 @@ pkgs/by-name/fo/forgejo/ @adamcstephens @bendlas @emilylange
|
||||
|
||||
# Node.js
|
||||
/pkgs/build-support/node/build-npm-package @winterqt
|
||||
/pkgs/build-support/node/fetch-npm-deps @winterqt
|
||||
/pkgs/build-support/node/prefetch-npm-deps @winterqt
|
||||
/doc/languages-frameworks/javascript.section.md @winterqt
|
||||
/pkgs/development/tools/pnpm @Scrumplex @gepbird
|
||||
/pkgs/build-support/node/fetch-pnpm-deps @Scrumplex @gepbird
|
||||
@@ -504,7 +504,7 @@ pkgs/development/interpreters/elixir/ @NixOS/beam
|
||||
pkgs/development/interpreters/lfe/ @NixOS/beam
|
||||
|
||||
# Authelia
|
||||
pkgs/servers/authelia/ @06kellyjac @dit7ya @nicomem
|
||||
pkgs/by-name/au/authelia/ @06kellyjac @dit7ya @nicomem
|
||||
|
||||
# OctoDNS
|
||||
pkgs/by-name/oc/octodns/ @anthonyroussel
|
||||
|
||||
@@ -87,22 +87,30 @@ let
|
||||
"pkgs/development/haskell-modules/configuration-hackage2nix/transitive-broken.yaml"
|
||||
];
|
||||
|
||||
programs.nixf-diagnose.enable = true;
|
||||
settings.formatter.nixf-diagnose = {
|
||||
# Ensure nixfmt cleans up after nixf-diagnose.
|
||||
priority = -1;
|
||||
options = [
|
||||
"--auto-fix"
|
||||
programs.nixf-diagnose = {
|
||||
enable = true;
|
||||
ignore = [
|
||||
# Rule names can currently be looked up here:
|
||||
# https://github.com/nix-community/nixd/blob/main/libnixf/src/Basic/diagnostic.py
|
||||
# TODO: Remove the following and fix things.
|
||||
"--ignore=sema-unused-def-lambda-noarg-formal"
|
||||
"--ignore=sema-unused-def-lambda-witharg-arg"
|
||||
"--ignore=sema-unused-def-lambda-witharg-formal"
|
||||
"--ignore=sema-unused-def-let"
|
||||
"sema-unused-def-lambda-noarg-formal"
|
||||
"sema-unused-def-lambda-witharg-arg"
|
||||
"sema-unused-def-lambda-witharg-formal"
|
||||
"sema-unused-def-let"
|
||||
# Keep this rule, because we have `lib.or`.
|
||||
"--ignore=or-identifier"
|
||||
"or-identifier"
|
||||
# TODO: remove after outstanding prelude diagnostics issues are fixed:
|
||||
# https://github.com/nix-community/nixd/issues/761
|
||||
# https://github.com/nix-community/nixd/issues/762
|
||||
"sema-primop-removed-prefix"
|
||||
"sema-primop-overridden"
|
||||
"sema-constant-overridden"
|
||||
"sema-primop-unknown"
|
||||
];
|
||||
};
|
||||
settings.formatter.nixf-diagnose = {
|
||||
# Ensure nixfmt cleans up after nixf-diagnose.
|
||||
priority = -1;
|
||||
excludes = [
|
||||
# Auto-generated; violates sema-extra-with
|
||||
# Can only sensibly be removed when --auto-fix supports multiple fixes at once:
|
||||
|
||||
@@ -9,9 +9,9 @@
|
||||
},
|
||||
"branch": "nixpkgs-unstable",
|
||||
"submodules": false,
|
||||
"revision": "ee09932cedcef15aaf476f9343d1dea2cb77e261",
|
||||
"url": "https://github.com/NixOS/nixpkgs/archive/ee09932cedcef15aaf476f9343d1dea2cb77e261.tar.gz",
|
||||
"hash": "1xz5pa6la2fyj5b1cfigmg3nmml11fyf9ah0rnr4zfgmnwimn2gn"
|
||||
"revision": "bde09022887110deb780067364a0818e89258968",
|
||||
"url": "https://github.com/NixOS/nixpkgs/archive/bde09022887110deb780067364a0818e89258968.tar.gz",
|
||||
"hash": "13mi187zpa4rw680qbwp7pmykjia8cra3nwvjqmsjba3qhlzif5l"
|
||||
},
|
||||
"treefmt-nix": {
|
||||
"type": "Git",
|
||||
@@ -22,9 +22,9 @@
|
||||
},
|
||||
"branch": "main",
|
||||
"submodules": false,
|
||||
"revision": "5b4ee75aeefd1e2d5a1cc43cf6ba65eba75e83e4",
|
||||
"url": "https://github.com/numtide/treefmt-nix/archive/5b4ee75aeefd1e2d5a1cc43cf6ba65eba75e83e4.tar.gz",
|
||||
"hash": "0cr6aj9bk7n3y09lwmfjr7xg1f069332xf4q99z3kj1c1mp0wl82"
|
||||
"revision": "e96d59dff5c0d7fddb9d113ba108f03c3ef99eca",
|
||||
"url": "https://github.com/numtide/treefmt-nix/archive/e96d59dff5c0d7fddb9d113ba108f03c3ef99eca.tar.gz",
|
||||
"hash": "02gqyxila3ghw8gifq3mns639x86jcq079kvfvjm42mibx7z5fzb"
|
||||
}
|
||||
},
|
||||
"version": 5
|
||||
|
||||
@@ -10,7 +10,7 @@ The hook runs in `installCheckPhase`, requiring `doInstallCheck` is enabled for
|
||||
lib,
|
||||
stdenv,
|
||||
udevCheckHook,
|
||||
# ...
|
||||
# ...
|
||||
}:
|
||||
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
|
||||
@@ -9,7 +9,7 @@ You use it like this:
|
||||
lib,
|
||||
stdenv,
|
||||
versionCheckHook,
|
||||
# ...
|
||||
# ...
|
||||
}:
|
||||
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
|
||||
@@ -112,7 +112,7 @@ For example, to propagate a dependency on SDL2 for lockfiles that select the Nim
|
||||
lib,
|
||||
# …
|
||||
SDL2,
|
||||
# …
|
||||
# …
|
||||
}:
|
||||
|
||||
{
|
||||
|
||||
@@ -72,6 +72,8 @@
|
||||
|
||||
- `forgejo` has been updated to major version 14. For more information, see the [release blog post](https://forgejo.org/2026-01-release-v14-0/) and [full release notes](https://codeberg.org/forgejo/forgejo/src/branch/forgejo/release-notes-published/14.0.0.md)
|
||||
|
||||
- `lima` has been updated from `1.x` to `2.x`. This major update includes several breaking changes, such as `/tmp/lima` no longer being mounted by default.
|
||||
|
||||
- `n8n` has been updated to version 2. You can find the breaking changes here: https://docs.n8n.io/2-0-breaking-changes/.
|
||||
|
||||
- `gurk-rs` has been updated from `0.6.4` to `0.8.0`. Version `0.8.0` includes breaking changes. For more information read the [release notes for 0.8.0](https://github.com/boxdot/gurk-rs/releases/tag/v0.8.0).
|
||||
|
||||
@@ -105,6 +105,22 @@ let
|
||||
# network
|
||||
network = callLibs ./network;
|
||||
|
||||
inherit (builtins)
|
||||
getContext
|
||||
hasContext
|
||||
convertHash
|
||||
hashString
|
||||
hasFile
|
||||
parseDrvName
|
||||
placeholder
|
||||
fromJSON
|
||||
fromTOML
|
||||
toFile
|
||||
toJSON
|
||||
toString
|
||||
toXML
|
||||
tryEval
|
||||
;
|
||||
inherit (self.trivial)
|
||||
id
|
||||
const
|
||||
@@ -112,6 +128,8 @@ let
|
||||
concat
|
||||
"or"
|
||||
and
|
||||
mul
|
||||
div
|
||||
xor
|
||||
bitAnd
|
||||
bitOr
|
||||
@@ -163,6 +181,8 @@ let
|
||||
pathExists
|
||||
genericClosure
|
||||
readFile
|
||||
ceil
|
||||
floor
|
||||
;
|
||||
inherit (self.fixedPoints)
|
||||
fix
|
||||
@@ -295,6 +315,7 @@ let
|
||||
elemAt
|
||||
isList
|
||||
concatAttrValues
|
||||
replaceElemAt
|
||||
;
|
||||
inherit (self.strings)
|
||||
concatStrings
|
||||
@@ -326,7 +347,6 @@ let
|
||||
escape
|
||||
escapeShellArg
|
||||
escapeShellArgs
|
||||
isPath
|
||||
isStorePath
|
||||
isStringLike
|
||||
isValidPosixName
|
||||
@@ -419,6 +439,9 @@ let
|
||||
pathType
|
||||
pathIsDirectory
|
||||
pathIsRegularFile
|
||||
baseNameOf
|
||||
dirOf
|
||||
isPath
|
||||
packagesFromDirectoryRecursive
|
||||
;
|
||||
inherit (self.sources)
|
||||
@@ -433,6 +456,7 @@ let
|
||||
pathIsGitRepo
|
||||
revOrTag
|
||||
repoRevToName
|
||||
filterSource
|
||||
;
|
||||
inherit (self.modules)
|
||||
evalModules
|
||||
@@ -563,6 +587,7 @@ let
|
||||
imap
|
||||
;
|
||||
inherit (self.versions)
|
||||
compareVersions
|
||||
splitVersion
|
||||
;
|
||||
inherit (self.network.ipv6)
|
||||
|
||||
@@ -191,7 +191,7 @@ in
|
||||
if isStringLike path then
|
||||
throw ''lib.fileset.maybeMissing: Argument ("${toString path}") is a string-like value, but it should be a path instead.''
|
||||
else
|
||||
throw ''lib.fileset.maybeMissing: Argument is of type ${typeOf path}, but it should be a path instead.''
|
||||
throw "lib.fileset.maybeMissing: Argument is of type ${typeOf path}, but it should be a path instead."
|
||||
else if !pathExists path then
|
||||
_emptyWithoutBase
|
||||
else
|
||||
@@ -443,7 +443,7 @@ in
|
||||
lib.fileset.toSource: `root` (${toString root}) is a string-like value, but it should be a path instead.
|
||||
Paths in strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.''
|
||||
else
|
||||
throw ''lib.fileset.toSource: `root` is of type ${typeOf root}, but it should be a path instead.''
|
||||
throw "lib.fileset.toSource: `root` is of type ${typeOf root}, but it should be a path instead."
|
||||
# Currently all Nix paths have the same filesystem root, but this could change in the future.
|
||||
# See also ../path/README.md
|
||||
else if !fileset._internalIsEmptyWithoutBase && rootFilesystemRoot != filesetFilesystemRoot then
|
||||
@@ -453,7 +453,7 @@ in
|
||||
`fileset`: Filesystem root is "${toString filesetFilesystemRoot}"
|
||||
Different filesystem roots are not supported.''
|
||||
else if !pathExists root then
|
||||
throw ''lib.fileset.toSource: `root` (${toString root}) is a path that does not exist.''
|
||||
throw "lib.fileset.toSource: `root` (${toString root}) is a path that does not exist."
|
||||
else if pathType root != "directory" then
|
||||
throw ''
|
||||
lib.fileset.toSource: `root` (${toString root}) is a file, but it should be a directory instead. Potential solutions:
|
||||
@@ -619,7 +619,7 @@ in
|
||||
unions =
|
||||
filesets:
|
||||
if !isList filesets then
|
||||
throw ''lib.fileset.unions: Argument is of type ${typeOf filesets}, but it should be a list instead.''
|
||||
throw "lib.fileset.unions: Argument is of type ${typeOf filesets}, but it should be a list instead."
|
||||
else
|
||||
pipe filesets [
|
||||
# Annotate the elements with context, used by _coerceMany for better errors
|
||||
@@ -808,16 +808,16 @@ in
|
||||
fileFilter =
|
||||
predicate: path:
|
||||
if !isFunction predicate then
|
||||
throw ''lib.fileset.fileFilter: First argument is of type ${typeOf predicate}, but it should be a function instead.''
|
||||
throw "lib.fileset.fileFilter: First argument is of type ${typeOf predicate}, but it should be a function instead."
|
||||
else if !isPath path then
|
||||
if path._type or "" == "fileset" then
|
||||
throw ''
|
||||
lib.fileset.fileFilter: Second argument is a file set, but it should be a path instead.
|
||||
If you need to filter files in a file set, use `intersection fileset (fileFilter pred ./.)` instead.''
|
||||
else
|
||||
throw ''lib.fileset.fileFilter: Second argument is of type ${typeOf path}, but it should be a path instead.''
|
||||
throw "lib.fileset.fileFilter: Second argument is of type ${typeOf path}, but it should be a path instead."
|
||||
else if !pathExists path then
|
||||
throw ''lib.fileset.fileFilter: Second argument (${toString path}) is a path that does not exist.''
|
||||
throw "lib.fileset.fileFilter: Second argument (${toString path}) is a path that does not exist."
|
||||
else
|
||||
_fileFilter predicate path;
|
||||
|
||||
@@ -896,9 +896,9 @@ in
|
||||
lib.fileset.fromSource: The source origin of the argument is a string-like value ("${toString path}"), but it should be a path instead.
|
||||
Sources created from paths in strings cannot be turned into file sets, use `lib.sources` or derivations instead.''
|
||||
else
|
||||
throw ''lib.fileset.fromSource: The source origin of the argument is of type ${typeOf path}, but it should be a path instead.''
|
||||
throw "lib.fileset.fromSource: The source origin of the argument is of type ${typeOf path}, but it should be a path instead."
|
||||
else if !pathExists path then
|
||||
throw ''lib.fileset.fromSource: The source origin (${toString path}) of the argument is a path that does not exist.''
|
||||
throw "lib.fileset.fromSource: The source origin (${toString path}) of the argument is a path that does not exist."
|
||||
else if isFiltered then
|
||||
_fromSourceFilter path source.filter
|
||||
else
|
||||
|
||||
@@ -211,7 +211,7 @@ rec {
|
||||
${context} ("${toString value}") is a string-like value, but it should be a file set or a path instead.
|
||||
Paths represented as strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.''
|
||||
else
|
||||
error ''${context} is of type ${typeOf value}, but it should be a file set or a path instead.''
|
||||
error "${context} is of type ${typeOf value}, but it should be a file set or a path instead."
|
||||
else if !pathExists value then
|
||||
error ''
|
||||
${context} (${toString value}) is a path that does not exist.
|
||||
|
||||
@@ -25,6 +25,11 @@ let
|
||||
in
|
||||
|
||||
{
|
||||
inherit (builtins)
|
||||
baseNameOf
|
||||
dirOf
|
||||
isPath
|
||||
;
|
||||
|
||||
/**
|
||||
The type of a path. The path needs to exist and be accessible.
|
||||
|
||||
@@ -742,6 +742,18 @@ lib.mapAttrs mkLicense (
|
||||
fullName = "Historical Permission Notice and Disclaimer - sell xserver variant with MIT disclaimer";
|
||||
};
|
||||
|
||||
hpndSellVariantSafetyClause = {
|
||||
fullName = "HPND - sell variant with safety critical systems clause";
|
||||
url = "https://gitlab.freedesktop.org/xorg/driver/xf86-video-voodoo/-/blob/68a5b6d98ae34749cca889f4373b4043d00bfe6a/src/voodoo_dga.c#L12-33";
|
||||
# TODO: if the license gets accepted to spdx then
|
||||
# add spdxId
|
||||
# else
|
||||
# remove license
|
||||
# && replace reference with whatever this license is supposed to be then
|
||||
# https://github.com/spdx/license-list-XML/issues/2922
|
||||
# spdxId = "HPND-sell-variant-safety-clause";
|
||||
};
|
||||
|
||||
hpndDec = {
|
||||
fullName = "Historical Permission Notice and Disclaimer - DEC variant";
|
||||
spdxId = "HPND-DEC";
|
||||
|
||||
@@ -2019,4 +2019,41 @@ rec {
|
||||
:::
|
||||
*/
|
||||
concatAttrValues = set: concatLists (attrValues set);
|
||||
|
||||
/**
|
||||
Replaces a list's nth element with a new element
|
||||
|
||||
# Inputs
|
||||
|
||||
`list`
|
||||
: Input list
|
||||
|
||||
`idx`
|
||||
: index to replace
|
||||
|
||||
`newElem`
|
||||
: new element to replace with
|
||||
|
||||
# Type
|
||||
|
||||
```
|
||||
replaceElemAt :: [a] -> int -> b -> [a]
|
||||
```
|
||||
|
||||
# Examples
|
||||
:::{.example}
|
||||
## `replaceElemAt` usage example
|
||||
|
||||
```nix
|
||||
lib.replaceElemAt [1 2 3] 0 "a"
|
||||
=> ["a" 2 3]
|
||||
```
|
||||
|
||||
:::
|
||||
*/
|
||||
replaceElemAt =
|
||||
list: idx: newElem:
|
||||
assert lib.assertMsg (idx >= 0 && idx < length list)
|
||||
"'lists.replaceElemAt' called with index ${toString idx} on a list of size ${toString (length list)}";
|
||||
genList (i: if i == idx then newElem else elemAt list i) (length list);
|
||||
}
|
||||
|
||||
@@ -239,7 +239,7 @@ in
|
||||
# The subpath string to append
|
||||
subpath:
|
||||
assert assertMsg (isPath path)
|
||||
''lib.path.append: The first argument is of type ${builtins.typeOf path}, but a path was expected'';
|
||||
"lib.path.append: The first argument is of type ${builtins.typeOf path}, but a path was expected";
|
||||
assert assertMsg (isValid subpath) ''
|
||||
lib.path.append: Second argument is not a valid subpath string:
|
||||
${subpathInvalidReason subpath}'';
|
||||
|
||||
@@ -529,4 +529,6 @@ in
|
||||
|
||||
trace
|
||||
;
|
||||
|
||||
inherit (builtins) filterSource;
|
||||
}
|
||||
|
||||
@@ -85,7 +85,6 @@ rec {
|
||||
BLK_DEV_DM m
|
||||
DM_CRYPT m
|
||||
MD y
|
||||
REISERFS_FS m
|
||||
BTRFS_FS m
|
||||
XFS_FS m
|
||||
JFS_FS m
|
||||
@@ -430,7 +429,6 @@ rec {
|
||||
BLK_DEV_DM m
|
||||
DM_CRYPT m
|
||||
MD y
|
||||
REISERFS_FS m
|
||||
EXT4_FS m
|
||||
USB_STORAGE_CYPRESS_ATACB m
|
||||
|
||||
@@ -475,7 +473,6 @@ rec {
|
||||
FRAMEBUFFER_CONSOLE y
|
||||
EXT2_FS y
|
||||
EXT3_FS y
|
||||
REISERFS_FS y
|
||||
MAGIC_SYSRQ y
|
||||
|
||||
# The kernel doesn't boot at all, with FTRACE
|
||||
|
||||
@@ -331,7 +331,7 @@ in
|
||||
testAttrs = {
|
||||
expectedError = {
|
||||
type = "ThrownError";
|
||||
msg = ''A definition for option `foo' is not of type `string or signed integer convertible to it.*'';
|
||||
msg = "A definition for option `foo' is not of type `string or signed integer convertible to it.*";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -867,7 +867,7 @@ runTests {
|
||||
|
||||
testEscapeNixIdentifierNoQuote = {
|
||||
expr = strings.escapeNixIdentifier "foo";
|
||||
expected = ''foo'';
|
||||
expected = "foo";
|
||||
};
|
||||
|
||||
testEscapeNixIdentifierNumber = {
|
||||
@@ -2634,7 +2634,7 @@ runTests {
|
||||
sections = {
|
||||
};
|
||||
};
|
||||
expected = '''';
|
||||
expected = "";
|
||||
};
|
||||
|
||||
testToINIWithGlobalSectionGlobalEmptyIsTheSameAsToINI =
|
||||
@@ -3002,12 +3002,12 @@ runTests {
|
||||
|
||||
testToLuaEmptyAttrSet = {
|
||||
expr = generators.toLua { } { };
|
||||
expected = ''{}'';
|
||||
expected = "{}";
|
||||
};
|
||||
|
||||
testToLuaEmptyList = {
|
||||
expr = generators.toLua { } [ ];
|
||||
expected = ''{}'';
|
||||
expected = "{}";
|
||||
};
|
||||
|
||||
testToLuaListOfVariousTypes = {
|
||||
@@ -3052,7 +3052,7 @@ runTests {
|
||||
41
|
||||
43
|
||||
];
|
||||
expected = ''{ 41, 43 }'';
|
||||
expected = "{ 41, 43 }";
|
||||
};
|
||||
|
||||
testToLuaEmptyBindings = {
|
||||
@@ -4909,4 +4909,17 @@ runTests {
|
||||
targetTarget = "prefix-tt";
|
||||
};
|
||||
};
|
||||
|
||||
testReplaceElemAt = {
|
||||
expr = lib.replaceElemAt [ 1 2 3 ] 1 "a";
|
||||
expected = [
|
||||
1
|
||||
"a"
|
||||
3
|
||||
];
|
||||
};
|
||||
|
||||
testReplaceElemAtOutOfRange = testingThrow (lib.replaceElemAt [ 1 2 3 ] 5 "a");
|
||||
|
||||
testReplaceElemAtNegative = testingThrow (lib.replaceElemAt [ 1 2 3 ] (-1) "a");
|
||||
}
|
||||
|
||||
@@ -27,6 +27,8 @@ in
|
||||
isFloat
|
||||
add
|
||||
sub
|
||||
mul
|
||||
div
|
||||
lessThan
|
||||
seq
|
||||
deepSeq
|
||||
@@ -34,6 +36,8 @@ in
|
||||
bitAnd
|
||||
bitOr
|
||||
bitXor
|
||||
ceil
|
||||
floor
|
||||
;
|
||||
|
||||
## Simple (higher order) functions
|
||||
|
||||
@@ -1412,7 +1412,7 @@ rec {
|
||||
else if builtins.isBool v then
|
||||
boolToString v
|
||||
else
|
||||
''<${builtins.typeOf v}>'';
|
||||
"<${builtins.typeOf v}>";
|
||||
in
|
||||
mkOptionType rec {
|
||||
name = "enum";
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
|
||||
rec {
|
||||
|
||||
inherit (builtins) compareVersions;
|
||||
|
||||
/**
|
||||
Break a version string into its component parts.
|
||||
|
||||
|
||||
@@ -4584,6 +4584,12 @@
|
||||
githubId = 52875777;
|
||||
name = "Channing He";
|
||||
};
|
||||
chansuke = {
|
||||
email = "chansuke0@gmail.com";
|
||||
github = "chansuke";
|
||||
githubId = 501052;
|
||||
name = "chansuke";
|
||||
};
|
||||
chaoflow = {
|
||||
email = "flo@chaoflow.net";
|
||||
github = "chaoflow";
|
||||
@@ -4924,6 +4930,11 @@
|
||||
githubId = 114656678;
|
||||
name = "Gabriel Hosquet";
|
||||
};
|
||||
cizordj = {
|
||||
github = "cizordj";
|
||||
githubId = 32869222;
|
||||
name = "Cézar Augusto";
|
||||
};
|
||||
cizra = {
|
||||
email = "todurov+nix@gmail.com";
|
||||
github = "cizra";
|
||||
|
||||
@@ -11,10 +11,53 @@ These are called "bootstrap files".
|
||||
|
||||
Bootstrap files should always be fetched from hydra and uploaded to `tarballs.nixos.org` to guarantee that all the binaries were built from the code committed into `nixpkgs` repository.
|
||||
|
||||
The uploads to `tarballs.nixos.org` are done by `@lovesegfault` today.
|
||||
The uploads to `tarballs.nixos.org` are done by `@NixOS/infra` team members who have S3 write access.
|
||||
|
||||
This document describes the procedure of updating bootstrap files in `nixpkgs`.
|
||||
|
||||
## How to upload bootstrap files (for infra team)
|
||||
|
||||
When a PR updates bootstrap files, the commit message contains the upload commands.
|
||||
Infra team members with S3 access can upload as follows:
|
||||
|
||||
1. Clone or navigate to the [nixos-infra](https://github.com/NixOS/nixos-infra) repository and enter the `terraform` directory:
|
||||
|
||||
```
|
||||
$ cd nixos-infra/terraform
|
||||
```
|
||||
|
||||
2. Authenticate with AWS SSO:
|
||||
|
||||
```
|
||||
$ aws sso login
|
||||
```
|
||||
|
||||
3. Realize the build output locally (fetch from hydra cache):
|
||||
|
||||
```
|
||||
$ nix-store --realize /nix/store/<hash>-stdenv-bootstrap-tools
|
||||
```
|
||||
|
||||
4. Upload to S3 with public-read ACL:
|
||||
|
||||
```
|
||||
$ aws s3 cp --recursive --acl public-read \
|
||||
/nix/store/<hash>-stdenv-bootstrap-tools/on-server/ \
|
||||
s3://nixpkgs-tarballs/stdenv/<target>/<nixpkgs-revision>/
|
||||
```
|
||||
|
||||
5. Verify the upload by downloading and checking hashes:
|
||||
|
||||
```
|
||||
$ aws s3 cp --recursive s3://nixpkgs-tarballs/stdenv/<target>/<nixpkgs-revision>/ ./
|
||||
$ sha256sum bootstrap-tools.tar.xz busybox
|
||||
$ sha256sum /nix/store/<hash>-stdenv-bootstrap-tools/on-server/*
|
||||
```
|
||||
|
||||
Compare these hashes with those shown in the PR's commit message.
|
||||
|
||||
The exact paths and hashes are provided in each bootstrap update commit message generated by `refresh-tarballs.bash`.
|
||||
|
||||
## How to request the bootstrap seed update
|
||||
|
||||
To get the tarballs updated let's use an example `i686-unknown-linux-gnu` target:
|
||||
@@ -34,7 +77,7 @@ To get the tarballs updated let's use an example `i686-unknown-linux-gnu` target
|
||||
To validate cross-targets, the NixOS `binfmt` helper can be useful.
|
||||
For `riscv64-unknown-linux-gnu` the `/etc/nixos/configuration.nix` entry would be `boot.binfmt.emulatedSystems = [ "riscv64-linux" ]`.
|
||||
|
||||
3. Propose the commit as a PR to update bootstrap tarballs, tag people who can help you test the updated architecture and once reviewed tag `@lovesegfault` to upload the tarballs.
|
||||
3. Propose the commit as a PR to update bootstrap tarballs, tag people who can help you test the updated architecture and once reviewed tag `@NixOS/infra-build` to upload the tarballs.
|
||||
|
||||
## How to add bootstrap files for a new target
|
||||
|
||||
|
||||
@@ -46,7 +46,7 @@ in
|
||||
maintainers = mkOption {
|
||||
type = listOfMaintainers;
|
||||
default = [ ];
|
||||
example = lib.literalExpression ''[ lib.maintainers.alice lib.maintainers.bob ]'';
|
||||
example = lib.literalExpression "[ lib.maintainers.alice lib.maintainers.bob ]";
|
||||
description = ''
|
||||
List of maintainers of each module.
|
||||
This option should be defined at most once per module.
|
||||
|
||||
@@ -82,7 +82,7 @@ Example:
|
||||
{
|
||||
stdenv,
|
||||
nixosTests,
|
||||
# ...
|
||||
# ...
|
||||
}:
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "example";
|
||||
|
||||
@@ -275,11 +275,11 @@ Alongside many enhancements to NixOS modules and general system improvements, th
|
||||
and thus doesn't qualify as default.
|
||||
|
||||
- PowerDNS Recursor has been updated to version 5.1.2, which comes with a new YAML configuration format (`recursor.yml`)
|
||||
and deprecates the previous format (`recursor.conf`). Accordingly, the NixOS option `services.pdns-recursor.settings`
|
||||
has been renamed to [old-settings](#opt-services.pdns-recursor.old-settings) and will be provided for backward compatibility
|
||||
until the next NixOS release. Users are asked to migrate their settings to the new [yaml-settings](#opt-services.pdns-recursor.old-settings)
|
||||
and deprecates the previous format (`recursor.conf`). Accordingly, the NixOS option {option}`services.pdns-recursor.settings`
|
||||
has been renamed to `old-settings` and will be provided for backward compatibility
|
||||
until the next NixOS release. Users are asked to migrate their settings to the new `yaml-settings`
|
||||
option following this [guide](https://doc.powerdns.com/recursor/appendices/yamlconversion.html).
|
||||
Note that options other than `services.pdns-recursor.settings` are unaffected by this change.
|
||||
Note that options other than {option}`services.pdns-recursor.settings` are unaffected by this change.
|
||||
|
||||
- The `virtualisation.hypervGuest.videoMode` option has been removed. Standard tooling can now be used to configure display modes for Hyper-V VMs.
|
||||
|
||||
|
||||
@@ -20,6 +20,8 @@
|
||||
|
||||
- [reaction](https://reaction.ppom.me/), a daemon that scans program outputs for repeated patterns, and takes action. A common usage is to scan ssh and webserver logs, and to ban hosts that cause multiple authentication errors. A modern alternative to fail2ban. Available as [services.reaction](#opt-services.reaction.enable).
|
||||
|
||||
- [Tailscale Serve](https://tailscale.com/kb/1552/tailscale-services), configure Tailscale Serve for exposing local services to your tailnet. Available as [services.tailscale.serve](#opt-services.tailscale.serve.enable).
|
||||
|
||||
- [qui](https://github.com/autobrr/qui), a modern alternative webUI for qBittorrent, with multi-instance support. Written in Go/React. Available as [services.qui](#opt-services.qui.enable).
|
||||
|
||||
- [LibreChat](https://www.librechat.ai/), open-source self-hostable ChatGPT clone with Agents and RAG APIs. Available as [services.librechat](#opt-services.librechat.enable).
|
||||
@@ -36,6 +38,10 @@
|
||||
|
||||
- [Dawarich](https://dawarich.app/), a self-hostable location history tracker. Available as [services.dawarich](#opt-services.dawarich.enable).
|
||||
|
||||
- [Howdy](https://github.com/boltgolt/howdy), a Windows Hello™ style facial authentication program for Linux.
|
||||
|
||||
- [linux-enable-ir-emitter](https://github.com/EmixamPP/linux-enable-ir-emitter), a tool used to set up IR cameras, used with Howdy.
|
||||
|
||||
- [udp-over-tcp](https://github.com/mullvad/udp-over-tcp), a tunnel for proxying UDP traffic over a TCP stream. Available as `services.udp-over-tcp`.
|
||||
|
||||
- [Komodo Periphery](https://github.com/moghtech/komodo), a multi-server Docker and Git deployment agent by Komodo. Available as [services.komodo-periphery](#opt-services.komodo-periphery.enable).
|
||||
@@ -50,6 +56,8 @@
|
||||
|
||||
- `services.tandoor-recipes` now uses a sub-directory for media files by default starting with `26.05`. Existing setups should move media files out of the data directory and adjust `services.tandoor-recipes.extraConfig.MEDIA_ROOT` accordingly. See [Migrating media files for pre 26.05 installations](#module-services-tandoor-recipes-migrating-media).
|
||||
|
||||
- The packages `iw` and `wirelesstools` (`iwconfig`, `iwlist`, etc.) are no longer installed implicitly if wireless networking has been enabled.
|
||||
|
||||
- `services.kubernetes.addons.dns.coredns` has been renamed to `services.kubernetes.addons.dns.corednsImage` and now expects a
|
||||
package instead of attrs. Now, by default, nixpkgs.coredns in conjunction with dockerTools.buildImage is used, instead
|
||||
of pulling the upstream container image from Docker Hub. If you want the old behavior, you can set:
|
||||
@@ -77,6 +85,11 @@ of pulling the upstream container image from Docker Hub. If you want the old beh
|
||||
|
||||
- Support for `reiserfs` in nixpkgs has been removed, following the removal in Linux 6.13.
|
||||
|
||||
- `services.tor` no longer bind mounts Unix sockets of onion services into its chroot
|
||||
because it was not reliable. Users should do it themselves using either `JoinsNamespaceOf=` and Unix sockets in `/tmp`
|
||||
or `BindPaths=` from a persistent parent directory of each Unix socket.
|
||||
See <https://github.com/NixOS/nixpkgs/issues/481673>.
|
||||
|
||||
- support for `ecryptfs` in nixpkgs has been removed.
|
||||
|
||||
- The `networking.wireless` module has been security hardened: the `wpa_supplicant` daemon now runs under an unprivileged user with restricted access to the system.
|
||||
@@ -97,6 +110,8 @@ of pulling the upstream container image from Docker Hub. If you want the old beh
|
||||
|
||||
- `pdns` has been updated to version [v5.0.x](https://doc.powerdns.com/authoritative/changelog/5.0.html), which introduces breaking changes. Check out the [Upgrade Notes](https://doc.powerdns.com/authoritative/upgrading.html#to-5-0-0) for details.
|
||||
|
||||
- In the PowerDNS Recursor module, following the deprecation period started with NixOS 25.05, the option {option}`services.pdns-recursor.old-settings` has been removed and {option}`services.pdns-recursor.yaml-settings` consequently renamed to [](#opt-services.pdns-recursor.settings).
|
||||
|
||||
- `services.angrr` now uses TOML for configuration. Define policies with `services.angrr.settings` (which generates a TOML file) or point to a file using `services.angrr.configFile`. The legacy options `services.angrr.period`, `services.angrr.ownedOnly`, and `services.angrr.removeRoot` have been removed. See `man 5 angrr` and the description of `services.angrr.settings` options for examples and details.
|
||||
|
||||
- `services.pingvin-share` has been removed as the `pingvin-share.backend` package was broken and the project was archived upstream.
|
||||
@@ -116,3 +131,41 @@ of pulling the upstream container image from Docker Hub. If you want the old beh
|
||||
- `services.openssh` now supports generating host SSH keys by setting `services.openssh.generateHostKeys = true` while leaving `services.openssh.enable` disabled. This is particularly useful for systems that have no need of an SSH daemon but want SSH host keys for other purposes such as using agenix or sops-nix.
|
||||
|
||||
- `services.slurm` now supports slurmrestd usage through the `services.slurm.rest` NixOS options.
|
||||
|
||||
- `glibc` has been updated to version 2.42.
|
||||
|
||||
This version no longer makes the stack executable when a shared library requires this. A symptom
|
||||
is an error like
|
||||
|
||||
> cannot enable executable stack as shared object requires: Invalid argument
|
||||
|
||||
This is usually a bug. Please consider reporting it to the software maintainers.
|
||||
|
||||
In many cases, the library requires the execstack only by mistake. The following workarounds exist:
|
||||
|
||||
* When building the shared library in question from source, use the following linker flags to force turning off the
|
||||
executable flag:
|
||||
|
||||
```nix
|
||||
mkDerivation {
|
||||
# …
|
||||
|
||||
env.NIX_LDFLAGS = "-z,noexecstack";
|
||||
}
|
||||
```
|
||||
|
||||
* If the sources are not available, the execstack-flag can be cleared with `patchelf`:
|
||||
|
||||
```
|
||||
patchelf --clear-execstack binary-only.so
|
||||
```
|
||||
|
||||
* If the shared library to be loaded actually requires an executable stack and it isn't turned
|
||||
on by the application loading it, you may force allowing that behavior by setting the
|
||||
following environment variable:
|
||||
|
||||
```
|
||||
GLIBC_TUNABLES=glibc.rtld.execstack=2
|
||||
```
|
||||
|
||||
**Do not set this globally!** This makes your setup inherently less secure.
|
||||
|
||||
@@ -7,7 +7,7 @@ let
|
||||
common = import ./common.nix;
|
||||
inherit (common) outputPath indexPath;
|
||||
devmode = pkgs.devmode.override {
|
||||
buildArgs = ''${toString ../../release.nix} -A manualHTML.${builtins.currentSystem}'';
|
||||
buildArgs = "${toString ../../release.nix} -A manualHTML.${builtins.currentSystem}";
|
||||
open = "/${outputPath}/${indexPath}";
|
||||
};
|
||||
nixos-render-docs-redirects = pkgs.writeShellScriptBin "redirects" ''${pkgs.lib.getExe pkgs.nixos-render-docs-redirects} --file '${toString ./redirects.json}' "$@"'';
|
||||
|
||||
@@ -347,7 +347,7 @@ let
|
||||
--no-root-passwd \
|
||||
--system ${config.system.build.toplevel} \
|
||||
--substituters "" \
|
||||
${lib.optionalString includeChannel ''--channel ${channelSources}''}
|
||||
${lib.optionalString includeChannel "--channel ${channelSources}"}
|
||||
|
||||
df -h
|
||||
|
||||
|
||||
@@ -330,7 +330,7 @@ let
|
||||
--no-root-passwd \
|
||||
--system ${config.system.build.toplevel} \
|
||||
--substituters "" \
|
||||
${lib.optionalString includeChannel ''--channel ${channelSources}''}
|
||||
${lib.optionalString includeChannel "--channel ${channelSources}"}
|
||||
|
||||
df -h
|
||||
|
||||
|
||||
@@ -164,7 +164,7 @@ in
|
||||
'';
|
||||
type = types.bool;
|
||||
default = config.node.pkgs != null;
|
||||
defaultText = literalExpression ''node.pkgs != null'';
|
||||
defaultText = literalExpression "node.pkgs != null";
|
||||
};
|
||||
|
||||
node.specialArgs = mkOption {
|
||||
|
||||
@@ -410,7 +410,7 @@ let
|
||||
stringOrDefault (concatStringsSep " | " (
|
||||
imap1 (
|
||||
index: name:
|
||||
''${name} = ($ENV.secret${toString index}${optionalString (!secrets.${name}.quote) " | fromjson"})''
|
||||
"${name} = ($ENV.secret${toString index}${optionalString (!secrets.${name}.quote) " | fromjson"})"
|
||||
) (attrNames secrets)
|
||||
)) "."
|
||||
)
|
||||
|
||||
@@ -62,9 +62,9 @@ let
|
||||
"*" # password unset
|
||||
]);
|
||||
|
||||
overrideOrderMutable = ''{option}`initialHashedPassword` -> {option}`initialPassword` -> {option}`hashedPassword` -> {option}`password` -> {option}`hashedPasswordFile`'';
|
||||
overrideOrderMutable = "{option}`initialHashedPassword` -> {option}`initialPassword` -> {option}`hashedPassword` -> {option}`password` -> {option}`hashedPasswordFile`";
|
||||
|
||||
overrideOrderImmutable = ''{option}`initialHashedPassword` -> {option}`hashedPassword` -> {option}`initialPassword` -> {option}`password` -> {option}`hashedPasswordFile`'';
|
||||
overrideOrderImmutable = "{option}`initialHashedPassword` -> {option}`hashedPassword` -> {option}`initialPassword` -> {option}`password` -> {option}`hashedPasswordFile`";
|
||||
|
||||
overrideOrderText = isMutable: ''
|
||||
If the option {option}`users.mutableUsers` is
|
||||
|
||||
@@ -37,7 +37,7 @@ in
|
||||
|
||||
options.xdg.portal = {
|
||||
enable =
|
||||
mkEnableOption ''[xdg desktop integration](https://github.com/flatpak/xdg-desktop-portal)''
|
||||
mkEnableOption "[xdg desktop integration](https://github.com/flatpak/xdg-desktop-portal)"
|
||||
// {
|
||||
default = false;
|
||||
};
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
}:
|
||||
|
||||
{
|
||||
options.hardware.inputmodule.enable = lib.mkEnableOption ''Support for Framework input modules'';
|
||||
options.hardware.inputmodule.enable = lib.mkEnableOption "Support for Framework input modules";
|
||||
|
||||
config = lib.mkIf config.hardware.inputmodule.enable {
|
||||
environment.systemPackages = [ pkgs.inputmodule-control ];
|
||||
|
||||
@@ -228,7 +228,7 @@ in
|
||||
cfg.hostPlatform # make identical, so that `==` equality works; see https://github.com/NixOS/nixpkgs/issues/278001
|
||||
else
|
||||
elaborated;
|
||||
defaultText = lib.literalExpression ''config.nixpkgs.hostPlatform'';
|
||||
defaultText = lib.literalExpression "config.nixpkgs.hostPlatform";
|
||||
description = ''
|
||||
Specifies the platform on which NixOS should be built.
|
||||
By default, NixOS is built on the system where it runs, but you can
|
||||
@@ -252,7 +252,7 @@ in
|
||||
# Make sure that the final value has all fields for sake of other modules
|
||||
# referring to this. TODO make `lib.systems` itself use the module system.
|
||||
apply = lib.systems.elaborate;
|
||||
defaultText = lib.literalExpression ''config.nixpkgs.system'';
|
||||
defaultText = lib.literalExpression "config.nixpkgs.system";
|
||||
description = ''
|
||||
Systems with a recently generated `hardware-configuration.nix`
|
||||
do not need to specify this option, unless cross-compiling, in which case
|
||||
|
||||
@@ -24,7 +24,7 @@ in
|
||||
nixpkgs = {
|
||||
pkgs = mkOption {
|
||||
type = lib.types.pkgs;
|
||||
description = ''The pkgs module argument.'';
|
||||
description = "The pkgs module argument.";
|
||||
};
|
||||
config = mkOption {
|
||||
internal = true;
|
||||
|
||||
@@ -28,7 +28,7 @@ let
|
||||
escapeIfNecessary = s: if needsEscaping s then s else ''"${lib.escape [ "$" "\"" "\\" "`" ] s}"'';
|
||||
attrsToText =
|
||||
attrs:
|
||||
concatStringsSep "\n" (mapAttrsToList (n: v: ''${n}=${escapeIfNecessary (toString v)}'') attrs)
|
||||
concatStringsSep "\n" (mapAttrsToList (n: v: "${n}=${escapeIfNecessary (toString v)}") attrs)
|
||||
+ "\n";
|
||||
|
||||
osReleaseContents =
|
||||
|
||||
@@ -878,6 +878,7 @@
|
||||
./services/misc/languagetool.nix
|
||||
./services/misc/leaps.nix
|
||||
./services/misc/lifecycled.nix
|
||||
./services/misc/linux-enable-ir-emitter.nix
|
||||
./services/misc/litellm.nix
|
||||
./services/misc/llama-cpp.nix
|
||||
./services/misc/local-content-share.nix
|
||||
@@ -1391,6 +1392,7 @@
|
||||
./services/networking/syncthing.nix
|
||||
./services/networking/tailscale-auth.nix
|
||||
./services/networking/tailscale-derper.nix
|
||||
./services/networking/tailscale-serve.nix
|
||||
./services/networking/tailscale.nix
|
||||
./services/networking/tayga.nix
|
||||
./services/networking/tcpcrypt.nix
|
||||
@@ -1488,6 +1490,7 @@
|
||||
./services/security/hockeypuck.nix
|
||||
./services/security/hologram-agent.nix
|
||||
./services/security/hologram-server.nix
|
||||
./services/security/howdy
|
||||
./services/security/infnoise.nix
|
||||
./services/security/intune.nix
|
||||
./services/security/jitterentropy-rngd.nix
|
||||
@@ -1928,7 +1931,6 @@
|
||||
./tasks/filesystems/nfs.nix
|
||||
./tasks/filesystems/ntfs.nix
|
||||
./tasks/filesystems/overlayfs.nix
|
||||
./tasks/filesystems/reiserfs.nix
|
||||
./tasks/filesystems/squashfs.nix
|
||||
./tasks/filesystems/sshfs.nix
|
||||
./tasks/filesystems/unionfs-fuse.nix
|
||||
|
||||
@@ -27,15 +27,15 @@ let
|
||||
chromium:
|
||||
concatStringsSep " " [
|
||||
''env XDG_CONFIG_HOME="$PREV_CONFIG_HOME"''
|
||||
''${chromium}/bin/chromium''
|
||||
''--user-data-dir=''${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive''
|
||||
"${chromium}/bin/chromium"
|
||||
"--user-data-dir=\${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive"
|
||||
''--proxy-server="socks5://$PROXY"''
|
||||
''--host-resolver-rules="MAP * ~NOTFOUND , EXCLUDE localhost"''
|
||||
''--no-first-run''
|
||||
''--new-window''
|
||||
''--incognito''
|
||||
''-no-default-browser-check''
|
||||
''http://cache.nixos.org/''
|
||||
"--no-first-run"
|
||||
"--new-window"
|
||||
"--incognito"
|
||||
"-no-default-browser-check"
|
||||
"http://cache.nixos.org/"
|
||||
];
|
||||
|
||||
desktopItem = pkgs.makeDesktopItem {
|
||||
|
||||
@@ -22,7 +22,7 @@ in
|
||||
{
|
||||
meta.maintainers = [ lib.maintainers.mic92 ];
|
||||
options.programs.nix-ld = {
|
||||
enable = lib.mkEnableOption ''nix-ld, Documentation: <https://github.com/nix-community/nix-ld>'';
|
||||
enable = lib.mkEnableOption "nix-ld, Documentation: <https://github.com/nix-community/nix-ld>";
|
||||
package = lib.mkPackageOption pkgs "nix-ld" { };
|
||||
libraries = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.package;
|
||||
|
||||
@@ -154,7 +154,7 @@ in
|
||||
''
|
||||
+ lib.concatMapStrings (p: "Include ${p}/etc/apparmor.d\n") cfg.packages;
|
||||
# For aa-logprof
|
||||
environment.etc."apparmor/apparmor.conf".text = '''';
|
||||
environment.etc."apparmor/apparmor.conf".text = "";
|
||||
# For aa-logprof
|
||||
environment.etc."apparmor/severity.db".source = pkgs.apparmor-utils + "/etc/apparmor/severity.db";
|
||||
environment.etc."apparmor/logprof.conf".source =
|
||||
|
||||
@@ -322,6 +322,28 @@ let
|
||||
'';
|
||||
};
|
||||
|
||||
howdy = {
|
||||
enable = lib.mkOption {
|
||||
default = config.security.pam.howdy.enable;
|
||||
defaultText = lib.literalExpression "config.security.pam.howdy.enable";
|
||||
type = lib.types.bool;
|
||||
description = ''
|
||||
Whether to enable the Howdy PAM module.
|
||||
|
||||
If set, users can be authenticated using Howdy, the Windows
|
||||
Hello™-style facial authentication service.
|
||||
'';
|
||||
};
|
||||
control = lib.mkOption {
|
||||
default = config.security.pam.howdy.control;
|
||||
defaultText = lib.literalExpression "config.security.pam.howdy.control";
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
This option sets the PAM "control" used for this module.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
oathAuth = lib.mkOption {
|
||||
default = config.security.pam.oath.enable;
|
||||
defaultText = lib.literalExpression "config.security.pam.oath.enable";
|
||||
@@ -951,6 +973,12 @@ let
|
||||
control = "sufficient";
|
||||
modulePath = "${config.services.fprintd.package}/lib/security/pam_fprintd.so";
|
||||
}
|
||||
{
|
||||
name = "howdy";
|
||||
enable = cfg.howdy.enable;
|
||||
control = cfg.howdy.control;
|
||||
modulePath = "${config.services.howdy.package}/lib/security/pam_howdy.so";
|
||||
}
|
||||
]
|
||||
++
|
||||
# Modules in this block require having the password set in PAM_AUTHTOK.
|
||||
@@ -1797,6 +1825,28 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
security.pam.howdy = {
|
||||
enable = lib.mkOption {
|
||||
default = config.services.howdy.enable;
|
||||
defaultText = lib.literalExpression "config.services.howdy.enable";
|
||||
type = lib.types.bool;
|
||||
description = ''
|
||||
Whether to enable the Howdy PAM module.
|
||||
|
||||
If set, users can be authenticated using Howdy, the Windows
|
||||
Hello™-style facial authentication service.
|
||||
'';
|
||||
};
|
||||
control = lib.mkOption {
|
||||
default = config.services.howdy.control;
|
||||
defaultText = lib.literalExpression "config.services.howdy.control";
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
This option sets the PAM "control" used for this module.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
security.pam.krb5 = {
|
||||
enable = lib.mkOption {
|
||||
default = config.security.krb5.enable;
|
||||
|
||||
@@ -78,6 +78,8 @@ in
|
||||
];
|
||||
systemd.services.polkit.stopIfChanged = false;
|
||||
|
||||
systemd.sockets."polkit-agent-helper".wantedBy = [ "sockets.target" ];
|
||||
|
||||
# The polkit daemon reads action/rule files
|
||||
environment.pathsToLink = [ "/share/polkit-1" ];
|
||||
|
||||
@@ -94,19 +96,11 @@ in
|
||||
|
||||
security.pam.services.polkit-1 = { };
|
||||
|
||||
security.wrappers = {
|
||||
pkexec = {
|
||||
setuid = true;
|
||||
owner = "root";
|
||||
group = "root";
|
||||
source = "${cfg.package.bin}/bin/pkexec";
|
||||
};
|
||||
polkit-agent-helper-1 = {
|
||||
setuid = true;
|
||||
owner = "root";
|
||||
group = "root";
|
||||
source = "${cfg.package.out}/lib/polkit-1/polkit-agent-helper-1";
|
||||
};
|
||||
security.wrappers.pkexec = {
|
||||
setuid = true;
|
||||
owner = "root";
|
||||
group = "root";
|
||||
source = "${cfg.package.bin}/bin/pkexec";
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
|
||||
@@ -290,12 +290,12 @@ in
|
||||
]
|
||||
(
|
||||
_:
|
||||
''${cfg.tctiEnvironment.interface}:${
|
||||
"${cfg.tctiEnvironment.interface}:${
|
||||
if cfg.tctiEnvironment.interface == "tabrmd" then
|
||||
cfg.tctiEnvironment.tabrmdConf
|
||||
else
|
||||
cfg.tctiEnvironment.deviceConf
|
||||
}''
|
||||
}"
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
@@ -411,7 +411,7 @@ in
|
||||
lib.concatStringsSep "\n" (
|
||||
lib.imap0 (
|
||||
i: c:
|
||||
''${pkgs.replace-secret}/bin/replace-secret '{{password-${toString i}}}' '${c.passwordFile}' /run/mpd/mpd.conf''
|
||||
"${pkgs.replace-secret}/bin/replace-secret '{{password-${toString i}}}' '${c.passwordFile}' /run/mpd/mpd.conf"
|
||||
) cfg.credentials
|
||||
)
|
||||
);
|
||||
|
||||
@@ -64,7 +64,7 @@ let
|
||||
secretFile: placeholder: targetFile:
|
||||
lib.optionalString (
|
||||
secretFile != null
|
||||
) ''${pkgs.replace-secret}/bin/replace-secret '${placeholder}' '${secretFile}' '${targetFile}' '';
|
||||
) "${pkgs.replace-secret}/bin/replace-secret '${placeholder}' '${secretFile}' '${targetFile}' ";
|
||||
|
||||
preStart = pkgs.writeShellScript "mpdscribble-pre-start" ''
|
||||
cp -f "${cfgTemplate}" "${cfgFile}"
|
||||
|
||||
@@ -35,7 +35,7 @@ let
|
||||
let
|
||||
a = cfg.tcp.anonymousClients.allowedIpRanges;
|
||||
in
|
||||
lib.optional (a != [ ]) ''auth-ip-acl=${lib.concatStringsSep ";" a}'';
|
||||
lib.optional (a != [ ]) "auth-ip-acl=${lib.concatStringsSep ";" a}";
|
||||
port = lib.optional (!(isNull cfg.tcp.port)) "port=${toString cfg.tcp.port}";
|
||||
in
|
||||
pkgs.writeTextFile {
|
||||
|
||||
@@ -134,6 +134,10 @@ in
|
||||
systemd.sockets.restic-rest-server = {
|
||||
listenStreams = [ cfg.listenAddress ];
|
||||
wantedBy = [ "sockets.target" ];
|
||||
socketConfig = {
|
||||
ReusePort = true;
|
||||
FreeBind = true;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = lib.mkIf cfg.privateRepos [
|
||||
|
||||
@@ -255,7 +255,7 @@ in
|
||||
runCheck = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = builtins.length config.services.restic.backups.${name}.checkOpts > 0;
|
||||
defaultText = lib.literalExpression ''builtins.length config.services.backups.${name}.checkOpts > 0'';
|
||||
defaultText = lib.literalExpression "builtins.length config.services.backups.${name}.checkOpts > 0";
|
||||
description = "Whether to run the `check` command with the provided `checkOpts` options.";
|
||||
example = true;
|
||||
};
|
||||
|
||||
@@ -240,7 +240,7 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
recursive = lib.mkEnableOption ''the transfer of child datasets'';
|
||||
recursive = lib.mkEnableOption "the transfer of child datasets";
|
||||
|
||||
sshKey = lib.mkOption {
|
||||
type = with lib.types; nullOr (coercedTo path toString str);
|
||||
|
||||
@@ -246,8 +246,8 @@ in
|
||||
--datadir ${cfg.beacon.dataDir}/${cfg.network} \
|
||||
--execution-endpoint http://${cfg.beacon.execution.address}:${toString cfg.beacon.execution.port} \
|
||||
--execution-jwt ''${CREDENTIALS_DIRECTORY}/LIGHTHOUSE_JWT \
|
||||
${lib.optionalString cfg.beacon.http.enable ''--http --http-address ${cfg.beacon.http.address} --http-port ${toString cfg.beacon.http.port}''} \
|
||||
${lib.optionalString cfg.beacon.metrics.enable ''--metrics --metrics-address ${cfg.beacon.metrics.address} --metrics-port ${toString cfg.beacon.metrics.port}''} \
|
||||
${lib.optionalString cfg.beacon.http.enable "--http --http-address ${cfg.beacon.http.address} --http-port ${toString cfg.beacon.http.port}"} \
|
||||
${lib.optionalString cfg.beacon.metrics.enable "--metrics --metrics-address ${cfg.beacon.metrics.address} --metrics-port ${toString cfg.beacon.metrics.port}"} \
|
||||
${cfg.extraArgs} ${cfg.beacon.extraArgs}
|
||||
'';
|
||||
serviceConfig = {
|
||||
@@ -292,7 +292,7 @@ in
|
||||
--network ${cfg.network} \
|
||||
--beacon-nodes ${lib.concatStringsSep "," cfg.validator.beaconNodes} \
|
||||
--datadir ${cfg.validator.dataDir}/${cfg.network} \
|
||||
${lib.optionalString cfg.validator.metrics.enable ''--metrics --metrics-address ${cfg.validator.metrics.address} --metrics-port ${toString cfg.validator.metrics.port}''} \
|
||||
${lib.optionalString cfg.validator.metrics.enable "--metrics --metrics-address ${cfg.validator.metrics.address} --metrics-port ${toString cfg.validator.metrics.port}"} \
|
||||
${cfg.extraArgs} ${cfg.validator.extraArgs}
|
||||
'';
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ in
|
||||
enable = lib.mkEnableOption "flannel networking";
|
||||
|
||||
openFirewallPorts = lib.mkOption {
|
||||
description = ''Whether to open the Flannel UDP ports in the firewall on all interfaces.'';
|
||||
description = "Whether to open the Flannel UDP ports in the firewall on all interfaces.";
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
};
|
||||
|
||||
@@ -116,7 +116,7 @@ in
|
||||
type = lib.types.path;
|
||||
description = "Optionally pass master.cfg path. Other options in this configuration will be ignored.";
|
||||
default = defaultMasterCfg;
|
||||
defaultText = lib.literalMD ''generated configuration file'';
|
||||
defaultText = lib.literalMD "generated configuration file";
|
||||
example = "/etc/nixos/buildbot/master.cfg";
|
||||
};
|
||||
|
||||
|
||||
@@ -728,7 +728,7 @@ in
|
||||
mapAttrsToList
|
||||
(
|
||||
name: serviceConfig:
|
||||
''`services.gitlab-runner.services.${name}.protected` with runner authentication tokens has no effect and will be ignored. Please remove it from your configuration.''
|
||||
"`services.gitlab-runner.services.${name}.protected` with runner authentication tokens has no effect and will be ignored. Please remove it from your configuration."
|
||||
)
|
||||
(
|
||||
filterAttrs (
|
||||
@@ -740,7 +740,7 @@ in
|
||||
mapAttrsToList
|
||||
(
|
||||
name: serviceConfig:
|
||||
''`services.gitlab-runner.services.${name}.runUntagged` with runner authentication tokens has no effect and will be ignored. Please remove it from your configuration.''
|
||||
"`services.gitlab-runner.services.${name}.runUntagged` with runner authentication tokens has no effect and will be ignored. Please remove it from your configuration."
|
||||
)
|
||||
(
|
||||
filterAttrs (
|
||||
@@ -752,7 +752,7 @@ in
|
||||
mapAttrsToList
|
||||
(
|
||||
name: v:
|
||||
''`services.gitlab-runner.services.${name}.maximumTimeout` with runner authentication tokens has no effect and will be ignored. Please remove it from your configuration.''
|
||||
"`services.gitlab-runner.services.${name}.maximumTimeout` with runner authentication tokens has no effect and will be ignored. Please remove it from your configuration."
|
||||
)
|
||||
(
|
||||
filterAttrs (
|
||||
@@ -764,7 +764,7 @@ in
|
||||
mapAttrsToList
|
||||
(
|
||||
name: v:
|
||||
''`services.gitlab-runner.services.${name}.tagList` with runner authentication tokens has no effect and will be ignored. Please remove it from your configuration.''
|
||||
"`services.gitlab-runner.services.${name}.tagList` with runner authentication tokens has no effect and will be ignored. Please remove it from your configuration."
|
||||
)
|
||||
(
|
||||
filterAttrs (
|
||||
|
||||
@@ -191,7 +191,7 @@ in
|
||||
// {
|
||||
NIX_REMOTE = "daemon";
|
||||
AGENT_WORK_DIR = cfg.workDir;
|
||||
AGENT_STARTUP_ARGS = ''${lib.concatStringsSep " " cfg.startupOptions}'';
|
||||
AGENT_STARTUP_ARGS = "${lib.concatStringsSep " " cfg.startupOptions}";
|
||||
LOG_DIR = cfg.workDir;
|
||||
LOG_FILE = "${cfg.workDir}/go-agent-start.log";
|
||||
}
|
||||
|
||||
@@ -205,10 +205,10 @@ in
|
||||
'';
|
||||
|
||||
environment = {
|
||||
ERL_FLAGS = ''-couch_ini ${lib.concatStringsSep " " configFiles}'';
|
||||
ERL_FLAGS = "-couch_ini ${lib.concatStringsSep " " configFiles}";
|
||||
# 5. the vm.args file
|
||||
COUCHDB_ARGS_FILE = ''${cfg.argsFile}'';
|
||||
HOME = ''${cfg.databaseDir}'';
|
||||
COUCHDB_ARGS_FILE = "${cfg.argsFile}";
|
||||
HOME = "${cfg.databaseDir}";
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
|
||||
@@ -312,7 +312,7 @@ in
|
||||
|
||||
settings = mkOption {
|
||||
default = { };
|
||||
description = ''configuration options for influxdb2, see <https://docs.influxdata.com/influxdb/v2.0/reference/config-options> for details.'';
|
||||
description = "configuration options for influxdb2, see <https://docs.influxdata.com/influxdb/v2.0/reference/config-options> for details.";
|
||||
type = format.type;
|
||||
};
|
||||
|
||||
|
||||
@@ -390,6 +390,10 @@ in
|
||||
systemd.packages = [
|
||||
pkgs.gnome-session
|
||||
pkgs.gnome-shell
|
||||
]
|
||||
++ removeExcluded [
|
||||
pkgs.xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
|
||||
pkgs.xdg-user-dirs-gtk # Used to create the default bookmarks
|
||||
];
|
||||
|
||||
services.udev.packages = [
|
||||
|
||||
@@ -173,9 +173,9 @@ in
|
||||
# This is required for autorotation in Plasma 6
|
||||
qtsensors
|
||||
]
|
||||
++ lib.optionals config.services.flatpak.enable [
|
||||
++ lib.optionals (config.services.flatpak.enable || config.services.fwupd.enable) [
|
||||
# Since PackageKit Nix support is not there yet,
|
||||
# only install discover if flatpak is enabled.
|
||||
# only install discover if flatpak or fwupd is enabled.
|
||||
discover
|
||||
];
|
||||
in
|
||||
|
||||
@@ -272,7 +272,7 @@ in
|
||||
|
||||
filterFile = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
description = ''Filename for the include exclude filter.'';
|
||||
description = "Filename for the include exclude filter.";
|
||||
default = null;
|
||||
example = lib.literalExpression ''
|
||||
pkgs.writeText "filterFile" '''
|
||||
@@ -285,7 +285,7 @@ in
|
||||
|
||||
robotsFile = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
description = ''Provides /robots.txt for net crawlers.'';
|
||||
description = "Provides /robots.txt for net crawlers.";
|
||||
default = null;
|
||||
example = lib.literalExpression ''pkgs.writeText "robots.txt" "# my custom robots.txt ..."'';
|
||||
};
|
||||
@@ -956,7 +956,7 @@ in
|
||||
serviceConfig = {
|
||||
Restart = "on-abnormal";
|
||||
Nice = 5;
|
||||
ExecStart = ''${cfg.package}/bin/athens -config_file=${configFile}'';
|
||||
ExecStart = "${cfg.package}/bin/athens -config_file=${configFile}";
|
||||
|
||||
KillMode = "mixed";
|
||||
KillSignal = "SIGINT";
|
||||
|
||||
@@ -47,7 +47,7 @@ in
|
||||
{
|
||||
options = {
|
||||
services.freeciv = {
|
||||
enable = lib.mkEnableOption ''freeciv'';
|
||||
enable = lib.mkEnableOption "freeciv";
|
||||
settings = lib.mkOption {
|
||||
description = ''
|
||||
Parameters of freeciv-server.
|
||||
|
||||
@@ -17,9 +17,9 @@ let
command,
...
}:
''${
"${
lib.concatMapStringsSep "+" toString keys
}:${lib.concatStringsSep "," events}:${lib.concatStringsSep "," attributes}:${command}''
}:${lib.concatStringsSep "," events}:${lib.concatStringsSep "," attributes}:${command}"
) cfg.bindings}
${cfg.extraConfig}
'';

@@ -23,7 +23,7 @@ in
'';

overdrive = {
enable = lib.mkEnableOption ''`amdgpu` overdrive mode for overclocking'';
enable = lib.mkEnableOption "`amdgpu` overdrive mode for overclocking";

ppfeaturemask = lib.mkOption {
type = lib.types.str;
@@ -39,7 +39,7 @@ in
};
};

opencl.enable = lib.mkEnableOption ''OpenCL support using ROCM runtime library'';
opencl.enable = lib.mkEnableOption "OpenCL support using ROCM runtime library";
};

config = {

@@ -65,10 +65,10 @@ in
{
imports = [
(lib.mkRemovedOptionModule [ "services" "keyd" "ids" ]
''Use keyboards.<filename>.ids instead. If you don't need a multi-file configuration, just add keyboards.default before the ids. See https://github.com/NixOS/nixpkgs/pull/243271.''
"Use keyboards.<filename>.ids instead. If you don't need a multi-file configuration, just add keyboards.default before the ids. See https://github.com/NixOS/nixpkgs/pull/243271."
)
(lib.mkRemovedOptionModule [ "services" "keyd" "settings" ]
''Use keyboards.<filename>.settings instead. If you don't need a multi-file configuration, just add keyboards.default before the settings. See https://github.com/NixOS/nixpkgs/pull/243271.''
"Use keyboards.<filename>.settings instead. If you don't need a multi-file configuration, just add keyboards.default before the settings. See https://github.com/NixOS/nixpkgs/pull/243271."
)
];

@@ -198,7 +198,7 @@
assertion =
((builtins.length config.hardware.nvidia-container-toolkit.csv-files) > 0)
-> config.hardware.nvidia-container-toolkit.discovery-mode == "csv";
message = ''When CSV files are provided, `config.hardware.nvidia-container-toolkit.discovery-mode` has to be set to `csv`.'';
message = "When CSV files are provided, `config.hardware.nvidia-container-toolkit.discovery-mode` has to be set to `csv`.";
}
];
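The blocks above use the standard NixOS `assertions` mechanism: each element is an attribute set with a boolean `assertion` and a `message`, and evaluating the system configuration fails with that message whenever the assertion is false. A minimal module-level sketch with hypothetical option names:

{ config, lib, ... }:
{
  config = {
    assertions = [
      {
        # Fails evaluation unless the (hypothetical) service also enables the firewall.
        assertion = config.services.example.enable -> config.networking.firewall.enable;
        message = "services.example requires networking.firewall.enable to be true.";
      }
    ];
  };
}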
@@ -18,7 +18,7 @@ let
cmd,
...
}:
''${lib.concatMapStringsSep "+" (x: "KEY_" + x) keys} ${
"${lib.concatMapStringsSep "+" (x: "KEY_" + x) keys} ${
toString
{
press = 1;
@@ -26,7 +26,7 @@ let
release = 0;
}
.${event}
} ${cmd}''
} ${cmd}"
) cfg.bindings}
${cfg.extraConfig}
'';

@@ -134,7 +134,7 @@ let
ssl_cert = <${cfg.sslServerCert}
ssl_key = <${cfg.sslServerKey}
${optionalString (cfg.sslCACert != null) ("ssl_ca = <" + cfg.sslCACert)}
${optionalString cfg.enableDHE ''ssl_dh = <${config.security.dhparams.params.dovecot2.path}''}
${optionalString cfg.enableDHE "ssl_dh = <${config.security.dhparams.params.dovecot2.path}"}
disable_plaintext_auth = yes
''
)

@@ -176,7 +176,7 @@ in
hostname = lib.mkOption {
default = "localhost";
type = with lib.types; uniq str;
example = ''example.com'';
example = "example.com";
description = ''
Hostname to use. It should be FQDN.
'';
@@ -185,7 +185,7 @@ in
primaryDomain = lib.mkOption {
default = "localhost";
type = with lib.types; uniq str;
example = ''mail.example.com'';
example = "mail.example.com";
description = ''
Primary MX domain to use. It should be FQDN.
'';

@@ -202,9 +202,9 @@ in
if cfg.socket ? path then
"--unix=${cfg.socket.path} --socketmode=${cfg.socket.mode}"
else
''--inet=${
"--inet=${
optionalString (cfg.socket.addr != null) (cfg.socket.addr + ":")
}${toString cfg.socket.port}'';
}${toString cfg.socket.port}";
in
{
description = "Postfix Greylisting Service";

@@ -223,7 +223,7 @@ in
wantedBy = [ "multi-user.target" ];

serviceConfig = {
ExecStart = ''${pkgs.mjolnir}/bin/mjolnir --mjolnir-config ./config/default.yaml'';
ExecStart = "${pkgs.mjolnir}/bin/mjolnir --mjolnir-config ./config/default.yaml";
ExecStartPre = [ generateConfig ];
WorkingDirectory = cfg.dataPath;
StateDirectory = "mjolnir";

@@ -40,7 +40,7 @@ let
wantedBy = [ "multi-user.target" ];

serviceConfig = {
ExecStart = ''${pkgs.pantalaimon-headless}/bin/pantalaimon --config ${mkConfigFile name instanceConfig} --data-path ${instanceConfig.dataPath}'';
ExecStart = "${pkgs.pantalaimon-headless}/bin/pantalaimon --config ${mkConfigFile name instanceConfig} --data-path ${instanceConfig.dataPath}";
Restart = "on-failure";
DynamicUser = true;
NoNewPrivileges = true;

@@ -37,9 +37,9 @@ in
args = synapseConfig.settings.database.args;
in
if synapseConfig.enable then
''postgresql://${args.user}${lib.optionalString (args ? password) (":" + args.password)}@${
"postgresql://${args.user}${lib.optionalString (args ? password) (":" + args.password)}@${
lib.escapeURL (if (args ? host) then args.host else "/run/postgresql")
}${lib.optionalString (args ? port) (":" + args.port)}/${args.database}''
}${lib.optionalString (args ? port) (":" + args.port)}/${args.database}"
else
null;
defaultText = lib.literalExpression ''

@@ -31,7 +31,7 @@ let
# export passwords in environment variables in plaintext.
${concatMapStringsSep "\n" (
x:
''export SYNC_USER${toString x.i}=${escapeShellArg x.user.username}:${escapeShellArg x.user.password}''
"export SYNC_USER${toString x.i}=${escapeShellArg x.user.username}:${escapeShellArg x.user.password}"
) usersWithIndexesNoFile}
exec ${lib.getExe cfg.package}
'';

@@ -166,15 +166,15 @@ in
imports = [
(lib.mkRenamedOptionModule
[ "services" "apache-kafka" "brokerId" ]
[ "services" "apache-kafka" "settings" ''broker.id'' ]
[ "services" "apache-kafka" "settings" "broker.id" ]
)
(lib.mkRenamedOptionModule
[ "services" "apache-kafka" "logDirs" ]
[ "services" "apache-kafka" "settings" ''log.dirs'' ]
[ "services" "apache-kafka" "settings" "log.dirs" ]
)
(lib.mkRenamedOptionModule
[ "services" "apache-kafka" "zookeeper" ]
[ "services" "apache-kafka" "settings" ''zookeeper.connect'' ]
[ "services" "apache-kafka" "settings" "zookeeper.connect" ]
)

(lib.mkRemovedOptionModule [
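As in the keyd and apache-kafka hunks above, option moves are declared through `lib.mkRenamedOptionModule` / `lib.mkRemovedOptionModule` in a module's `imports`: the renamed form forwards definitions of the old path to the new one, the removed form turns any use of the old path into an error carrying the given message. An illustrative sketch with hypothetical option paths:

{ lib, ... }:
{
  imports = [
    # Values set via services.example.brokerId end up in services.example.settings."broker.id".
    (lib.mkRenamedOptionModule
      [ "services" "example" "brokerId" ]
      [ "services" "example" "settings" "broker.id" ]
    )
    # Using services.example.legacy now aborts evaluation with this message.
    (lib.mkRemovedOptionModule [ "services" "example" "legacy" ]
      "Use services.example.settings instead."
    )
  ];
}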
@@ -136,7 +136,7 @@ in
'';
};
wakeup_cmd = mkOption {
default = ''sh -c 'echo 0 > /sys/class/rtc/rtc0/wakealarm && echo {timestamp:.0f} > /sys/class/rtc/rtc0/wakealarm' '';
default = "sh -c 'echo 0 > /sys/class/rtc/rtc0/wakealarm && echo {timestamp:.0f} > /sys/class/rtc/rtc0/wakealarm' ";
type = with types; str;
description = ''
The command to execute for scheduling a wake up of the system. The given string is
@@ -232,7 +232,7 @@ in
after = [ "network.target" ];
path = flatten (attrValues (filterAttrs (n: _: hasCheck n) dependenciesForChecks));
serviceConfig = {
ExecStart = ''${autosuspend}/bin/autosuspend -l ${autosuspend}/etc/autosuspend-logging.conf -c ${autosuspend-conf} daemon'';
ExecStart = "${autosuspend}/bin/autosuspend -l ${autosuspend}/etc/autosuspend-logging.conf -c ${autosuspend-conf} daemon";
};
};

@@ -242,7 +242,7 @@ in
wantedBy = [ "sleep.target" ];
after = [ "sleep.target" ];
serviceConfig = {
ExecStart = ''${autosuspend}/bin/autosuspend -l ${autosuspend}/etc/autosuspend-logging.conf -c ${autosuspend-conf} presuspend'';
ExecStart = "${autosuspend}/bin/autosuspend -l ${autosuspend}/etc/autosuspend-logging.conf -c ${autosuspend-conf} presuspend";
};
};
};

@@ -11,7 +11,7 @@ let
in
{
options.services.evdevremapkeys = {
enable = lib.mkEnableOption ''evdevremapkeys, a daemon to remap events on linux input devices'';
enable = lib.mkEnableOption "evdevremapkeys, a daemon to remap events on linux input devices";

settings = lib.mkOption {
type = format.type;

@@ -11,19 +11,19 @@ in
meta.maintainers = [ lib.maintainers.camillemndn ];

options.services.jellyseerr = {
enable = lib.mkEnableOption ''Jellyseerr, a requests manager for Jellyfin'';
enable = lib.mkEnableOption "Jellyseerr, a requests manager for Jellyfin";
package = lib.mkPackageOption pkgs "jellyseerr" { };

openFirewall = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''Open port in the firewall for the Jellyseerr web interface.'';
description = "Open port in the firewall for the Jellyseerr web interface.";
};

port = lib.mkOption {
type = lib.types.port;
default = 5055;
description = ''The port which the Jellyseerr web UI should listen to.'';
description = "The port which the Jellyseerr web UI should listen to.";
};

configDir = lib.mkOption {

nixos/modules/services/misc/linux-enable-ir-emitter.nix | 71 lines (new file)
@@ -0,0 +1,71 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.linux-enable-ir-emitter;
in
{
options = {
services.linux-enable-ir-emitter = {
enable = lib.mkEnableOption "" // {
description = ''
Whether to enable IR emitter hardware. Designed to be used with the
Howdy facial authentication. After enabling the service, configure
the emitter with `sudo linux-enable-ir-emitter configure`.
'';
};

package = lib.mkPackageOption pkgs "linux-enable-ir-emitter" { } // {
description = ''
Package to use for the Linux Enable IR Emitter service.
'';
};

device = lib.mkOption {
type = lib.types.str;
default = "video2";
description = ''
IR camera device to depend on. For example, for `/dev/video2`
the value would be `video2`. Find this with the command
{command}`realpath /dev/v4l/by-path/<generated-driver-name>`.
'';
};
};
};

config = lib.mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];

# https://github.com/EmixamPP/linux-enable-ir-emitter/blob/7e3a6527ef2efccabaeefc5a93c792628325a8db/sources/systemd/linux-enable-ir-emitter.service
systemd.services.linux-enable-ir-emitter =
let
targets = [
"suspend.target"
"sleep.target"
"hybrid-sleep.target"
"hibernate.target"
"suspend-then-hibernate.target"
];
in
{
description = "Enable the infrared emitter";
# Added to match
# https://github.com/EmixamPP/linux-enable-ir-emitter/blob/6.1.2/boot_service/systemd/linux-enable-ir-emitter.service
# Prevents the program fail to detect the IR camera until a service
# restart.
preStart = ''
${pkgs.kmod}/bin/modprobe uvcvideo
sleep 1
'';
script = "${lib.getExe cfg.package} --verbose run";
serviceConfig.StateDirectory = "linux-enable-ir-emitter";
serviceConfig.LogsDirectory = "linux-enable-ir-emitter";

wantedBy = targets ++ [ "multi-user.target" ];
after = targets ++ [ "dev-${cfg.device}.device" ];
};
};
}
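For completeness, using the new module from a system configuration would look roughly like this (values are illustrative; "video2" mirrors the option default above):

{
  services.linux-enable-ir-emitter = {
    enable = true;
    # Name of the IR camera device under /dev, as described by the option above.
    device = "video2";
  };
}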
@@ -79,7 +79,7 @@ in
assertions = [
{
assertion = cfg.automatic -> config.nix.enable;
message = ''nix.gc.automatic requires nix.enable'';
message = "nix.gc.automatic requires nix.enable";
}
];

@@ -58,7 +58,7 @@ in
assertions = [
{
assertion = cfg.automatic -> config.nix.enable;
message = ''nix.optimise.automatic requires nix.enable'';
message = "nix.optimise.automatic requires nix.enable";
}
];

@@ -109,9 +109,9 @@ in
description = "Synergy client";
wantedBy = lib.optional cfgC.autoStart "graphical-session.target";
path = [ pkgs.synergy ];
serviceConfig.ExecStart = ''${pkgs.synergy}/bin/synergyc -f ${
serviceConfig.ExecStart = "${pkgs.synergy}/bin/synergyc -f ${
lib.optionalString (cfgC.screenName != "") "-n ${cfgC.screenName}"
} ${cfgC.serverAddress}'';
} ${cfgC.serverAddress}";
serviceConfig.Restart = "on-failure";
};
})
@@ -124,13 +124,13 @@ in
description = "Synergy server";
wantedBy = lib.optional cfgS.autoStart "graphical-session.target";
path = [ pkgs.synergy ];
serviceConfig.ExecStart = ''${pkgs.synergy}/bin/synergys -c ${cfgS.configFile} -f${
serviceConfig.ExecStart = "${pkgs.synergy}/bin/synergys -c ${cfgS.configFile} -f${
lib.optionalString (cfgS.address != "") " -a ${cfgS.address}"
}${
lib.optionalString (cfgS.screenName != "") " -n ${cfgS.screenName}"
}${lib.optionalString cfgS.tls.enable " --enable-crypto"}${
lib.optionalString (cfgS.tls.cert != null) " --tls-cert ${cfgS.tls.cert}"
}'';
}";
serviceConfig.Restart = "on-failure";
};
})

@@ -48,7 +48,9 @@ in
users = {
groups.weechat = { };
users.weechat = {
createHome = true;
group = "weechat";
home = cfg.root;
isSystemUser = true;
};
};

@@ -93,7 +93,7 @@ in

options = {
services.nagios = {
enable = lib.mkEnableOption ''[Nagios](https://www.nagios.org/) to monitor your system or network'';
enable = lib.mkEnableOption "[Nagios](https://www.nagios.org/) to monitor your system or network";

objectDefs = lib.mkOption {
description = ''

@@ -63,7 +63,7 @@ in
"${pkgs.prometheus-mysqld-exporter}/bin/mysqld_exporter"
"--web.listen-address=${cfg.listenAddress}:${toString cfg.port}"
"--web.telemetry-path=${cfg.telemetryPath}"
(optionalString (cfg.configFile != null) ''--config.my-cnf=''${CREDENTIALS_DIRECTORY}/config'')
(optionalString (cfg.configFile != null) "--config.my-cnf=\${CREDENTIALS_DIRECTORY}/config")
(escapeShellArgs cfg.extraFlags)
];
RestrictAddressFamilies = [
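The mysqld-exporter line above is the one subtle case in this series: the goal is a literal `${CREDENTIALS_DIRECTORY}` in the final command line, to be expanded by systemd at runtime rather than interpolated by Nix. Indented strings escape interpolation as `''${`, double-quoted strings as `\${`; both forms in the sketch below yield the same text:

{
  # Both evaluate to the literal string:
  #   --config.my-cnf=${CREDENTIALS_DIRECTORY}/config
  indented = ''--config.my-cnf=''${CREDENTIALS_DIRECTORY}/config'';
  quoted = "--config.my-cnf=\${CREDENTIALS_DIRECTORY}/config";
}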
@@ -30,12 +30,12 @@ let
sedExpr = '':x /^>\(${localCellsRegex}\) / { n; :y /^>/! { n; by }; bx }; p'';
globalCommand =
if cfg.cellServDB != { } then
''sed -n -e ${lib.escapeShellArg sedExpr} ${cfg.globalCellServDBFile}''
"sed -n -e ${lib.escapeShellArg sedExpr} ${cfg.globalCellServDBFile}"
else
''cat ${cfg.globalCellServDBFile}'';
"cat ${cfg.globalCellServDBFile}";
in
pkgs.runCommand "CellServDB" { preferLocalBuild = true; } ''
${lib.optionalString (cfg.globalCellServDBFile != null) ''${globalCommand} > $out''}
${lib.optionalString (cfg.globalCellServDBFile != null) "${globalCommand} > $out"}
cat ${clientServDB} >> $out
'';
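`lib.optionalString`, used above, simply returns its string argument when the condition is true and the empty string otherwise, so changing how that argument is quoted is again value-neutral. A tiny evaluation sketch (assumes a nixpkgs checkout is reachable as `<nixpkgs>`):

let
  lib = (import <nixpkgs> { }).lib;
in {
  kept = lib.optionalString true "cat CellServDB > $out";     # => "cat CellServDB > $out"
  dropped = lib.optionalString false "cat CellServDB > $out"; # => ""
}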
@@ -395,7 +395,7 @@ in
};

environment = {
TUNNEL_ORIGIN_CERT = lib.mkIf (certFile != null) ''%d/cert.pem'';
TUNNEL_ORIGIN_CERT = lib.mkIf (certFile != null) "%d/cert.pem";
TUNNEL_EDGE_IP_VERSION = tunnel.edgeIPVersion;
};
}

@@ -110,12 +110,12 @@ let
concatLines (
forEach relevantSecrets (
secret:
''export ${secret}=$(< ${
"export ${secret}=$(< ${
if cfg.settingsSecret.${secret} == null then
"secrets/${secret}"
else
"\"$CREDENTIALS_DIRECTORY/${secret}\""
})''
})"
)
);

@@ -31,7 +31,7 @@ in
type = lib.types.nullOr lib.types.str;
default = null;
example = "gns3";
description = ''Username used to access the GNS3 Server.'';
description = "Username used to access the GNS3 Server.";
};

passwordFile = lib.mkOption {
@@ -68,7 +68,7 @@ in
file = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = "/var/log/gns3/server.log";
description = ''Path of the file GNS3 Server should log to.'';
description = "Path of the file GNS3 Server should log to.";
};

debug = lib.mkEnableOption "debug logging";
@@ -96,17 +96,17 @@ in
};

dynamips = {
enable = lib.mkEnableOption ''Dynamips support'';
enable = lib.mkEnableOption "Dynamips support";
package = lib.mkPackageOption pkgs "dynamips" { };
};

ubridge = {
enable = lib.mkEnableOption ''uBridge support'';
enable = lib.mkEnableOption "uBridge support";
package = lib.mkPackageOption pkgs "ubridge" { };
};

vpcs = {
enable = lib.mkEnableOption ''VPCS support'';
enable = lib.mkEnableOption "VPCS support";
package = lib.mkPackageOption pkgs "vpcs" { };
};
};

@@ -56,7 +56,7 @@ in
serviceConfig = {
ExecStart = "${pkgs.go-autoconfig}/bin/go-autoconfig -config ${configFile}";
Restart = "on-failure";
WorkingDirectory = ''${pkgs.go-autoconfig}/'';
WorkingDirectory = "${pkgs.go-autoconfig}/";
DynamicUser = true;
};
};

@@ -1363,13 +1363,13 @@ in
# see https://github.com/openwrt/openwrt/blob/539cb5389d9514c99ec1f87bd4465f77c7ed9b93/package/kernel/mac80211/files/lib/netifd/wireless/mac80211.sh#L158
{
assertion = length (filter (bss: bss == radio) (attrNames radioCfg.networks)) == 1;
message = ''hostapd radio ${radio}: Exactly one network must be named like the radio, for reasons internal to hostapd.'';
message = "hostapd radio ${radio}: Exactly one network must be named like the radio, for reasons internal to hostapd.";
}
{
assertion =
(radioCfg.wifi4.enable && builtins.elem "HT40-" radioCfg.wifi4.capabilities)
-> radioCfg.channel != 0;
message = ''hostapd radio ${radio}: using ACS (channel = 0) together with HT40- (wifi4.capabilities) is unsupported by hostapd'';
message = "hostapd radio ${radio}: using ACS (channel = 0) together with HT40- (wifi4.capabilities) is unsupported by hostapd";
}
]
# BSS warnings
@@ -1391,42 +1391,42 @@ in
}
{
assertion = (length (attrNames radioCfg.networks) > 1) -> (bssCfg.bssid != null);
message = ''hostapd radio ${radio} bss ${bss}: bssid must be specified manually (for now) since this radio uses multiple BSS.'';
message = "hostapd radio ${radio} bss ${bss}: bssid must be specified manually (for now) since this radio uses multiple BSS.";
}
{
assertion = countWpaPasswordDefinitions <= 1;
message = ''hostapd radio ${radio} bss ${bss}: must use at most one WPA password option (wpaPassword, wpaPasswordFile, wpaPskFile)'';
message = "hostapd radio ${radio} bss ${bss}: must use at most one WPA password option (wpaPassword, wpaPasswordFile, wpaPskFile)";
}
{
assertion =
auth.wpaPassword != null
-> (stringLength auth.wpaPassword >= 8 && stringLength auth.wpaPassword <= 63);
message = ''hostapd radio ${radio} bss ${bss}: uses a wpaPassword of invalid length (must be in [8,63]).'';
message = "hostapd radio ${radio} bss ${bss}: uses a wpaPassword of invalid length (must be in [8,63]).";
}
{
assertion = auth.saePasswords == [ ] || auth.saePasswordsFile == null;
message = ''hostapd radio ${radio} bss ${bss}: must use only one SAE password option (saePasswords or saePasswordsFile)'';
message = "hostapd radio ${radio} bss ${bss}: must use only one SAE password option (saePasswords or saePasswordsFile)";
}
{
assertion = auth.mode == "wpa3-sae" -> (auth.saePasswords != [ ] || auth.saePasswordsFile != null);
message = ''hostapd radio ${radio} bss ${bss}: uses WPA3-SAE which requires defining a sae password option'';
message = "hostapd radio ${radio} bss ${bss}: uses WPA3-SAE which requires defining a sae password option";
}
{
assertion =
auth.mode == "wpa3-sae-transition"
-> (auth.saePasswords != [ ] || auth.saePasswordsFile != null) && countWpaPasswordDefinitions == 1;
message = ''hostapd radio ${radio} bss ${bss}: uses WPA3-SAE in transition mode requires defining both a wpa password option and a sae password option'';
message = "hostapd radio ${radio} bss ${bss}: uses WPA3-SAE in transition mode requires defining both a wpa password option and a sae password option";
}
{
assertion =
(auth.mode == "wpa2-sha1" || auth.mode == "wpa2-sha256") -> countWpaPasswordDefinitions == 1;
message = ''hostapd radio ${radio} bss ${bss}: uses WPA2-PSK which requires defining a wpa password option'';
message = "hostapd radio ${radio} bss ${bss}: uses WPA2-PSK which requires defining a wpa password option";
}
]
++ optionals (auth.saePasswords != [ ]) (
imap1 (i: entry: {
assertion = (entry.password == null) != (entry.passwordFile == null);
message = ''hostapd radio ${radio} bss ${bss} saePassword entry ${i}: must set exactly one of `password` or `passwordFile`'';
message = "hostapd radio ${radio} bss ${bss} saePassword entry ${i}: must set exactly one of `password` or `passwordFile`";
}) auth.saePasswords
)
) radioCfg.networks

@@ -221,7 +221,7 @@ in
startAt = cfg.interval;
serviceConfig = {
Type = "oneshot";
ExecStart = ''${lib.getExe pkgs.inadyn} -f ${configFile} --cache-dir ''${CACHE_DIRECTORY} -1 --foreground -l ${cfg.logLevel}'';
ExecStart = "${lib.getExe pkgs.inadyn} -f ${configFile} --cache-dir \${CACHE_DIRECTORY} -1 --foreground -l ${cfg.logLevel}";
LoadCredential = "config:${configFile}";
CacheDirectory = "inadyn";

@@ -237,10 +237,12 @@ in
type = lib.types.enum [
"local"
"redis"
"postgres"
];
default = "local";
description = ''
Lock backend to use: 'local' (single instance), 'redis' (distributed).
Lock backend to use: 'local' (single instance), 'redis'
(distributed), or 'postgres' (distributed, requires PostgreSQL).
'';
};
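The hunk above adds a third value to an enum-typed option; with `lib.types.enum`, any value outside the listed strings is rejected at evaluation time, so the description is the only other place that needs updating. The general shape of such a declaration (the option name here is hypothetical):

lockBackend = lib.mkOption {
  type = lib.types.enum [
    "local"
    "redis"
    "postgres"
  ];
  default = "local";
  description = "Lock backend to use.";
};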
@@ -301,7 +301,7 @@ in
ui.enable = mkOption {
type = bool;
default = nixosConfig.services.netbird.ui.enable;
defaultText = literalExpression ''client.ui.enable'';
defaultText = literalExpression "client.ui.enable";
description = ''
Controls presence of `netbird-ui` wrapper for this NetBird client.
'';

@@ -148,7 +148,7 @@ let
'';

maybeString = prefix: x: optionalString (x != null) ''${prefix} "${x}"'';
maybeToString = prefix: x: optionalString (x != null) ''${prefix} ${toString x}'';
maybeToString = prefix: x: optionalString (x != null) "${prefix} ${toString x}";
forEach = pre: l: concatMapStrings (x: pre + x + "\n") l;

keyConfigFile = concatStrings (

@@ -554,7 +554,6 @@ in

meta.maintainers = with lib.maintainers; [
jackr
sigmasquadron
water-sucks
];
}

@@ -48,23 +48,6 @@ let
}
);

configFile =
if cfg.old-settings != { } then
# Convert recursor.conf to recursor.yml and merge it
let
conf = pkgs.writeText "recursor.conf" (
concatStringsSep "\n" (mapAttrsToList (name: val: "${name}=${serialize val}") cfg.old-settings)
);

yaml = settingsFormat.generate "recursor.yml" cfg.yaml-settings;
in
pkgs.runCommand "recursor-merged.yml" { } ''
${pkgs.pdns-recursor}/bin/rec_control show-yaml --config ${conf} > override.yml
${pkgs.yq-go}/bin/yq '. *= load("override.yml")' ${yaml} > $out
''
else
settingsFormat.generate "recursor.yml" cfg.yaml-settings;

in
{
options.services.pdns-recursor = {
@@ -196,30 +179,7 @@ in
'';
};

old-settings = mkOption {
type = configType;
default = { };
example = literalExpression ''
{
loglevel = 8;
log-common-errors = true;
}
'';
description = ''
Older PowerDNS Recursor settings. Use this option to configure
Recursor settings not exposed in a NixOS option or to bypass one.
See the full documentation at
<https://doc.powerdns.com/recursor/settings.html>
for the available options.

::: {.warning}
This option is provided for backward compatibility only
and will be removed in the next release of NixOS.
:::
'';
};

yaml-settings = mkOption {
settings = mkOption {
type = settingsFormat.type;
default = { };
example = literalExpression ''
@@ -249,11 +209,12 @@ in

config = mkIf cfg.enable {

environment.etc."/pdns-recursor/recursor.yml".source = configFile;
environment.etc."/pdns-recursor/recursor.yml".source =
settingsFormat.generate "recursor.yml" cfg.settings;

networking.resolvconf.useLocalResolver = lib.mkDefault true;

services.pdns-recursor.yaml-settings = {
services.pdns-recursor.settings = {
incoming = mkDefaultAttrs {
listen = cfg.dns.address;
port = cfg.dns.port;
@@ -301,15 +262,6 @@ in

users.groups.pdns-recursor = { };

warnings = lib.optional (cfg.old-settings != { }) ''
pdns-recursor has changed its configuration file format from pdns-recursor.conf
(mapped to `services.pdns-recursor.old-settings`) to the newer pdns-recursor.yml
(mapped to `services.pdns-recursor.yaml-settings`).

Support for the older format will be removed in a future version, so please migrate
your settings over. See <https://doc.powerdns.com/recursor/yamlsettings.html>.
'';

};

imports = [
@@ -320,16 +272,32 @@ in
] "To change extra Recursor settings use services.pdns-recursor.settings instead.")

(mkRenamedOptionModule
[
"services"
"pdns-recursor"
"yaml-settings"
]
[
"services"
"pdns-recursor"
"settings"
]
)

(mkRemovedOptionModule
[
"services"
"pdns-recursor"
"old-settings"
]
''
pdns-recursor has changed its configuration file format from pdns-recursor.conf
(mapped to `services.pdns-recursor.old-settings`) to the newer pdns-recursor.yml
(mapped to `services.pdns-recursor.settings`).

Support for the older format has been removed, please migrate your settings over.
See <https://doc.powerdns.com/recursor/yamlsettings.html>.
''
)
];
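With `old-settings` gone, a configuration that still carried flat recursor.conf keys has to be rewritten against the structured `services.pdns-recursor.settings` layout, whose sections follow the upstream recursor.yml schema (see the yamlsettings link in the message above). A rough sketch only; the key names and values below are illustrative and should be checked against that documentation:

{
  services.pdns-recursor = {
    enable = true;
    settings = {
      # incoming.* mirrors what the module itself sets from cfg.dns above.
      incoming = {
        listen = [ "127.0.0.1" ];
        port = 53;
      };
    };
  };
}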
Some files were not shown because too many files have changed in this diff.