treewide: fix typos

Changed files
+110 -110
lib
nixos
pkgs
applications
editors
emacs
elisp-packages
vscode
extensions
ms-vscode.cpptools
graphics
yacreader
build-support
rust
build-rust-crate
by-name
ad
adios2
hy
hyperhdr
il
ilmbase
im
imlib2
li
libcredis
mi
miktex
mx
mxnet
ty
typical
us
usb-reset
yt
ytmdesktop
ze
zepp-simulator
development
compilers
gcc
haskell-modules
interpreters
libraries
ffmpeg
openssl
vigra
python-modules
brotlicffi
datadog
dm-haiku
flashinfer
fmpy
itables
langchain
langchain-aws
langchain-azure-dynamic-sessions
langchain-chroma
langchain-community
langchain-groq
langchain-huggingface
langchain-mongodb
langchain-ollama
langchain-openai
langchain-tests
langchain-text-splitters
mpi-pytest
piano-transcription-inference
pip
twisted
yamllint
tools
build-managers
bazel
bazel_6
bazel_7
servers
home-assistant
stdenv
generic
test
texlive
top-level
+1 -1
lib/attrsets.nix
···
/**
Get the first of the `outputs` provided by the package, or the default.
-
This function is alligned with `_overrideFirst()` from the `multiple-outputs.sh` setup hook.
Like `getOutput`, the function is idempotent.
# Inputs
···
/**
Get the first of the `outputs` provided by the package, or the default.
+
This function is aligned with `_overrideFirst()` from the `multiple-outputs.sh` setup hook.
Like `getOutput`, the function is idempotent.
# Inputs
+1 -1
lib/fixed-points.nix
···
extensions = composeManyExtensions [ overlayA overlayB ];
-
# Caluculate the fixed point of all composed overlays.
fixedpoint = lib.fix (lib.extends extensions original );
in fixedpoint
···
extensions = composeManyExtensions [ overlayA overlayB ];
+
# Calculate the fixed point of all composed overlays.
fixedpoint = lib.fix (lib.extends extensions original );
in fixedpoint
+2 -2
lib/options.nix
···
```nix
myType = mkOptionType {
name = "myType";
-
merge = mergeDefaultOption; # <- This line is redundant. It is the default aready.
};
```
···
args@{
message,
# WARNING: the default merge function assumes that the definition is a valid (option) value. You MUST pass a merge function if the return value needs to be
-
# - type checked beyond what .check does (which should be very litte; only on the value head; not attribute values, etc)
# - if you want attribute values to be checked, or list items
# - if you want coercedTo-like behavior to work
merge ? loc: defs: (head defs).value,
···
```nix
myType = mkOptionType {
name = "myType";
+
merge = mergeDefaultOption; # <- This line is redundant. It is the default already.
};
```
···
args@{
message,
# WARNING: the default merge function assumes that the definition is a valid (option) value. You MUST pass a merge function if the return value needs to be
+
# - type checked beyond what .check does (which should be very little; only on the value head; not attribute values, etc)
# - if you want attribute values to be checked, or list items
# - if you want coercedTo-like behavior to work
merge ? loc: defs: (head defs).value,
+1 -1
lib/types.nix
···
if pos == null then "" else " at ${pos.file}:${toString pos.line}:${toString pos.column}";
# Internal functor to help for migrating functor.wrapped to functor.payload.elemType
-
# Note that individual attributes can be overriden if needed.
elemTypeFunctor =
name:
{ elemType, ... }@payload:
···
if pos == null then "" else " at ${pos.file}:${toString pos.line}:${toString pos.column}";
# Internal functor to help for migrating functor.wrapped to functor.payload.elemType
+
# Note that individual attributes can be overridden if needed.
elemTypeFunctor =
name:
{ elemType, ... }@payload:
+2 -2
nixos/lib/make-options-doc/default.nix
···
Documentation rendered as AsciiDoc. This is useful for e.g. man pages.
-
> Note: NixOS itself uses this ouput to to build the configuration.nix man page"
## optionsNix
···
let
# Evaluate a NixOS configuration
eval = import (pkgs.path + "/nixos/lib/eval-config.nix") {
-
# Overriden explicitly here, this would include all modules from NixOS otherwise.
# See: docs of eval-config.nix for more details
baseModules = [];
modules = [
···
Documentation rendered as AsciiDoc. This is useful for e.g. man pages.
+
> Note: NixOS itself uses this output to to build the configuration.nix man page"
## optionsNix
···
let
# Evaluate a NixOS configuration
eval = import (pkgs.path + "/nixos/lib/eval-config.nix") {
+
# Overridden explicitly here, this would include all modules from NixOS otherwise.
# See: docs of eval-config.nix for more details
baseModules = [];
modules = [
+1 -1
nixos/lib/testing/network.nix
···
virtualisation.test.nodeName = mkOption {
internal = true;
default = name;
-
# We need to force this in specilisations, otherwise it'd be
# readOnly = true;
description = ''
The `name` in `nodes.<name>`; stable across `specialisations`.
···
virtualisation.test.nodeName = mkOption {
internal = true;
default = name;
+
# We need to force this in specialisations, otherwise it'd be
# readOnly = true;
description = ''
The `name` in `nodes.<name>`; stable across `specialisations`.
+1 -1
nixos/maintainers/option-usages.nix
···
inherit (eval) pkgs;
excludedTestOptions = [
-
# We cannot evluate _module.args, as it is used during the computation
# of the modules list.
"_module.args"
···
inherit (eval) pkgs;
excludedTestOptions = [
+
# We cannot evaluate _module.args, as it is used during the computation
# of the modules list.
"_module.args"
+3 -3
nixos/modules/misc/ids.nix
···
caddy = 239;
taskd = 240;
# factorio = 241; # DynamicUser = true
-
# emby = 242; # unusued, removed 2019-05-01
#graylog = 243;# dynamically allocated as of 2021-09-03
sniproxy = 244;
nzbget = 245;
···
# system user or group of the same id in someone else's NixOS.
# This could break their system and make that person upset for a whole day.
#
-
# Sidenote: the default is defined in `shadow` module[2], and the relavent change
# was made way back in 2014[3].
#
# [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number)
···
# system user or group of the same id in someone else's NixOS.
# This could break their system and make that person upset for a whole day.
#
-
# Sidenote: the default is defined in `shadow` module[2], and the relavent change
# was made way back in 2014[3].
#
# [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number)
···
caddy = 239;
taskd = 240;
# factorio = 241; # DynamicUser = true
+
# emby = 242; # unused, removed 2019-05-01
#graylog = 243;# dynamically allocated as of 2021-09-03
sniproxy = 244;
nzbget = 245;
···
# system user or group of the same id in someone else's NixOS.
# This could break their system and make that person upset for a whole day.
#
+
# Sidenote: the default is defined in `shadow` module[2], and the relevant change
# was made way back in 2014[3].
#
# [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number)
···
# system user or group of the same id in someone else's NixOS.
# This could break their system and make that person upset for a whole day.
#
+
# Sidenote: the default is defined in `shadow` module[2], and the relevant change
# was made way back in 2014[3].
#
# [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number)
+1 -1
nixos/modules/profiles/image-based-appliance.nix
···
-
# This profile sets up a sytem for image based appliance usage. An appliance is
# installed as an image, cannot be re-built, has no Nix available, and is
# generally not meant for interactive use. Updates to such an appliance are
# handled by updating whole partition images via a tool like systemd-sysupdate.
···
+
# This profile sets up a system for image based appliance usage. An appliance is
# installed as an image, cannot be re-built, has no Nix available, and is
# generally not meant for interactive use. Updates to such an appliance are
# handled by updating whole partition images via a tool like systemd-sysupdate.
+1 -1
nixos/modules/programs/turbovnc.nix
···
# software rendering to implement GLX (OpenGL on Xorg).
# However, just building TurboVNC with support for that is not enough
# (it only takes care of the X server side part of OpenGL);
-
# the indiviudual applications (e.g. `glxgears`) also need to directly load
# the OpenGL libs.
# Thus, this creates `/run/opengl-driver` populated by Mesa so that the applications
# can find the llvmpipe `swrast.so` software rendering DRI lib via `libglvnd`.
···
# software rendering to implement GLX (OpenGL on Xorg).
# However, just building TurboVNC with support for that is not enough
# (it only takes care of the X server side part of OpenGL);
+
# the individual applications (e.g. `glxgears`) also need to directly load
# the OpenGL libs.
# Thus, this creates `/run/opengl-driver` populated by Mesa so that the applications
# can find the llvmpipe `swrast.so` software rendering DRI lib via `libglvnd`.
+1 -1
nixos/modules/services/cluster/k3s/default.nix
···
[
(yamlFormat.generate "helm-chart-manifest-${name}.yaml" (mkHelmChartCR name value))
]
-
# alternate the YAML doc seperator (---) and extraDeploy manifests to create
# multi document YAMLs
++ (lib.concatMap (x: [
yamlDocSeparator
···
[
(yamlFormat.generate "helm-chart-manifest-${name}.yaml" (mkHelmChartCR name value))
]
+
# alternate the YAML doc separator (---) and extraDeploy manifests to create
# multi document YAMLs
++ (lib.concatMap (x: [
yamlDocSeparator
+1 -1
nixos/modules/services/desktops/bonsaid.nix
···
lib.mkDefault (json.generate "bonsai_tree.json" (filterNulls cfg.settings));
# bonsaid is controlled by bonsaictl, so place the latter in the environment by default.
-
# bonsaictl is typically invoked by scripts or a DE so this isn't strictly necesssary,
# but it's helpful while administering the service generally.
environment.systemPackages = [ cfg.package ];
···
lib.mkDefault (json.generate "bonsai_tree.json" (filterNulls cfg.settings));
# bonsaid is controlled by bonsaictl, so place the latter in the environment by default.
+
# bonsaictl is typically invoked by scripts or a DE so this isn't strictly necessary,
# but it's helpful while administering the service generally.
environment.systemPackages = [ cfg.package ];
+1 -1
nixos/modules/services/games/crossfire-server.nix
···
# need to be writeable, so we can't just point at the ones in the nix
# store. Instead we take the approach of copying them out of the store
# on first run. If `bookarch` already exists, we assume the rest of the
-
# files do as well, and copy nothing -- otherwise we risk ovewriting
# server state information every time the server is upgraded.
preStart = ''
if [ ! -e "${cfg.stateDir}"/bookarch ]; then
···
# need to be writeable, so we can't just point at the ones in the nix
# store. Instead we take the approach of copying them out of the store
# on first run. If `bookarch` already exists, we assume the rest of the
+
# files do as well, and copy nothing -- otherwise we risk overwriting
# server state information every time the server is upgraded.
preStart = ''
if [ ! -e "${cfg.stateDir}"/bookarch ]; then
+1 -1
nixos/modules/services/hardware/kmonad.nix
···
# the old service and then starts the new service after config updates.
# Since we use path-based activation[1] here, the service unit will
# immediately[2] be started by the path unit. Probably that start is
-
# before config updates, whcih causes the service unit to use the old
# config after nixos-rebuild switch. Setting stopIfChanged to false works
# around this issue by restarting the service after config updates.
# [0]: https://nixos.org/manual/nixos/unstable/#sec-switching-systems
···
# the old service and then starts the new service after config updates.
# Since we use path-based activation[1] here, the service unit will
# immediately[2] be started by the path unit. Probably that start is
+
# before config updates, which causes the service unit to use the old
# config after nixos-rebuild switch. Setting stopIfChanged to false works
# around this issue by restarting the service after config updates.
# [0]: https://nixos.org/manual/nixos/unstable/#sec-switching-systems
+1 -1
nixos/modules/services/misc/ntfy-sh.nix
···
RestrictNamespaces = true;
RestrictRealtime = true;
MemoryDenyWriteExecute = true;
-
# Upstream Recommandation
LimitNOFILE = 20500;
};
};
···
RestrictNamespaces = true;
RestrictRealtime = true;
MemoryDenyWriteExecute = true;
+
# Upstream Recommendation
LimitNOFILE = 20500;
};
};
+1 -1
nixos/modules/services/monitoring/below.nix
···
config = lib.mkIf cfg.enable {
environment.systemPackages = [ pkgs.below ];
-
# /etc/below.conf is also refered to by the `below` CLI tool,
# so this can't be a store-only file whose path is passed to the service
environment.etc."below/below.conf".text = cfgContents;
···
config = lib.mkIf cfg.enable {
environment.systemPackages = [ pkgs.below ];
+
# /etc/below.conf is also referred to by the `below` CLI tool,
# so this can't be a store-only file whose path is passed to the service
environment.etc."below/below.conf".text = cfgContents;
+1 -1
nixos/modules/services/networking/cato-client.nix
···
wantedBy = [ "multi-user.target" ];
};
-
# set up Security wrapper Same as inteded in deb post install
security.wrappers.cato-clientd = {
source = "${cfg.package}/bin/cato-clientd";
owner = "root";
···
wantedBy = [ "multi-user.target" ];
};
+
# set up Security wrapper Same as intended in deb post install
security.wrappers.cato-clientd = {
source = "${cfg.package}/bin/cato-clientd";
owner = "root";
+1 -1
nixos/modules/services/networking/fedimintd.nix
···
{
# Note: we want by default to enable OpenSSL, but it seems anything 100 and above is
-
# overriden by default value from vhost-options.nix
enableACME = mkOverride 99 true;
forceSSL = mkOverride 99 true;
locations.${cfg.nginx.path} = {
···
{
# Note: we want by default to enable OpenSSL, but it seems anything 100 and above is
+
# overridden by default value from vhost-options.nix
enableACME = mkOverride 99 true;
forceSSL = mkOverride 99 true;
locations.${cfg.nginx.path} = {
+1 -1
nixos/modules/services/networking/netbird.nix
···
User = client.user.name;
Group = client.user.group;
-
# settings implied by DynamicUser=true, without actully using it,
# see https://www.freedesktop.org/software/systemd/man/latest/systemd.exec.html#DynamicUser=
RemoveIPC = true;
PrivateTmp = true;
···
User = client.user.name;
Group = client.user.group;
+
# settings implied by DynamicUser=true, without actually using it,
# see https://www.freedesktop.org/software/systemd/man/latest/systemd.exec.html#DynamicUser=
RemoveIPC = true;
PrivateTmp = true;
+1 -1
nixos/modules/services/networking/netbird/dashboard.nix
···
# special options as its public anyway
# As far as I know leaking this secret is just
# an information leak as one can fetch some basic app
-
# informations from the IDP
# To actually do something one still needs to have login
# data and this secret so this being public will not
# suffice for anything just decreasing security
···
# special options as it's public anyway
# As far as I know leaking this secret is just
# an information leak as one can fetch some basic app
+
# information from the IDP
# To actually do something one still needs to have login
# data and this secret so this being public will not
# suffice for anything just decreasing security
+1 -1
nixos/modules/services/networking/ssh/sshd.nix
···
# values must be separated by whitespace or even commas.
# Consult either sshd_config(5) or, as last resort, the OpehSSH source for parsing
# the options at servconf.c:process_server_config_line_depth() to determine the right "mode"
-
# for each. But fortunaly this fact is documented for most of them in the manpage.
commaSeparated = [
"Ciphers"
"KexAlgorithms"
···
# values must be separated by whitespace or even commas.
# Consult either sshd_config(5) or, as last resort, the OpenSSH source for parsing
# the options at servconf.c:process_server_config_line_depth() to determine the right "mode"
+
# for each. But fortunately this fact is documented for most of them in the manpage.
commaSeparated = [
"Ciphers"
"KexAlgorithms"
+1 -1
nixos/modules/services/networking/yggdrasil-jumper.nix
···
services.yggdrasil.settings.Listen =
let
-
# By default linux dynamically alocates ports in range 32768..60999
# `sysctl net.ipv4.ip_local_port_range`
# See: https://xkcd.com/221/
prot_port = {
···
services.yggdrasil.settings.Listen =
let
+
# By default linux dynamically allocates ports in range 32768..60999
# `sysctl net.ipv4.ip_local_port_range`
# See: https://xkcd.com/221/
prot_port = {
+1 -1
nixos/modules/services/security/tor.nix
···
(
lib.mapAttrs (
k: v:
-
# Not necesssary, but prettier rendering
if
lib.elem k [
"AutomapHostsSuffixes"
···
(
lib.mapAttrs (
k: v:
+
# Not necessary, but prettier rendering
if
lib.elem k [
"AutomapHostsSuffixes"
+1 -1
nixos/modules/services/web-apps/davis.nix
···
else if
pgsqlLocal
# note: davis expects a non-standard postgres uri (due to the underlying doctrine library)
-
# specifically the dummy hostname which is overriden by the host query parameter
then
"postgres://${user}@localhost/${db.name}?host=/run/postgresql"
else if mysqlLocal then
···
else if
pgsqlLocal
# note: davis expects a non-standard postgres uri (due to the underlying doctrine library)
+
# specifically the dummy hostname which is overridden by the host query parameter
then
"postgres://${user}@localhost/${db.name}?host=/run/postgresql"
else if mysqlLocal then
+1 -1
nixos/modules/services/web-apps/plausible.nix
···
# Thus, disable distribution for improved simplicity and security:
#
# When distribution is enabled,
-
# Elixir spwans the Erlang VM, which will listen by default on all
# interfaces for messages between Erlang nodes (capable of
# remote code execution); it can be protected by a cookie; see
# https://erlang.org/doc/reference_manual/distributed.html#security).
···
# Thus, disable distribution for improved simplicity and security:
#
# When distribution is enabled,
+
# Elixir spawns the Erlang VM, which will listen by default on all
# interfaces for messages between Erlang nodes (capable of
# remote code execution); it can be protected by a cookie; see
# https://erlang.org/doc/reference_manual/distributed.html#security).
+1 -1
nixos/modules/services/web-apps/windmill.nix
···
{
# coming from https://github.com/windmill-labs/windmill/blob/main/init-db-as-superuser.sql
-
# modified to not grant priviledges on all tables
# create role windmill_user and windmill_admin only if they don't exist
postgresql.postStart = lib.mkIf cfg.database.createLocally (
lib.mkAfter ''
···
{
# coming from https://github.com/windmill-labs/windmill/blob/main/init-db-as-superuser.sql
+
# modified to not grant privileges on all tables
# create role windmill_user and windmill_admin only if they don't exist
postgresql.postStart = lib.mkIf cfg.database.createLocally (
lib.mkAfter ''
+1 -1
nixos/modules/virtualisation/azure-common.nix
···
# Enable cloud-init by default for waagent.
# Otherwise waagent would try manage networking using ifupdown,
-
# which is currently not availeble in nixpkgs.
services.cloud-init.enable = true;
services.cloud-init.network.enable = true;
systemd.services.cloud-config.serviceConfig.Restart = "on-failure";
···
# Enable cloud-init by default for waagent.
# Otherwise waagent would try manage networking using ifupdown,
+
# which is currently not available in nixpkgs.
services.cloud-init.enable = true;
services.cloud-init.network.enable = true;
systemd.services.cloud-config.serviceConfig.Restart = "on-failure";
+1 -1
nixos/modules/virtualisation/azure-image.nix
···
splashImage = null;
# For Gen 1 VM, configurate grub output to serial_com0.
# Not needed for Gen 2 VM wbere serial_com0 does not exist,
-
# and outputing to console is enough to make Azure Serial Console working
extraConfig = lib.mkIf (!efiSupport) ''
serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
terminal_input --append serial
···
splashImage = null;
# For Gen 1 VM, configure grub output to serial_com0.
# Not needed for Gen 2 VM where serial_com0 does not exist,
+
# and outputting to console is enough to make Azure Serial Console working
extraConfig = lib.mkIf (!efiSupport) ''
serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
terminal_input --append serial
+1 -1
nixos/modules/virtualisation/waagent.nix
···
convert =
attrs:
pipe (recurse [ ] attrs) [
-
# Filter out null values and emoty lists
(filter (kv: kv.value != null && kv.value != [ ]))
# Transform to Key=Value form, then concatenate
(map (kv: "${kv.name}=${transform kv.value}"))
···
convert =
attrs:
pipe (recurse [ ] attrs) [
+
# Filter out null values and empty lists
(filter (kv: kv.value != null && kv.value != [ ]))
# Transform to Key=Value form, then concatenate
(map (kv: "${kv.name}=${transform kv.value}"))
+2 -2
nixos/tests/appliance-repart-image.nix
···
repartConfig = {
Type = "esp";
Format = "vfat";
-
# Minimize = "guess" seems to not work very vell for vfat
-
# partitons. It's better to set a sensible default instead. The
# aarch64 kernel seems to generally be a little bigger than the
# x86_64 kernel. To stay on the safe side, leave some more slack
# for every platform other than x86_64.
···
repartConfig = {
Type = "esp";
Format = "vfat";
+
# Minimize = "guess" seems to not work very well for vfat
+
# partitions. It's better to set a sensible default instead. The
# aarch64 kernel seems to generally be a little bigger than the
# x86_64 kernel. To stay on the safe side, leave some more slack
# for every platform other than x86_64.
+1 -1
nixos/tests/common/acme/server/default.nix
···
) cfg.configuration.security.acme.certs
)
# A specialisation's config is nested under its configuration attribute.
-
# For ease of use, nest the root node's configuration simiarly.
([ { configuration = node; } ] ++ (builtins.attrValues node.specialisation))
)
);
···
) cfg.configuration.security.acme.certs
)
# A specialisation's config is nested under its configuration attribute.
+
# For ease of use, nest the root node's configuration similarly.
([ { configuration = node; } ] ++ (builtins.attrValues node.specialisation))
)
);
+1 -1
nixos/tests/ntfy-sh-migration.nix
···
# this test works doing a migration and asserting ntfy-sh runs properly. first,
# ntfy-sh is configured to use a static user and group. then ntfy-sh is
# started and tested. after that, ntfy-sh is shut down and a systemd drop
-
# in configuration file is used to upate the service configuration to use
# DynamicUser=true. then the ntfy-sh is started again and tested.
import ./make-test-python.nix {
···
# this test works doing a migration and asserting ntfy-sh runs properly. first,
# ntfy-sh is configured to use a static user and group. then ntfy-sh is
# started and tested. after that, ntfy-sh is shut down and a systemd drop
+
# in configuration file is used to update the service configuration to use
# DynamicUser=true. then the ntfy-sh is started again and tested.
import ./make-test-python.nix {
+2 -2
nixos/tests/sftpgo.nix
···
# - downloading the file over sftp
# - assert that the ACLs are respected
# - share a file between alice and bob (using sftp)
-
# - assert that eve cannot acceess the shared folder between alice and bob.
#
# Additional test coverage for the remaining protocols (i.e. ftp, http and webdav)
# would be a nice to have for the future.
···
testScript =
{ nodes, ... }:
let
-
# A function to generate test cases for wheter
# a specified username is expected to access the shared folder.
accessSharedFoldersSubtest =
{
···
# - downloading the file over sftp
# - assert that the ACLs are respected
# - share a file between alice and bob (using sftp)
+
# - assert that eve cannot access the shared folder between alice and bob.
#
# Additional test coverage for the remaining protocols (i.e. ftp, http and webdav)
# would be a nice to have for the future.
···
testScript =
{ nodes, ... }:
let
+
# A function to generate test cases for whether
# a specified username is expected to access the shared folder.
accessSharedFoldersSubtest =
{
+1 -1
nixos/tests/systemd-sysupdate.nix
···
-
# Tests downloading a signed update aritfact from a server to a target machine.
# This test does not rely on the `systemd.timer` units provided by the
# `systemd-sysupdate` module but triggers the `systemd-sysupdate` service
# manually to make the test more robust.
···
+
# Tests downloading a signed update artifact from a server to a target machine.
# This test does not rely on the `systemd.timer` units provided by the
# `systemd-sysupdate` module but triggers the `systemd-sysupdate` service
# manually to make the test more robust.
+1 -1
nixos/tests/systemd-timesyncd-nscd-dnssec.nix
···
# correct time, we need to connect to an NTP server, which usually requires resolving its hostname.
#
# This test does the following:
-
# - Sets up a DNS server (tinydns) listening on the eth1 ip addess, serving .ntp and fake.ntp records.
# - Configures that DNS server as a resolver and enables DNSSEC in systemd-resolved settings.
# - Configures systemd-timesyncd to use fake.ntp hostname as an NTP server.
# - Performs a regular DNS lookup, to ensure it fails due to broken DNSSEC.
···
# correct time, we need to connect to an NTP server, which usually requires resolving its hostname.
#
# This test does the following:
+
# - Sets up a DNS server (tinydns) listening on the eth1 ip address, serving .ntp and fake.ntp records.
# - Configures that DNS server as a resolver and enables DNSSEC in systemd-resolved settings.
# - Configures systemd-timesyncd to use fake.ntp hostname as an NTP server.
# - Performs a regular DNS lookup, to ensure it fails due to broken DNSSEC.
+4 -4
pkgs/applications/editors/emacs/elisp-packages/melpa-packages.nix
···
./update-from-overlay
It will update both melpa and elpa packages using
-
https://github.com/nix-community/emacs-overlay. It's almost instantenous and
formats commits for you.
*/
···
hyperbole = ignoreCompilationError (addPackageRequires (mkHome super.hyperbole) [ self.el-mock ]); # elisp error
# needs non-existent "browser database directory" during compilation
-
# TODO report to upsteam about missing dependency websocket
ibrowse = ignoreCompilationError (addPackageRequires super.ibrowse [ self.websocket ]);
# elisp error and missing optional dependencies
···
indium = mkHome super.indium;
-
# TODO report to upsteam
inlineR = addPackageRequires super.inlineR [ self.ess ];
# https://github.com/duelinmarkers/insfactor.el/issues/7
···
org-gtd = ignoreCompilationError super.org-gtd; # elisp error
-
# needs newer org than the Eamcs 29.4 builtin one
org-link-beautify = addPackageRequires super.org-link-beautify [
self.org
self.qrencode
···
./update-from-overlay
It will update both melpa and elpa packages using
+
https://github.com/nix-community/emacs-overlay. It's almost instantaneous and
formats commits for you.
*/
···
hyperbole = ignoreCompilationError (addPackageRequires (mkHome super.hyperbole) [ self.el-mock ]); # elisp error
# needs non-existent "browser database directory" during compilation
+
# TODO report to upstream about missing dependency websocket
ibrowse = ignoreCompilationError (addPackageRequires super.ibrowse [ self.websocket ]);
# elisp error and missing optional dependencies
···
indium = mkHome super.indium;
+
# TODO report to upstream
inlineR = addPackageRequires super.inlineR [ self.ess ];
# https://github.com/duelinmarkers/insfactor.el/issues/7
···
org-gtd = ignoreCompilationError super.org-gtd; # elisp error
+
# needs newer org than the Emacs 29.4 builtin one
org-link-beautify = addPackageRequires super.org-link-beautify [
self.org
self.qrencode
+1 -1
pkgs/applications/editors/vscode/extensions/default.nix
···
};
# TODO: add overrides overlay, so that we can have a generated.nix
-
# then apply extension specific modifcations to packages.
# overlays will be applied left to right, overrides should come after aliases.
overlays = lib.optionals config.allowAliases [
···
};
# TODO: add overrides overlay, so that we can have a generated.nix
+
# then apply extension specific modifications to packages.
# overlays will be applied left to right, overrides should come after aliases.
overlays = lib.optionals config.allowAliases [
+1 -1
pkgs/applications/editors/vscode/extensions/ms-vscode.cpptools/default.nix
···
<https://github.com/Microsoft/vscode-cpptools/issues/35>
-
Once the symbolic link temporary solution taken, everything shoud run smootly.
*/
let
···
<https://github.com/Microsoft/vscode-cpptools/issues/35>
+
Once the symbolic link temporary solution is taken, everything should run smoothly.
*/
let
+1 -1
pkgs/applications/graphics/yacreader/default.nix
···
libsForQt5.qtmacextras # can be removed when using qt6
];
-
# custom Darwin install instructions taken from the upsteam compileOSX.sh script
installPhase = lib.optionalString stdenv.hostPlatform.isDarwin ''
runHook preInstall
···
libsForQt5.qtmacextras # can be removed when using qt6
];
+
# custom Darwin install instructions taken from the upstream compileOSX.sh script
installPhase = lib.optionalString stdenv.hostPlatform.isDarwin ''
runHook preInstall
+1 -1
pkgs/build-support/rust/build-rust-crate/default.nix
···
jq,
libiconv,
# Controls codegen parallelization for all crates.
-
# May be overriden on a per-crate level.
# See <https://doc.rust-lang.org/rustc/codegen-options/index.html#codegen-units>
defaultCodegenUnits ? 1,
}:
···
jq,
libiconv,
# Controls codegen parallelization for all crates.
+
# May be overridden on a per-crate level.
# See <https://doc.rust-lang.org/rustc/codegen-options/index.html#codegen-units>
defaultCodegenUnits ? 1,
}:
+3 -3
pkgs/build-support/rust/build-rust-crate/test/rcgen-crates.nix
···
defaultCrateOverrides ? pkgs.defaultCrateOverrides,
# The features to enable for the root_crate or the workspace_members.
rootFeatures ? [ "default" ],
-
# If true, throw errors instead of issueing deprecation warnings.
strictDeprecation ? false,
# Used for conditional compilation based on CPU feature detection.
targetFeatures ? [ ],
···
runTests ? false,
testCrateFlags ? [ ],
testInputs ? [ ],
-
# Any command to run immediatelly before a test is executed.
testPreRun ? "",
-
# Any command run immediatelly after a test is executed.
testPostRun ? "",
}:
lib.makeOverridable
···
defaultCrateOverrides ? pkgs.defaultCrateOverrides,
# The features to enable for the root_crate or the workspace_members.
rootFeatures ? [ "default" ],
+
# If true, throw errors instead of issuing deprecation warnings.
strictDeprecation ? false,
# Used for conditional compilation based on CPU feature detection.
targetFeatures ? [ ],
···
runTests ? false,
testCrateFlags ? [ ],
testInputs ? [ ],
+
# Any command to run immediately before a test is executed.
testPreRun ? "",
+
# Any command run immediately after a test is executed.
testPostRun ? "",
}:
lib.makeOverridable
+1 -1
pkgs/by-name/ad/adios2/package.nix
···
yaml-cpp
nlohmann_json
-
# Todo: add these optional dependcies in nixpkgs.
# sz
# mgard
# catalyst
···
yaml-cpp
nlohmann_json
+
# Todo: add these optional dependencies in nixpkgs.
# sz
# mgard
# catalyst
+1 -1
pkgs/by-name/hy/hyperhdr/package.nix
···
];
patches = [
-
# Allow completly unvendoring hyperhdr
# This can be removed on the next hyperhdr release
./unvendor.patch
];
···
];
patches = [
+
# Allow completely unvendoring hyperhdr
# This can be removed on the next hyperhdr release
./unvendor.patch
];
+1 -1
pkgs/by-name/il/ilmbase/package.nix
···
lib,
buildPackages,
cmake,
-
# explicitely depending on openexr_2 because ilmbase doesn't exist for v3
openexr_2,
}:
···
lib,
buildPackages,
cmake,
+
# explicitly depending on openexr_2 because ilmbase doesn't exist for v3
openexr_2,
}:
+1 -1
pkgs/by-name/im/imlib2/package.nix
···
enableParallelBuilding = true;
# Do not build amd64 assembly code on Darwin, because it fails to compile
-
# with unknow directive errors
configureFlags =
optional stdenv.hostPlatform.isDarwin "--enable-amd64=no"
++ optional (!svgSupport) "--without-svg"
···
enableParallelBuilding = true;
# Do not build amd64 assembly code on Darwin, because it fails to compile
+
# with unknown directive errors
configureFlags =
optional stdenv.hostPlatform.isDarwin "--enable-amd64=no"
++ optional (!svgSupport) "--without-svg"
+1 -1
pkgs/by-name/li/libcredis/package.nix
···
sha256 = "1l3hlw9rrc11qggbg9a2303p3bhxxx2vqkmlk8avsrbqw15r1ayr";
};
-
# credis build system has no install actions, provide our own.
installPhase = ''
mkdir -p "$out/bin"
mkdir -p "$out/lib"
···
sha256 = "1l3hlw9rrc11qggbg9a2303p3bhxxx2vqkmlk8avsrbqw15r1ayr";
};
+
# credis build system has no install actions, provide our own.
installPhase = ''
mkdir -p "$out/bin"
mkdir -p "$out/lib"
+1 -1
pkgs/by-name/mi/miktex/package.nix
···
patches = [
./startup-config-support-nix-store.patch
# Miktex will search exectables in "GetMyPrefix(true)/bin".
-
# The path evalutate to "/usr/bin" in FHS style linux distrubution,
# compared to "/nix/store/.../bin" in NixOS.
# As a result, miktex will fail to find e.g. 'pkexec','ksudo','gksu'
# under /run/wrappers/bin in NixOS.
···
patches = [
./startup-config-support-nix-store.patch
# Miktex will search executables in "GetMyPrefix(true)/bin".
+
# The path evaluates to "/usr/bin" in FHS style linux distributions,
# compared to "/nix/store/.../bin" in NixOS.
# As a result, miktex will fail to find e.g. 'pkexec','ksudo','gksu'
# under /run/wrappers/bin in NixOS.
+1 -1
pkgs/by-name/mx/mxnet/package.nix
···
perl,
# mxnet cuda support is turned off, but dependencies like opencv can still be built with cudaSupport
# and fail to compile without the cudatoolkit
-
# mxnet cuda support will not be availaible, as mxnet requires version <=11
cudaSupport ? config.cudaSupport,
cudaPackages ? { },
}:
···
perl,
# mxnet cuda support is turned off, but dependencies like opencv can still be built with cudaSupport
# and fail to compile without the cudatoolkit
+
# mxnet cuda support will not be available, as mxnet requires version <=11
cudaSupport ? config.cudaSupport,
cudaPackages ? { },
}:
+1 -1
pkgs/by-name/ty/typical/package.nix
···
patches = [
# Related to https://github.com/stepchowfun/typical/pull/501
-
# Commiting a slightly different patch because the upstream one doesn't apply cleanly
./lifetime.patch
];
···
patches = [
# Related to https://github.com/stepchowfun/typical/pull/501
+
# Committing a slightly different patch because the upstream one doesn't apply cleanly
./lifetime.patch
];
+1 -1
pkgs/by-name/us/usb-reset/package.nix
···
stdenv.mkDerivation {
pname = "usb-reset";
# not tagged, but changelog has this with the date of the e9a9d6c commit
-
# and no significant change occured between bumping the version in the Makefile and that
# and the changes since then (up to ff822d8) seem snap related
version = "0.3";
···
stdenv.mkDerivation {
pname = "usb-reset";
# not tagged, but changelog has this with the date of the e9a9d6c commit
+
# and no significant change occurred between bumping the version in the Makefile and that
# and the changes since then (up to ff822d8) seem snap related
version = "0.3";
+1 -1
pkgs/by-name/yt/ytmdesktop/package.nix
···
};
patches = [
-
# instead of runnning git during the build process
# use the .COMMIT file generated in the fetcher FOD
./git-rev-parse.patch
];
···
};
patches = [
+
# instead of running git during the build process
# use the .COMMIT file generated in the fetcher FOD
./git-rev-parse.patch
];
+1 -1
pkgs/by-name/ze/zepp-simulator/package.nix
···
copyDesktopItems,
autoPatchelfHook,
-
# Upstream is officialy built with Electron 18
# (but it works with latest Electron with minor changes, see HACK below)
electron,
asar,
···
copyDesktopItems,
autoPatchelfHook,
+
# Upstream is officially built with Electron 18
# (but it works with latest Electron with minor changes, see HACK below)
electron,
asar,
+1 -1
pkgs/development/compilers/gcc/common/configure-flags.nix
···
# Note [Windows Exception Handling]
# sjlj (short jump long jump) exception handling makes no sense on x86_64,
-
# it's forcably slowing programs down as it produces a constant overhead.
# On x86_64 we have SEH (Structured Exception Handling) and we should use
# that. On i686, we do not have SEH, and have to use sjlj with dwarf2.
# Hence it's now conditional on x86_32 (i686 is 32bit).
···
# Note [Windows Exception Handling]
# sjlj (short jump long jump) exception handling makes no sense on x86_64,
+
# it's forcibly slowing programs down as it produces a constant overhead.
# On x86_64 we have SEH (Structured Exception Handling) and we should use
# that. On i686, we do not have SEH, and have to use sjlj with dwarf2.
# Hence it's now conditional on x86_32 (i686 is 32bit).
+4 -4
pkgs/development/haskell-modules/configuration-common.nix
···
self: super:
{
-
# Hackage's accelerate is from 2020 and incomptible with our GHC.
# The existing derivation also has missing dependencies
# compared to the source from github.
# https://github.com/AccelerateHS/accelerate/issues/553
···
katt = dontCheck super.katt;
language-slice = dontCheck super.language-slice;
-
# Bogus lower bound on data-default-class added via Hackage revison
# https://github.com/mrkkrp/req/pull/180#issuecomment-2628201485
req = overrideCabal {
revision = null;
···
license = lib.licenses.bsd3;
# ghc-bignum is not buildable if none of the three backends
# is explicitly enabled. We enable Native for now as it doesn't
-
# depend on anything else as oppossed to GMP and FFI.
# Apply patch which fixes a compilation failure we encountered.
# Will need to be kept until we can drop ghc-bignum entirely,
# i. e. if GHC 8.10.* and 8.8.* have been removed.
···
# Missing test files https://github.com/kephas/xdg-basedir-compliant/issues/1
xdg-basedir-compliant = dontCheck super.xdg-basedir-compliant;
-
# Test failure after libxcrypt migration, reported upstrem at
# https://github.com/phadej/crypt-sha512/issues/13
crypt-sha512 = dontCheck super.crypt-sha512;
···
self: super:
{
+
# Hackage's accelerate is from 2020 and incompatible with our GHC.
# The existing derivation also has missing dependencies
# compared to the source from github.
# https://github.com/AccelerateHS/accelerate/issues/553
···
katt = dontCheck super.katt;
language-slice = dontCheck super.language-slice;
+
# Bogus lower bound on data-default-class added via Hackage revision
# https://github.com/mrkkrp/req/pull/180#issuecomment-2628201485
req = overrideCabal {
revision = null;
···
license = lib.licenses.bsd3;
# ghc-bignum is not buildable if none of the three backends
# is explicitly enabled. We enable Native for now as it doesn't
+
# depend on anything else as opposed to GMP and FFI.
# Apply patch which fixes a compilation failure we encountered.
# Will need to be kept until we can drop ghc-bignum entirely,
# i. e. if GHC 8.10.* and 8.8.* have been removed.
···
# Missing test files https://github.com/kephas/xdg-basedir-compliant/issues/1
xdg-basedir-compliant = dontCheck super.xdg-basedir-compliant;
+
# Test failure after libxcrypt migration, reported upstream at
# https://github.com/phadej/crypt-sha512/issues/13
crypt-sha512 = dontCheck super.crypt-sha512;
+1 -1
pkgs/development/interpreters/ruby/default.nix
···
# When using a baseruby, ruby always sets "libdir" to the build
# directory, which nix rejects due to a reference in to /build/ in
# the final product. Removing this reference doesn't seem to break
-
# anything and fixes cross compliation.
./dont-refer-to-build-dir.patch
];
···
# When using a baseruby, ruby always sets "libdir" to the build
# directory, which nix rejects due to a reference in to /build/ in
# the final product. Removing this reference doesn't seem to break
+
# anything and fixes cross compilation.
./dont-refer-to-build-dir.patch
];
+1 -1
pkgs/development/libraries/ffmpeg/generic.nix
···
# all dependants in Nixpkgs
withSmallDeps ? ffmpegVariant == "small" || withFullDeps,
-
# Everything enabled; only guarded behind platform exclusivity or brokeness.
# If you need to depend on ffmpeg-full because ffmpeg is missing some feature
# your package needs, you should enable that feature in regular ffmpeg
# instead.
···
# all dependants in Nixpkgs
withSmallDeps ? ffmpegVariant == "small" || withFullDeps,
+
# Everything enabled; only guarded behind platform exclusivity or brokenness.
# If you need to depend on ffmpeg-full because ffmpeg is missing some feature
# your package needs, you should enable that feature in regular ffmpeg
# instead.
+1 -1
pkgs/development/libraries/openssl/default.nix
···
# This avoids conflicts between man pages of openssl subcommands (for
# example 'ts' and 'err') man pages and their equivalent top-level
# command in other packages (respectively man-pages and moreutils).
-
# This is done in ubuntu and archlinux, and possiibly many other distros.
"MANSUFFIX=ssl"
];
···
# This avoids conflicts between man pages of openssl subcommands (for
# example 'ts' and 'err') man pages and their equivalent top-level
# command in other packages (respectively man-pages and moreutils).
+
# This is done in ubuntu and archlinux, and possibly many other distros.
"MANSUFFIX=ssl"
];
+1 -1
pkgs/development/libraries/vigra/default.nix
···
};
patches = [
-
# Pathes to fix compiling on LLVM 19 from https://github.com/ukoethe/vigra/pull/592
./fix-llvm-19-1.patch
./fix-llvm-19-2.patch
];
···
};
patches = [
+
# Patches to fix compiling on LLVM 19 from https://github.com/ukoethe/vigra/pull/592
./fix-llvm-19-1.patch
./fix-llvm-19-2.patch
];
+1 -1
pkgs/development/python-modules/brotlicffi/default.nix
···
buildPythonPackage,
pythonOlder,
cffi,
-
# overriden as pkgs.brotli
brotli,
setuptools,
pytestCheckHook,
···
buildPythonPackage,
pythonOlder,
cffi,
+
# overridden as pkgs.brotli
brotli,
setuptools,
pytestCheckHook,
+1 -1
pkgs/development/python-modules/datadog/default.nix
···
# https://github.com/DataDog/datadogpy/issues/746
"TestDogshell"
-
# Flaky: test execution time aganst magic values
"test_distributed"
"test_timed"
"test_timed_in_ms"
···
# https://github.com/DataDog/datadogpy/issues/746
"TestDogshell"
+
# Flaky: test execution time against magic values
"test_distributed"
"test_timed"
"test_timed_in_ms"
+1 -1
pkgs/development/python-modules/dm-haiku/default.nix
···
# AttributeError: jax.core.Var was removed in JAX v0.6.0. Use jax.extend.core.Var instead, and
# see https://docs.jax.dev/en/latest/jax.extend.html for details.
-
# Alrady on master: https://github.com/google-deepmind/dm-haiku/commit/cfe8480d253a93100bf5e2d24c40435a95399c96
# TODO: remove at the next release
postPatch = ''
substituteInPlace haiku/_src/jaxpr_info.py \
···
# AttributeError: jax.core.Var was removed in JAX v0.6.0. Use jax.extend.core.Var instead, and
# see https://docs.jax.dev/en/latest/jax.extend.html for details.
+
# Already on master: https://github.com/google-deepmind/dm-haiku/commit/cfe8480d253a93100bf5e2d24c40435a95399c96
# TODO: remove at the next release
postPatch = ''
substituteInPlace haiku/_src/jaxpr_info.py \
+1 -1
pkgs/development/python-modules/flashinfer/default.nix
···
# requires the CUDA toolkit (via nvcc) to be available.
#
# This means that if you plan to use flashinfer, you will need to set the
-
# environment varaible `CUDA_HOME` to `cudatoolkit`.
{
lib,
config,
···
# requires the CUDA toolkit (via nvcc) to be available.
#
# This means that if you plan to use flashinfer, you will need to set the
+
# environment variable `CUDA_HOME` to `cudatoolkit`.
{
lib,
config,
+1 -1
pkgs/development/python-modules/fmpy/default.nix
···
};
# FMPy searches for sundials without the "lib"-prefix; strip it
-
# and symlink the so-files into existance.
postFixup = ''
pushd $out/lib
for so in *.so; do
···
};
# FMPy searches for sundials without the "lib"-prefix; strip it
+
# and symlink the so-files into existence.
postFixup = ''
pushd $out/lib
for so in *.so; do
+1 -1
pkgs/development/python-modules/itables/default.nix
···
# itables has 4 different node packages, each with their own
# package-lock.json, and partially depending on each other.
# Our fetchNpmDeps tooling in nixpkgs doesn't support this yet, so we fetch
-
# the source tarball from pypi, wich includes the javascript bundle already.
src = fetchPypi {
inherit pname version;
hash = "sha256-S5HASUVfqTny+Vu15MYSSrEffCaJuL7UhDOc3eudVWI=";
···
# itables has 4 different node packages, each with their own
# package-lock.json, and partially depending on each other.
# Our fetchNpmDeps tooling in nixpkgs doesn't support this yet, so we fetch
+
# the source tarball from pypi, which includes the javascript bundle already.
src = fetchPypi {
inherit pname version;
hash = "sha256-S5HASUVfqTny+Vu15MYSSrEffCaJuL7UhDOc3eudVWI=";
+1 -1
pkgs/development/python-modules/langchain-aws/default.nix
···
# Boto @ 1.35 has outstripped the version requirement
"boto3"
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
];
···
# Boto @ 1.35 has outstripped the version requirement
"boto3"
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
];
+1 -1
pkgs/development/python-modules/langchain-azure-dynamic-sessions/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
];
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
];
+1 -1
pkgs/development/python-modules/langchain-chroma/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
"numpy"
];
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
"numpy"
];
+1 -1
pkgs/development/python-modules/langchain-community/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest langchain and -core.
-
# That prevents us from updating individul components.
"langchain"
"langchain-core"
"numpy"
···
pythonRelaxDeps = [
# Each component release requests the exact latest langchain and -core.
+
# That prevents us from updating individual components.
"langchain"
"langchain-core"
"numpy"
+1 -1
pkgs/development/python-modules/langchain-groq/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
];
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
];
+1 -1
pkgs/development/python-modules/langchain-huggingface/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
];
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
];
+1 -1
pkgs/development/python-modules/langchain-mongodb/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
"numpy"
];
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
"numpy"
];
+1 -1
pkgs/development/python-modules/langchain-ollama/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
];
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
];
+1 -1
pkgs/development/python-modules/langchain-openai/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
];
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
];
+1 -1
pkgs/development/python-modules/langchain-tests/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
"numpy"
];
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
"numpy"
];
+1 -1
pkgs/development/python-modules/langchain-text-splitters/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
];
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
];
+1 -1
pkgs/development/python-modules/langchain/default.nix
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
-
# That prevents us from updating individul components.
"langchain-core"
"numpy"
"tenacity"
···
pythonRelaxDeps = [
# Each component release requests the exact latest core.
+
# That prevents us from updating individual components.
"langchain-core"
"numpy"
"tenacity"
+1 -1
pkgs/development/python-modules/mpi-pytest/default.nix
···
hash = "sha256-r9UB5H+qAJc6k2SVAiOCI2yRDLNv2zKRmfrAan+cX9I=";
};
-
# A temporary fixup to support fork mode with openmpi implemention
# See https://github.com/firedrakeproject/mpi-pytest/pull/17
postPatch = lib.optionalString (mpi4py.mpi.pname == "openmpi") ''
substituteInPlace pytest_mpi/plugin.py \
···
hash = "sha256-r9UB5H+qAJc6k2SVAiOCI2yRDLNv2zKRmfrAan+cX9I=";
};
+
# A temporary fixup to support fork mode with openmpi implementation
# See https://github.com/firedrakeproject/mpi-pytest/pull/17
postPatch = lib.optionalString (mpi4py.mpi.pname == "openmpi") ''
substituteInPlace pytest_mpi/plugin.py \
+1 -1
pkgs/development/python-modules/piano-transcription-inference/default.nix
···
# Project has no tests.
# In order to make pythonImportsCheck work, NUMBA_CACHE_DIR env var need to
# be set to a writable dir (https://github.com/numba/numba/issues/4032#issuecomment-488102702).
-
# pythonImportsCheck has no pre* hook, use checkPhase to wordaround that.
checkPhase = ''
export NUMBA_CACHE_DIR="$(mktemp -d)"
'';
···
# Project has no tests.
# In order to make pythonImportsCheck work, NUMBA_CACHE_DIR env var needs to
# be set to a writable dir (https://github.com/numba/numba/issues/4032#issuecomment-488102702).
+
# pythonImportsCheck has no pre* hook, use checkPhase to workaround that.
checkPhase = ''
export NUMBA_CACHE_DIR="$(mktemp -d)"
'';
+1 -1
pkgs/development/python-modules/pip/default.nix
···
tomli-w,
werkzeug,
-
# coupled downsteam dependencies
pip-tools,
}:
···
tomli-w,
werkzeug,
+
# coupled downstream dependencies
pip-tools,
}:
+1 -1
pkgs/development/python-modules/twisted/default.nix
···
"MulticastTests.test_multiListen"
];
"src/twisted/trial/test/test_script.py" = [
-
# Fails in LXC containers with less than all cores availaible (limits.cpu)
"AutoJobsTests.test_cpuCount"
];
"src/twisted/internet/test/test_unix.py" = [
···
"MulticastTests.test_multiListen"
];
"src/twisted/trial/test/test_script.py" = [
+
# Fails in LXC containers with less than all cores available (limits.cpu)
"AutoJobsTests.test_cpuCount"
];
"src/twisted/internet/test/test_unix.py" = [
+1 -1
pkgs/development/python-modules/yamllint/default.nix
···
[
# test failure reported upstream: https://github.com/adrienverge/yamllint/issues/373
"test_find_files_recursively"
-
# Issue wih fixture
"test_codec_built_in_equivalent"
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
···
[
# test failure reported upstream: https://github.com/adrienverge/yamllint/issues/373
"test_find_files_recursively"
+
# Issue with fixture
"test_codec_built_in_equivalent"
]
++ lib.optionals stdenv.hostPlatform.isDarwin [
+1 -1
pkgs/development/tools/build-managers/bazel/bazel_6/default.nix
···
# guarantee that it will always run in any nix context.
#
# See also ./bazel_darwin_sandbox.patch in bazel_5. That patch uses
-
# NIX_BUILD_TOP env var to conditionnally disable sleep features inside the
# sandbox.
#
# If you want to investigate the sandbox profile path,
···
# guarantee that it will always run in any nix context.
#
# See also ./bazel_darwin_sandbox.patch in bazel_5. That patch uses
+
# NIX_BUILD_TOP env var to conditionally disable sleep features inside the
# sandbox.
#
# If you want to investigate the sandbox profile path,
+3 -3
pkgs/development/tools/build-managers/bazel/bazel_7/default.nix
···
# --{,tool_}java_runtime_version=local_jdk and rely on the fact no java
# toolchain registered by default uses the local_jdk, making the selection
# unambiguous.
-
# This toolchain has the advantage that it can use any ambiant java jdk,
# not only a given, fixed version. It allows bazel to work correctly in any
# environment where JAVA_HOME is set to the right java version, like inside
# nix derivations.
-
# However, this patch breaks bazel hermeticity, by picking the ambiant java
# version instead of the more hermetic remote_jdk prebuilt binaries that
# rules_java provide by default. It also requires the user to have a
# JAVA_HOME set to the exact version required by the project.
···
# guarantee that it will always run in any nix context.
#
# See also ./bazel_darwin_sandbox.patch in bazel_5. That patch uses
-
# NIX_BUILD_TOP env var to conditionnally disable sleep features inside the
# sandbox.
#
# If you want to investigate the sandbox profile path,
···
# --{,tool_}java_runtime_version=local_jdk and rely on the fact no java
# toolchain registered by default uses the local_jdk, making the selection
# unambiguous.
+
# This toolchain has the advantage that it can use any ambient java jdk,
# not only a given, fixed version. It allows bazel to work correctly in any
# environment where JAVA_HOME is set to the right java version, like inside
# nix derivations.
+
# However, this patch breaks bazel hermeticity, by picking the ambient java
# version instead of the more hermetic remote_jdk prebuilt binaries that
# rules_java provide by default. It also requires the user to have a
# JAVA_HOME set to the exact version required by the project.
···
# guarantee that it will always run in any nix context.
#
# See also ./bazel_darwin_sandbox.patch in bazel_5. That patch uses
+
# NIX_BUILD_TOP env var to conditionally disable sleep features inside the
# sandbox.
#
# If you want to investigate the sandbox profile path,
+2 -2
pkgs/servers/home-assistant/default.nix
···
];
});
-
# Pinned due to home-assistant still needing 1.10.0 verison
-
# Remove this when home-assistant upates the jellyfin-apiclient-python version
jellyfin-apiclient-python = super.jellyfin-apiclient-python.overridePythonAttrs (oldAttrs: rec {
version = "1.10.0";
src = fetchFromGitHub {
···
];
});
+
# Pinned due to home-assistant still needing 1.10.0 version
+
# Remove this when home-assistant updates the jellyfin-apiclient-python version
jellyfin-apiclient-python = super.jellyfin-apiclient-python.overridePythonAttrs (oldAttrs: rec {
version = "1.10.0";
src = fetchFromGitHub {
+1 -1
pkgs/stdenv/generic/check-meta.nix
···
isMarkedInsecure = attrs: (attrs.meta.knownVulnerabilities or [ ]) != [ ];
-
# Alow granular checks to allow only some unfree packages
# Example:
# {pkgs, ...}:
# {
···
isMarkedInsecure = attrs: (attrs.meta.knownVulnerabilities or [ ]) != [ ];
+
# Allow granular checks to allow only some unfree packages
# Example:
# {pkgs, ...}:
# {
+1 -1
pkgs/test/texlive/default.nix
···
"outocp"
"pmxab"
-
# GUI scripts that accept no argument or crash without a graphics server; please test manualy
"epspdftk"
"texdoctk"
"tlshell"
···
"outocp"
"pmxab"
+
# GUI scripts that accept no argument or crash without a graphics server; please test manually
"epspdftk"
"texdoctk"
"tlshell"
+6 -6
pkgs/top-level/all-packages.nix
···
libhandy = callPackage ../development/libraries/libhandy { };
-
# Needed for apps that still depend on the unstable verison of the library (not libhandy-1)
libhandy_0 = callPackage ../development/libraries/libhandy/0.x.nix { };
libint = callPackage ../development/libraries/libint { };
···
zbar = libsForQt5.callPackage ../tools/graphics/zbar { };
-
# Nvidia support does not require any propietary libraries, so CI can build it.
# Note that when enabling this unconditionally, non-nvidia users will always have an empty "GPU" section.
zenith-nvidia = zenith.override {
nvidiaSupport = true;
···
# host platform.
#
# Because this is the *next* stages choice, it's a bit non-modular to put
-
# here. In theory, bootstraping is supposed to not be a chain but at tree,
# where each stage supports many "successor" stages, like multiple possible
# futures. We don't have a better alternative, but with this downside in
# mind, please be judicious when using this attribute. E.g. for building
···
stdenv = stdenvNoLibc;
};
-
# These are used when buiding compiler-rt / libgcc, prior to building libc.
preLibcCrossHeaders =
let
inherit (stdenv.targetPlatform) libc;
···
nginxModules = recurseIntoAttrs (callPackage ../servers/http/nginx/modules.nix { });
-
# We should move to dynmaic modules and create a nginxFull package with all modules
nginxShibboleth = nginxStable.override {
modules = [
nginxModules.rtmp
···
jdk = jdk17;
};
-
# perhaps there are better apps for this task? It's how I had configured my preivous system.
# And I don't want to rewrite all rules
profanity = callPackage ../applications/networking/instant-messengers/profanity (
{
···
libhandy = callPackage ../development/libraries/libhandy { };
+
# Needed for apps that still depend on the unstable version of the library (not libhandy-1)
libhandy_0 = callPackage ../development/libraries/libhandy/0.x.nix { };
libint = callPackage ../development/libraries/libint { };
···
zbar = libsForQt5.callPackage ../tools/graphics/zbar { };
+
# Nvidia support does not require any proprietary libraries, so CI can build it.
# Note that when enabling this unconditionally, non-nvidia users will always have an empty "GPU" section.
zenith-nvidia = zenith.override {
nvidiaSupport = true;
···
# host platform.
#
# Because this is the *next* stage's choice, it's a bit non-modular to put
+
# here. In theory, bootstrapping is supposed to not be a chain but a tree,
# where each stage supports many "successor" stages, like multiple possible
# futures. We don't have a better alternative, but with this downside in
# mind, please be judicious when using this attribute. E.g. for building
···
stdenv = stdenvNoLibc;
};
+
# These are used when building compiler-rt / libgcc, prior to building libc.
preLibcCrossHeaders =
let
inherit (stdenv.targetPlatform) libc;
···
nginxModules = recurseIntoAttrs (callPackage ../servers/http/nginx/modules.nix { });
+
# We should move to dynamic modules and create a nginxFull package with all modules
nginxShibboleth = nginxStable.override {
modules = [
nginxModules.rtmp
···
jdk = jdk17;
};
+
# perhaps there are better apps for this task? It's how I had configured my previous system.
# And I don't want to rewrite all rules
profanity = callPackage ../applications/networking/instant-messengers/profanity (
{
+1 -1
pkgs/top-level/stage.nix
···
# The complete chain of package set builders, applied from top to bottom.
# stdenvOverlays must be last as it brings package forward from the
-
# previous bootstrapping phases which have already been overlayed.
toFix = lib.foldl' (lib.flip lib.extends) (self: { }) (
[
stdenvBootstappingAndPlatforms
···
# The complete chain of package set builders, applied from top to bottom.
# stdenvOverlays must be last as it brings packages forward from the
+
# previous bootstrapping phases which have already been overlaid.
toFix = lib.foldl' (lib.flip lib.extends) (self: { }) (
[
stdenvBootstappingAndPlatforms