Merge staging-next into staging

Changed files
+3649 -785
.github
nixos
doc
manual
release-notes
modules
programs
services
display-managers
x11
system
tests
pkgs
applications
audio
go-musicfox
squeezelite
graphics
cq-editor
networking
cluster
office
radio
chirp
cloudlog
science
math
eigenmath
version-management
git-cola
video
kodi
addons
visualization-projectm
build-support
php
by-name
ad
adafruit-nrfutil
adwsteamgtk
fr
freefilesync
gm
gmetronome
li
libui-ng
ma
maxfetch
me
megapixels
nr
nrfconnect
re
renode-dts2repl
sp
spicetify-cli
sw
switch-to-configuration-ng
tr
treefmt
treefmt2
yd
ydotool
data
themes
colloid-gtk-theme
development
libraries
cjson
science
math
openspecfun
python-modules
aiounifi
androidtvremote2
b2sdk
bc-detect-secrets
blackjax
cadquery
cryptacular
devito
dirigera
dogpile-cache
equinox
flax
globre
hikari
jax
jaxlib
jaxopt
nanobind
objax
opower
pathlib
pxml
pycrdt-websocket
pyexploitdb
pymilter
pynws
pyscss
pysigma-backend-insightidr
pytrydan
qtile
slicedimage
snakemake-storage-plugin-s3
sphinxcontrib-confluencebuilder
sqlbag
sqlsoup
stem
tencentcloud-sdk-python
tensorflow
ruby-modules
gem-config
tools
analysis
checkov
changie
language-servers
fortls
mysql-shell
kde
os-specific
darwin
rectangle
linux
mstflint_access
servers
home-assistant
custom-components
xiaomi_gateway3
xiaomi_miot
http
nginx
knxd
shells
hishtory
test
tools
admin
aliyun-cli
google-cloud-sdk
misc
diffoscope
graylog
mstflint
ttyplot
undocker
security
qdigidoc
web-eid-app
text
mdbook-open-on-gh
wayland
mpvpaper
ydotool
top-level
+1 -1
.github/workflows/backport.yml
···
if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Create backport PRs
+1 -1
.github/workflows/basic-eval.yml
···
runs-on: ubuntu-latest
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
- uses: cachix/cachix-action@18cf96c7c98e048e10a83abd92116114cd8504be # v14
with:
+1 -1
.github/workflows/check-by-name.yml
···
exit 1
fi
echo "mergedSha=$mergedSha" >> "$GITHUB_ENV"
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
with:
# pull_request_target checks out the base branch by default
ref: ${{ env.mergedSha }}
+1 -1
.github/workflows/check-cherry-picks.yml
···
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
with:
fetch-depth: 0
filter: blob:none
+1 -1
.github/workflows/check-maintainers-sorted.yaml
···
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
+1 -1
.github/workflows/check-nix-format.yml
···
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
+1 -1
.github/workflows/editorconfig.yml
···
- name: print list of changed files
run: |
cat "$HOME/changed_files"
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
+1 -1
.github/workflows/manual-nixos.yml
···
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
+1 -1
.github/workflows/manual-nixpkgs.yml
···
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
+1 -1
.github/workflows/nix-parse.yml
···
if [[ -s "$HOME/changed_files" ]]; then
echo "CHANGED_FILES=$HOME/changed_files" > "$GITHUB_ENV"
fi
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
+1 -1
.github/workflows/periodic-merge-24h.yml
···
into: staging-23.11
name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
- name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
uses: devmasx/merge-branch@854d3ac71ed1e9deb668e0074781b81fdd6e771f # 1.4.0
+1 -1
.github/workflows/periodic-merge-6h.yml
···
into: staging
name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
- name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
uses: devmasx/merge-branch@854d3ac71ed1e9deb668e0074781b81fdd6e771f # 1.4.0
+1 -1
.github/workflows/update-terraform-providers.yml
···
if: github.repository_owner == 'NixOS' && github.ref == 'refs/heads/master' # ensure workflow_dispatch only runs on master
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
+2
nixos/doc/manual/release-notes/rl-2405.section.md
···
- [isolate](https://github.com/ioi/isolate), a sandbox for securely executing untrusted programs. Available as [security.isolate](#opt-security.isolate.enable).
+ - [ydotool](https://github.com/ReimuNotMoe/ydotool), a generic command-line automation tool now has a module. Available as [programs.ydotool](#opt-programs.ydotool.enable).
+
- [private-gpt](https://github.com/zylon-ai/private-gpt), a service to interact with your documents using the power of LLMs, 100% privately, no data leaks. Available as [services.private-gpt](#opt-services.private-gpt.enable).
- [keto](https://www.ory.sh/keto/), a permission & access control server, the first open source implementation of ["Zanzibar: Google's Consistent, Global Authorization System"](https://research.google/pubs/zanzibar-googles-consistent-global-authorization-system/).
+1
nixos/modules/module-list.nix
···
./programs/xwayland.nix
./programs/yabar.nix
./programs/yazi.nix
+ ./programs/ydotool.nix
./programs/yubikey-touch-detector.nix
./programs/zmap.nix
./programs/zsh/oh-my-zsh.nix
+83
nixos/modules/programs/ydotool.nix
···
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.programs.ydotool;
in
{
  meta = {
    maintainers = with lib.maintainers; [ quantenzitrone ];
  };

  options.programs.ydotool = {
    enable = lib.mkEnableOption ''
      ydotoold system service and install ydotool.
      Add yourself to the 'ydotool' group to be able to use it.
    '';
  };

  config = lib.mkIf cfg.enable {
    users.groups.ydotool = { };

    systemd.services.ydotoold = {
      description = "ydotoold - backend for ydotool";
      wantedBy = [ "multi-user.target" ];
      partOf = [ "multi-user.target" ];
      serviceConfig = {
        Group = "ydotool";
        RuntimeDirectory = "ydotoold";
        RuntimeDirectoryMode = "0750";
        ExecStart = "${lib.getExe' pkgs.ydotool "ydotoold"} --socket-path=/run/ydotoold/socket --socket-perm=0660";

        # hardening

        ## allow access to uinput
        DeviceAllow = [ "/dev/uinput" ];
        DevicePolicy = "closed";

        ## allow creation of unix sockets
        RestrictAddressFamilies = [ "AF_UNIX" ];

        CapabilityBoundingSet = "";
        IPAddressDeny = "any";
        LockPersonality = true;
        MemoryDenyWriteExecute = true;
        NoNewPrivileges = true;
        PrivateNetwork = true;
        PrivateTmp = true;
        PrivateUsers = true;
        ProcSubset = "pid";
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        ProtectSystem = "strict";
        ProtectUser = true;
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        SystemCallArchitectures = "native";
        SystemCallFilter = [
          "@system-service"
          "~@privileged"
          "~@resources"
        ];
        UMask = "0077";

        # -> systemd-analyze security score 0.7 SAFE 😀
      };
    };

    environment.variables = {
      YDOTOOL_SOCKET = "/run/ydotoold/socket";
    };
    environment.systemPackages = with pkgs; [ ydotool ];
  };
}
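The module above creates a `ydotool` group, restricts the ydotoold socket to that group (mode 0660), and exports `YDOTOOL_SOCKET` system-wide. A minimal sketch of a consumer configuration, with a hypothetical user name, would be:

{ ... }:
{
  programs.ydotool.enable = true;
  # The socket is only reachable by members of the 'ydotool' group,
  # so add each account that should drive ydotool to that group:
  users.users.alice.extraGroups = [ "ydotool" ];
}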
+13 -1
nixos/modules/services/display-managers/greetd.nix
···
'';
};
+ greeterManagesPlymouth = mkOption {
+   type = types.bool;
+   internal = true;
+   default = false;
+   description = ''
+     Don't configure the greetd service to wait for Plymouth to exit.
+
+     Enable this if the greeter you're using can manage Plymouth itself to provide a smoother handoff.
+   '';
+ };
+
vt = mkOption {
type = types.int;
default = 1;
···
];
After = [
"systemd-user-sessions.service"
+ "getty@${tty}.service"
+ ] ++ lib.optionals (!cfg.greeterManagesPlymouth) [
"plymouth-quit-wait.service"
- "getty@${tty}.service"
];
Conflicts = [
"getty@${tty}.service"
-3
nixos/modules/services/x11/xserver.nix
···
rm -f /tmp/.X0-lock
'';
- # TODO: move declaring the systemd service to its own mkIf
- script = mkIf (config.systemd.services.display-manager.enable == true) "${config.services.displayManager.execCmd}";
-
# Stop restarting if the display manager stops (crashes) 2 times
# in one minute. Starting X typically takes 3-4s.
startLimitIntervalSec = 30;
+74 -33
nixos/modules/system/activation/switchable-system.nix
···
perlWrapped = pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp ]);
+
description = extra: ''
+
Whether to include the capability to switch configurations.
+
+
Disabling this makes the system unable to be reconfigured via `nixos-rebuild`.
+
+
${extra}
+
'';
+
in
{
-
options = {
-
system.switch.enable = lib.mkOption {
+
options.system.switch = {
+
enable = lib.mkOption {
type = lib.types.bool;
default = true;
-
description = ''
-
Whether to include the capability to switch configurations.
-
-
Disabling this makes the system unable to be reconfigured via `nixos-rebuild`.
-
+
description = description ''
This is good for image based appliances where updates are handled
outside the image. Reducing features makes the image lighter and
slightly more secure.
'';
};
+
+
enableNg = lib.mkOption {
+
type = lib.types.bool;
+
default = false;
+
description = description ''
+
Whether to use `switch-to-configuration-ng`, an experimental
+
re-implementation of `switch-to-configuration` with the goal of
+
replacing the original.
+
'';
+
};
};
-
config = lib.mkIf config.system.switch.enable {
-
system.activatableSystemBuilderCommands = ''
-
mkdir $out/bin
-
substitute ${./switch-to-configuration.pl} $out/bin/switch-to-configuration \
-
--subst-var out \
-
--subst-var-by toplevel ''${!toplevelVar} \
-
--subst-var-by coreutils "${pkgs.coreutils}" \
-
--subst-var-by distroId ${lib.escapeShellArg config.system.nixos.distroId} \
-
--subst-var-by installBootLoader ${lib.escapeShellArg config.system.build.installBootLoader} \
-
--subst-var-by localeArchive "${config.i18n.glibcLocales}/lib/locale/locale-archive" \
-
--subst-var-by perl "${perlWrapped}" \
-
--subst-var-by shell "${pkgs.bash}/bin/sh" \
-
--subst-var-by su "${pkgs.shadow.su}/bin/su" \
-
--subst-var-by systemd "${config.systemd.package}" \
-
--subst-var-by utillinux "${pkgs.util-linux}" \
-
;
+
config = lib.mkMerge [
+
{
+
assertions = [{
+
assertion = with config.system.switch; enable -> !enableNg;
+
message = "Only one of system.switch.enable and system.switch.enableNg may be enabled at a time";
+
}];
+
}
+
(lib.mkIf config.system.switch.enable {
+
system.activatableSystemBuilderCommands = ''
+
mkdir $out/bin
+
substitute ${./switch-to-configuration.pl} $out/bin/switch-to-configuration \
+
--subst-var out \
+
--subst-var-by toplevel ''${!toplevelVar} \
+
--subst-var-by coreutils "${pkgs.coreutils}" \
+
--subst-var-by distroId ${lib.escapeShellArg config.system.nixos.distroId} \
+
--subst-var-by installBootLoader ${lib.escapeShellArg config.system.build.installBootLoader} \
+
--subst-var-by localeArchive "${config.i18n.glibcLocales}/lib/locale/locale-archive" \
+
--subst-var-by perl "${perlWrapped}" \
+
--subst-var-by shell "${pkgs.bash}/bin/sh" \
+
--subst-var-by su "${pkgs.shadow.su}/bin/su" \
+
--subst-var-by systemd "${config.systemd.package}" \
+
--subst-var-by utillinux "${pkgs.util-linux}" \
+
;
+
+
chmod +x $out/bin/switch-to-configuration
+
${lib.optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
+
if ! output=$(${perlWrapped}/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
+
echo "switch-to-configuration syntax is not valid:"
+
echo "$output"
+
exit 1
+
fi
+
''}
+
'';
+
})
+
(lib.mkIf config.system.switch.enableNg {
+
# Use a subshell so we can source makeWrapper's setup hook without
+
# affecting the rest of activatableSystemBuilderCommands.
+
system.activatableSystemBuilderCommands = ''
+
(
+
source ${pkgs.buildPackages.makeWrapper}/nix-support/setup-hook
-
chmod +x $out/bin/switch-to-configuration
-
${lib.optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
-
if ! output=$(${perlWrapped}/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
-
echo "switch-to-configuration syntax is not valid:"
-
echo "$output"
-
exit 1
-
fi
-
''}
-
'';
-
};
+
mkdir $out/bin
+
ln -sf ${lib.getExe pkgs.switch-to-configuration-ng} $out/bin/switch-to-configuration
+
wrapProgram $out/bin/switch-to-configuration \
+
--set OUT $out \
+
--set TOPLEVEL ''${!toplevelVar} \
+
--set DISTRO_ID ${lib.escapeShellArg config.system.nixos.distroId} \
+
--set INSTALL_BOOTLOADER ${lib.escapeShellArg config.system.build.installBootLoader} \
+
--set LOCALE_ARCHIVE ${config.i18n.glibcLocales}/lib/locale/locale-archive \
+
--set SYSTEMD ${config.systemd.package}
+
)
+
'';
+
})
+
];
}
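Since the added assertion allows only one implementation to be active at a time, opting in to the experimental Rust re-implementation means disabling the Perl one. A minimal sketch, assuming an otherwise unchanged configuration:

{ ... }:
{
  # switch-to-configuration-ng is experimental; at most one of these may be true.
  system.switch.enable = false;
  system.switch.enableNg = true;
}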
+3 -1
nixos/tests/all-tests.nix
···
swap-random-encryption = handleTest ./swap-random-encryption.nix {};
sway = handleTest ./sway.nix {};
swayfx = handleTest ./swayfx.nix {};
- switchTest = handleTest ./switch-test.nix {};
+ switchTest = handleTest ./switch-test.nix { ng = false; };
+ switchTestNg = handleTest ./switch-test.nix { ng = true; };
sympa = handleTest ./sympa.nix {};
syncthing = handleTest ./syncthing.nix {};
syncthing-no-settings = handleTest ./syncthing-no-settings.nix {};
···
xterm = handleTest ./xterm.nix {};
xxh = handleTest ./xxh.nix {};
yabar = handleTest ./yabar.nix {};
+ ydotool = handleTest ./ydotool.nix {};
yggdrasil = handleTest ./yggdrasil.nix {};
zammad = handleTest ./zammad.nix {};
zeronet-conservancy = handleTest ./zeronet-conservancy.nix {};
+3
nixos/tests/fcitx5/default.nix
···
machine.succeed("xauth merge ${xauth}")
machine.sleep(5)
+ machine.wait_until_succeeds("pgrep fcitx5")
machine.succeed("su - ${user.name} -c 'kill $(pgrep fcitx5)'")
machine.sleep(1)
machine.succeed("su - ${user.name} -c 'alacritty >&2 &'")
+ machine.wait_for_window("alice@machine")
+ machine.succeed("su - ${user.name} -c 'fcitx5 >&2 &'")
machine.sleep(10)
+6 -1
nixos/tests/switch-test.nix
···
# Test configuration switching.
- import ./make-test-python.nix ({ lib, pkgs, ...} : let
+ import ./make-test-python.nix ({ lib, pkgs, ng, ...} : let
# Simple service that can either be socket-activated or that will
# listen on port 1234 if not socket-activated.
···
nodes = {
machine = { pkgs, lib, ... }: {
+ system.switch = {
+   enable = !ng;
+   enableNg = ng;
+ };
+
environment.systemPackages = [ pkgs.socat ]; # for the socket activation stuff
users.mutableUsers = false;
+115
nixos/tests/ydotool.nix
···
+
import ./make-test-python.nix (
+
{ pkgs, lib, ... }:
+
let
+
textInput = "This works.";
+
inputBoxText = "Enter input";
+
inputBox = pkgs.writeShellScript "zenity-input" ''
+
${lib.getExe pkgs.gnome.zenity} --entry --text '${inputBoxText}:' > /tmp/output &
+
'';
+
in
+
{
+
name = "ydotool";
+
+
meta = {
+
maintainers = with lib.maintainers; [
+
OPNA2608
+
quantenzitrone
+
];
+
};
+
+
nodes = {
+
headless =
+
{ config, ... }:
+
{
+
imports = [ ./common/user-account.nix ];
+
+
users.users.alice.extraGroups = [ "ydotool" ];
+
+
programs.ydotool.enable = true;
+
+
services.getty.autologinUser = "alice";
+
};
+
+
x11 =
+
{ config, ... }:
+
{
+
imports = [
+
./common/user-account.nix
+
./common/auto.nix
+
./common/x11.nix
+
];
+
+
users.users.alice.extraGroups = [ "ydotool" ];
+
+
programs.ydotool.enable = true;
+
+
test-support.displayManager.auto = {
+
enable = true;
+
user = "alice";
+
};
+
+
services.xserver.windowManager.dwm.enable = true;
+
services.displayManager.defaultSession = lib.mkForce "none+dwm";
+
};
+
+
wayland =
+
{ config, ... }:
+
{
+
imports = [ ./common/user-account.nix ];
+
+
services.cage = {
+
enable = true;
+
user = "alice";
+
};
+
+
programs.ydotool.enable = true;
+
+
services.cage.program = inputBox;
+
};
+
};
+
+
enableOCR = true;
+
+
testScript =
+
{ nodes, ... }:
+
''
+
def as_user(cmd: str):
+
"""
+
Return a shell command for running a shell command as a specific user.
+
"""
+
return f"sudo -u alice -i {cmd}"
+
+
start_all()
+
+
# Headless
+
headless.wait_for_unit("multi-user.target")
+
headless.wait_for_text("alice")
+
headless.succeed(as_user("ydotool type 'echo ${textInput} > /tmp/output'")) # text input
+
headless.succeed(as_user("ydotool key 28:1 28:0")) # text input
+
headless.screenshot("headless_input")
+
headless.wait_for_file("/tmp/output")
+
headless.wait_until_succeeds("grep '${textInput}' /tmp/output") # text input
+
+
# X11
+
x11.wait_for_x()
+
x11.execute(as_user("${inputBox}"))
+
x11.wait_for_text("${inputBoxText}")
+
x11.succeed(as_user("ydotool type '${textInput}'")) # text input
+
x11.screenshot("x11_input")
+
x11.succeed(as_user("ydotool mousemove -a 400 110")) # mouse input
+
x11.succeed(as_user("ydotool click 0xC0")) # mouse input
+
x11.wait_for_file("/tmp/output")
+
x11.wait_until_succeeds("grep '${textInput}' /tmp/output") # text input
+
+
# Wayland
+
wayland.wait_for_unit("graphical.target")
+
wayland.wait_for_text("${inputBoxText}")
+
wayland.succeed("ydotool type '${textInput}'") # text input
+
wayland.screenshot("wayland_input")
+
wayland.succeed("ydotool mousemove -a 100 100") # mouse input
+
wayland.succeed("ydotool click 0xC0") # mouse input
+
wayland.wait_for_file("/tmp/output")
+
wayland.wait_until_succeeds("grep '${textInput}' /tmp/output") # text input
+
'';
+
}
+
)
+3 -3
pkgs/applications/audio/go-musicfox/default.nix
···
buildGoModule rec {
pname = "go-musicfox";
-
version = "4.3.3";
+
version = "4.4.0";
src = fetchFromGitHub {
owner = "go-musicfox";
repo = "go-musicfox";
rev = "v${version}";
-
hash = "sha256-J6R3T92cHFUkKwc+GKm612tVjglP2Tc/kDUmzUMhvio=";
+
hash = "sha256-11N3dykuEDKAryieNVaPfb3G7E/a4A7Znx9rBvFTHC4=";
};
deleteVendor = true;
-
vendorHash = "sha256-KSIdBEEvYaYcDIDmzfRO857I8FSN4Ajw6phAPQLYEqg=";
+
vendorHash = "sha256-ey78zeCSEuRgteG5ZRb4uO88E6lwEgqSxKfjJg3NGT4=";
subPackages = [ "cmd/musicfox.go" ];
+3 -3
pkgs/applications/audio/squeezelite/default.nix
···
pname = binName;
# versions are specified in `squeezelite.h`
# see https://github.com/ralph-irving/squeezelite/issues/29
-
version = "2.0.0.1486";
+
version = "2.0.0.1488";
src = fetchFromGitHub {
owner = "ralph-irving";
repo = "squeezelite";
-
rev = "fd4a82e7d0e53124d9618320f3c115d90654509d";
-
hash = "sha256-nR2Px7VYjAktUsueEyBAV2392+/dX6JYIy7YSMh05c0=";
+
rev = "0e85ddfd79337cdc30b7d29922b1d790600bb6b4";
+
hash = "sha256-FGqo/c74JN000w/iRnvYUejqnYGDzHNZu9pEmR7yR3s=";
};
buildInputs = [ flac libmad libvorbis mpg123 ]
-63
pkgs/applications/graphics/cq-editor/default.nix
···
-
{ lib
-
, mkDerivationWith
-
, python3Packages
-
, fetchFromGitHub
-
, wrapQtAppsHook
-
}:
-
-
mkDerivationWith python3Packages.buildPythonApplication rec {
-
pname = "cq-editor";
-
version = "0.1.1";
-
-
src = fetchFromGitHub {
-
owner = "CadQuery";
-
repo = "CQ-editor";
-
rev = version;
-
sha256 = "1970izjaa60r5cg9i35rzz9lk5c5d8q1vw1rh2skvfbf63z1hnzv";
-
};
-
-
patches = [
-
./spyder4.patch
-
];
-
-
propagatedBuildInputs = with python3Packages; [
-
cadquery
-
logbook
-
pyqt5
-
pyparsing
-
pyqtgraph
-
spyder
-
path
-
qtconsole
-
requests
-
];
-
-
nativeBuildInputs = [ wrapQtAppsHook ];
-
preFixup = ''
-
makeWrapperArgs+=("''${qtWrapperArgs[@]}")
-
'';
-
-
nativeCheckInputs = with python3Packages; [
-
pytest
-
pytest-xvfb
-
pytest-mock
-
pytest-cov
-
pytest-repeat
-
pytest-qt
-
];
-
-
checkPhase = ''
-
pytest --no-xvfb
-
'';
-
-
# requires X server
-
doCheck = false;
-
-
meta = with lib; {
-
description = "CadQuery GUI editor based on PyQT";
-
homepage = "https://github.com/CadQuery/CQ-editor";
-
license = licenses.asl20;
-
maintainers = with maintainers; [ costrouc marcus7070 ];
-
};
-
-
}
+5 -5
pkgs/applications/networking/cluster/krane/Gemfile.lock
···
addressable (2.8.6)
public_suffix (>= 2.0.2, < 6.0)
base64 (0.2.0)
-
bigdecimal (3.1.7)
+
bigdecimal (3.1.8)
colorize (0.8.1)
concurrent-ruby (1.2.3)
connection_pool (2.4.1)
···
http-cookie (1.0.5)
domain_name (~> 0.5)
http-form_data (2.3.0)
-
i18n (1.14.4)
+
i18n (1.14.5)
concurrent-ruby (~> 1.0)
jsonpath (1.1.5)
multi_json
jwt (2.8.1)
base64
-
krane (3.5.2)
+
krane (3.5.3)
activesupport (>= 5.0)
colorize (~> 0.8)
concurrent-ruby (~> 1.1)
···
rake (~> 13.0)
mime-types (3.5.2)
mime-types-data (~> 3.2015)
-
mime-types-data (3.2024.0305)
+
mime-types-data (3.2024.0507)
minitest (5.22.3)
multi_json (1.15.0)
mutex_m (0.2.0)
···
krane
BUNDLED WITH
-
2.5.7
+
2.5.9
+8 -8
pkgs/applications/networking/cluster/krane/gemset.nix
···
platforms = [];
source = {
remotes = ["https://rubygems.org"];
-
sha256 = "0cq1c29zbkcxgdihqisirhcw76xc768z2zpd5vbccpq0l1lv76g7";
+
sha256 = "1gi7zqgmqwi5lizggs1jhc3zlwaqayy9rx2ah80sxy24bbnng558";
type = "gem";
};
-
version = "3.1.7";
+
version = "3.1.8";
};
colorize = {
groups = ["default"];
···
platforms = [];
source = {
remotes = ["https://rubygems.org"];
-
sha256 = "0lbm33fpb3w06wd2231sg58dwlwgjsvym93m548ajvl6s3mfvpn7";
+
sha256 = "1ffix518y7976qih9k1lgnc17i3v6yrlh0a3mckpxdb4wc2vrp16";
type = "gem";
};
-
version = "1.14.4";
+
version = "1.14.5";
};
jsonpath = {
dependencies = ["multi_json"];
···
platforms = [];
source = {
remotes = ["https://rubygems.org"];
-
sha256 = "1s2xc5igk3yg3jpl3abakvrsf4xl6hljhgyddjsrp2g05sksa9x6";
+
sha256 = "11jj5fpfsb1sfwgc356xv1vxl8yq65wbbshmawpagk555y0pzbpb";
type = "gem";
};
-
version = "3.5.2";
+
version = "3.5.3";
};
kubeclient = {
dependencies = ["http" "jsonpath" "recursive-open-struct" "rest-client"];
···
platforms = [];
source = {
remotes = ["https://rubygems.org"];
-
sha256 = "00x7w5xqsj9m33v3vkmy23wipkkysafksib53ypzn27p5g81w455";
+
sha256 = "0kybw1a6f7d1ipyawnpi5cwiy05rkz9qwglgfvhmd1z0l2gcigmm";
type = "gem";
};
-
version = "3.2024.0305";
+
version = "3.2024.0507";
};
minitest = {
groups = ["default"];
+11
pkgs/applications/office/gnucash/0005-disable-test-lots.patch
···
--- a/libgnucash/engine/test/CMakeLists.txt	2024-02-23 09:05:19.000000000 +0900
+++ b/libgnucash/engine/test/CMakeLists.txt	2024-05-08 22:08:04.572060359 +0900
@@ -57,7 +57,6 @@
 
 add_engine_test(test-account-object test-account-object.cpp)
 add_engine_test(test-group-vs-book test-group-vs-book.cpp)
-add_engine_test(test-lots test-lots.cpp)
 add_engine_test(test-querynew test-querynew.c)
 add_engine_test(test-query test-query.cpp)
 add_engine_test(test-split-vs-account test-split-vs-account.cpp)
pkgs/applications/office/gnucash/default.nix
···
./0003-remove-valgrind.patch
# this patch makes gnucash exec the Finance::Quote wrapper directly
./0004-exec-fq-wrapper.patch
+ # this patch disables a flaky test
+ # see https://bugs.gnucash.org/show_bug.cgi?id=799289
+ ./0005-disable-test-lots.patch
# Fix importing QIF by backporting a fix. remove on next release
# https://bugs.gnucash.org/show_bug.cgi?id=799262
(fetchpatch {
+4
pkgs/applications/office/planify/default.nix
···
, evolution-data-server
, glib
, glib-networking
+ , gst_all_1
, gtk4
, gtksourceview5
, gxml
···
evolution-data-server
glib
glib-networking
+ # Needed for GtkMediaStream creation with success.ogg, see #311295.
+ gst_all_1.gst-plugins-base
+ gst_all_1.gst-plugins-good
gtk4
gtksourceview5
gxml
+3 -3
pkgs/applications/radio/chirp/default.nix
···
python3.pkgs.buildPythonApplication rec {
pname = "chirp";
-
version = "0.4.0-unstable-2024-05-03";
+
version = "0.4.0-unstable-2024-05-10";
src = fetchFromGitHub {
owner = "kk7ds";
repo = "chirp";
-
rev = "e95140ff433b805ca16df04cba501b7332a9ec95";
-
hash = "sha256-UU3Ve6Yb7UK2nOLTfJasrlMX4iu1cpLBLScvhRhTUJ0=";
+
rev = "d5dc5c8e053dbcf87c8b0ccf03109c0870c22bfb";
+
hash = "sha256-Tqq1dTjtzHTgaHUAio5B8V4Bo+P8EPa3s/kG181TrCc=";
};
buildInputs = [
glib
+2 -2
pkgs/applications/radio/cloudlog/default.nix
···
stdenvNoCC.mkDerivation rec {
pname = "cloudlog";
-
version = "2.6.10";
+
version = "2.6.11";
src = fetchFromGitHub {
owner = "magicbug";
repo = "Cloudlog";
rev = version;
-
hash = "sha256-VPiv/Df4dasiYUBA6UHrxBczmGAqzhQkPrknzSYJxAE=";
+
hash = "sha256-9JGhMCypoKOh1Gy1DdK7nrma+L4sJsYN/iuNhBPmHPA=";
};
postPatch = ''
+3 -3
pkgs/applications/science/math/eigenmath/default.nix
···
stdenv.mkDerivation rec {
pname = "eigenmath";
-
version = "0-unstable-2024-05-03";
+
version = "0-unstable-2024-05-12";
src = fetchFromGitHub {
owner = "georgeweigt";
repo = pname;
-
rev = "69ebfbaa7f328ddc9d97b7c404369818a5febe4a";
-
hash = "sha256-lHxVyUXIY9+YIStA6202Bhy/b0xaxJbW/VPI7FbNJk0=";
+
rev = "978b3bd582a90c8e82079f2e4e4a3a5038cbbe08";
+
hash = "sha256-DoGX8nUcWcioTq8ymB+HLsCnt9V6HTKSX4Zs2CQz78Q=";
};
checkPhase = let emulator = stdenv.hostPlatform.emulator buildPackages; in ''
+2 -2
pkgs/applications/version-management/git-cola/default.nix
···
python3Packages.buildPythonApplication rec {
pname = "git-cola";
-
version = "4.7.0";
+
version = "4.7.1";
pyproject = true;
src = fetchFromGitHub {
owner = "git-cola";
repo = "git-cola";
rev = "v${version}";
-
hash = "sha256-BiSs3vWJacCtGthHi/nFJm4Hqt0uO6XXZi/Zqvjb928=";
+
hash = "sha256-93aayGGMgkSghTpx8M5Cfbxf2szAwrSzuoWK6GCTqZ8=";
};
buildInputs = lib.optionals stdenv.isLinux [
+2 -2
pkgs/applications/video/kodi/addons/visualization-projectm/default.nix
···
buildKodiBinaryAddon rec {
pname = "visualization-projectm";
namespace = "visualization.projectm";
-
version = "20.2.0";
+
version = "21.0.1";
src = fetchFromGitHub {
owner = "xbmc";
repo = namespace;
rev = "${version}-${rel}";
-
hash = "sha256-Kcl1ep+RJlofFmxkrGT3T+XXdwiCofq1hggwU0PAd0E=";
+
hash = "sha256-wjSQmOtQb4KjY3iH3Xh7AdQwE6ked+cpW6/gdNYS+NA=";
};
extraBuildInputs = [ pkg-config libGL projectm ];
+8
pkgs/build-support/php/builders/v1/build-composer-project.nix
···
{
callPackage,
+
nix-update-script,
stdenvNoCC,
lib,
php,
···
composerNoScripts = previousAttrs.composerNoScripts or true;
composerStrictValidation = previousAttrs.composerStrictValidation or true;
});
+
+ # Projects providing a lockfile from upstream can be automatically updated.
+ passthru = previousAttrs.passthru or { } // {
+   updateScript =
+     previousAttrs.passthru.updateScript
+       or (if finalAttrs.composerRepository.composerLock == null then nix-update-script { } else null);
+ };
COMPOSER_CACHE_DIR = "/dev/null";
+6
pkgs/by-name/ad/adafruit-nrfutil/package.nix
···
patches = [
# Pull a patch which fixes the tests, but is not yet released in a new version:
# https://github.com/adafruit/Adafruit_nRF52_nrfutil/pull/38
+ # https://github.com/adafruit/Adafruit_nRF52_nrfutil/pull/42
(fetchpatch {
name = "fix-tests.patch";
url = "https://github.com/adafruit/Adafruit_nRF52_nrfutil/commit/e5fbcc8ee5958041db38c04139ba686bf7d1b845.patch";
sha256 = "sha256-0tbJldGtYcDdUzA3wZRv0lenXVn6dqV016U9nMpQ6/w=";
+ })
+ (fetchpatch {
+   name = "fix-test-test_get_vk_pem.patch";
+   url = "https://github.com/adafruit/Adafruit_nRF52_nrfutil/commit/f42cee3c2d7c8d0911f27ba24d6a140083cb85cf.patch";
+   sha256 = "sha256-7WoRqPKc8O5EYK7Fj1WrMJREwhueiVpkEizIfVnEPBU=";
})
];
+2 -2
pkgs/by-name/ad/adwsteamgtk/package.nix
···
python3Packages.buildPythonApplication rec {
pname = "adwsteamgtk";
-
version = "0.6.10";
+
version = "0.6.11";
# built with meson, not a python format
format = "other";
···
owner = "Foldex";
repo = "AdwSteamGtk";
rev = "refs/tags/v${version}";
-
hash = "sha256-sh4FLXG78i20Bt8pCCbhO6Sx815stjAZRLCD+X5Zk40=";
+
hash = "sha256-f7+2gTpG5Ntgq+U2AkQihzSTrO+oMsLWxgxe4dVyz8A=";
};
buildInputs = [
+2 -10
pkgs/by-name/fr/freefilesync/package.nix
···
stdenv.mkDerivation (finalAttrs: {
pname = "freefilesync";
- version = "13.5";
+ version = "13.6";
src = fetchurl {
url = "https://freefilesync.org/download/FreeFileSync_${finalAttrs.version}_Source.zip";
···
rm -f $out
tryDownload "$url"
'';
- hash = "sha256-8At8QobAQR2mQnFjFSPTkEuxmP9M8gINP0qH28J3ynY=";
+ hash = "sha256-lJ4LiisUy8w6OPd44wJufH+ol1YwjOfFQfqzj3HWb2w=";
};
sourceRoot = ".";
···
debianRevision = "1";
patch = "Disable_wxWidgets_uncaught_exception_handling.patch";
hash = "sha256-Fem7eDDKSqPFU/t12Jco8OmYC8FM9JgB4/QVy/ouvbI=";
- })
- # Disable update patch
- (fetchDebianPatch {
-   pname = "freefilesync";
-   version = "13.3";
-   debianRevision = "1";
-   patch = "ffs_no_check_updates.patch";
-   hash = "sha256-lPyHpxhZz8BSnDI8QfAzKpKwVkp2jiF49RWjKNuZGII=";
})
];
+2 -2
pkgs/by-name/gm/gmetronome/package.nix
···
stdenv.mkDerivation rec {
pname = "gmetronome";
-
version = "0.3.3";
+
version = "0.3.4";
src = fetchFromGitLab {
domain = "gitlab.gnome.org";
owner = "dqpb";
repo = "gmetronome";
rev = version;
-
hash = "sha256-ilFO1HwleWIQ51Bkzck1sm1Yu3ugqkvZrpxPOYzXydM=";
+
hash = "sha256-fjtdM2/LMZx0dFt78Ih4BG5+NZoocPknh5INA+2e5qk=";
};
nativeBuildInputs = [
+3 -3
pkgs/by-name/li/libui-ng/package.nix
···
stdenv.mkDerivation rec {
pname = "libui-ng";
-
version = "4.1-unstable-2024-02-05";
+
version = "4.1-unstable-2024-05-03";
src = fetchFromGitHub {
owner = "libui-ng";
repo = "libui-ng";
-
rev = "4d46de31eafad84c88b939356bcd64e6c5ee3821";
-
hash = "sha256-Yb8VdJe75uBzRnsfTOVxUXstZmu6dJ9nBuOrf86KO5s=";
+
rev = "56f1ad65f0f32bb1eb67a268cca4658fbe4567c1";
+
hash = "sha256-wo4iS/a1ErdipFDPYKvaGpO/JGtk6eU/qMLC4eUoHnA=";
};
postPatch = lib.optionalString (stdenv.isDarwin && stdenv.isx86_64) ''
+39
pkgs/by-name/ma/maxfetch/package.nix
···
{ lib
, stdenvNoCC
, fetchFromGitHub
, makeBinaryWrapper
, gnused
, ncurses
, procps
}:

stdenvNoCC.mkDerivation {
  pname = "maxfetch";
  version = "unstable-2023-07-31";

  src = fetchFromGitHub {
    owner = "jobcmax";
    repo = "maxfetch";
    rev = "17baa4047073e20572403b70703c69696af6b68d";
    hash = "sha256-LzOhrFFjGs9GIDjk1lUFKhlnzJuEUrKjBcv1eT3kaY8=";
  };

  nativeBuildInputs = [ makeBinaryWrapper ];

  installPhase = ''
    runHook preInstall
    install -Dm755 maxfetch $out/bin/maxfetch
    wrapProgram $out/bin/maxfetch \
      --prefix PATH : ${lib.makeBinPath [ gnused ncurses procps ]}
    runHook postInstall
  '';

  meta = with lib; {
    description = "Nice fetching program written in sh";
    homepage = "https://github.com/jobcmax/maxfetch";
    license = licenses.gpl2Plus;
    mainProgram = "maxfetch";
    maintainers = with maintainers; [ jtbx ];
    platforms = platforms.unix;
  };
}
+2 -2
pkgs/by-name/me/megapixels/package.nix
···
in
stdenv.mkDerivation (finalAttrs: {
pname = "megapixels";
-
version = "1.8.1";
+
version = "1.8.2";
src = fetchFromGitLab {
owner = "megapixels-org";
repo = "Megapixels";
rev = finalAttrs.version;
-
hash = "sha256-TXiPJbd4TPpsEvmD97F7xkm4rS1g+ViTVTNlxeXrQaw=";
+
hash = "sha256-odsOYrk//ZhodsumLpJjhPDcwF1gkE/no166u+IDxSY=";
};
nativeBuildInputs = [
+2 -3
pkgs/by-name/nr/nrfconnect/package.nix
···
let
pname = "nrfconnect";
-
version = "4.3.0";
+
version = "4.4.1";
src = fetchurl {
url = "https://nsscprodmedia.blob.core.windows.net/prod/software-and-other-downloads/desktop-software/nrf-connect-for-desktop/${lib.versions.major version}-${lib.versions.minor version}-${lib.versions.patch version}/nrfconnect-${version}-x86_64.appimage";
-
hash = "sha256-G8//dZqPxn6mR8Bjzf/bAn9Gv7t2AFWIF9twCGbqMd8=";
+
hash = "sha256-x/vVSOEajuQtLATRXk8DVLlXHegCqP+acecaOFNeBb8=";
name = "${pname}-${version}.AppImage";
};
···
];
extraInstallCommands = ''
-
mv $out/bin/nrfconnect-* $out/bin/nrfconnect
install -Dm444 ${appimageContents}/nrfconnect.desktop -t $out/share/applications
install -Dm444 ${appimageContents}/usr/share/icons/hicolor/512x512/apps/nrfconnect.png \
-t $out/share/icons/hicolor/512x512/apps
+3 -3
pkgs/by-name/re/renode-dts2repl/package.nix
···
python3.pkgs.buildPythonApplication {
pname = "renode-dts2repl";
-
version = "0-unstable-2024-04-30";
+
version = "0-unstable-2024-05-09";
pyproject = true;
src = fetchFromGitHub {
owner = "antmicro";
repo = "dts2repl";
-
rev = "dc2212318aec6c2aa6920795e16fab7dfcb8796e";
-
hash = "sha256-XY2rQqu2CSKRImx0GmC5ggTZTyvrtX+LvqkYj0sEBuU=";
+
rev = "b95c930c2122e227bbacee42f35933a4c2d40771";
+
hash = "sha256-Sax+ckln+R6ll/UPztESJEjO8dtq8THmi309CaFTv0I=";
};
nativeBuildInputs = [
+2 -2
pkgs/by-name/sp/spicetify-cli/package.nix
···
buildGoModule rec {
pname = "spicetify-cli";
-
version = "2.36.10";
+
version = "2.36.11";
src = fetchFromGitHub {
owner = "spicetify";
repo = "spicetify-cli";
rev = "v${version}";
-
hash = "sha256-lwbd5sXqzC3H2GwmVqxAdt6Qcic00wh39l5Kp1UIYAs=";
+
hash = "sha256-ZqWGKuYDxuKVqz6cNxZ3cTcKTxkxuu42b48hlAialKc=";
};
vendorHash = "sha256-UPrLXzAdvCOmLm1tekzKyulQ4+2BSyPUF1k66GwKS88=";
+1
pkgs/by-name/sw/switch-to-configuration-ng/.gitignore
···
/target
+527
pkgs/by-name/sw/switch-to-configuration-ng/Cargo.lock
···
+
# This file is automatically @generated by Cargo.
+
# It is not intended for manual editing.
+
version = 3
+
+
[[package]]
+
name = "aho-corasick"
+
version = "1.1.3"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+
dependencies = [
+
"memchr",
+
]
+
+
[[package]]
+
name = "ansi_term"
+
version = "0.12.1"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
+
dependencies = [
+
"winapi",
+
]
+
+
[[package]]
+
name = "anyhow"
+
version = "1.0.82"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
+
+
[[package]]
+
name = "atty"
+
version = "0.2.14"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+
dependencies = [
+
"hermit-abi",
+
"libc",
+
"winapi",
+
]
+
+
[[package]]
+
name = "bitflags"
+
version = "1.3.2"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+
[[package]]
+
name = "bitflags"
+
version = "2.5.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
+
+
[[package]]
+
name = "cfg-if"
+
version = "1.0.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+
[[package]]
+
name = "cfg_aliases"
+
version = "0.1.1"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
+
+
[[package]]
+
name = "clap"
+
version = "2.34.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
+
dependencies = [
+
"ansi_term",
+
"atty",
+
"bitflags 1.3.2",
+
"strsim",
+
"textwrap",
+
"unicode-width",
+
"vec_map",
+
]
+
+
[[package]]
+
name = "const-random"
+
version = "0.1.18"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359"
+
dependencies = [
+
"const-random-macro",
+
]
+
+
[[package]]
+
name = "const-random-macro"
+
version = "0.1.16"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"
+
dependencies = [
+
"getrandom",
+
"once_cell",
+
"tiny-keccak",
+
]
+
+
[[package]]
+
name = "crunchy"
+
version = "0.2.2"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+
+
[[package]]
+
name = "dbus"
+
version = "0.9.7"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "1bb21987b9fb1613058ba3843121dd18b163b254d8a6e797e144cbac14d96d1b"
+
dependencies = [
+
"libc",
+
"libdbus-sys",
+
"winapi",
+
]
+
+
[[package]]
+
name = "dbus-codegen"
+
version = "0.11.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "bcd91775d91fc83c7d526aa7c08078bac0b30f382706689901ac819fe6ddc812"
+
dependencies = [
+
"clap",
+
"dbus",
+
"xml-rs",
+
]
+
+
[[package]]
+
name = "deranged"
+
version = "0.3.11"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
+
dependencies = [
+
"powerfmt",
+
]
+
+
[[package]]
+
name = "dlv-list"
+
version = "0.5.2"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f"
+
dependencies = [
+
"const-random",
+
]
+
+
[[package]]
+
name = "error-chain"
+
version = "0.12.4"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc"
+
dependencies = [
+
"version_check",
+
]
+
+
[[package]]
+
name = "getrandom"
+
version = "0.2.14"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c"
+
dependencies = [
+
"cfg-if",
+
"libc",
+
"wasi",
+
]
+
+
[[package]]
+
name = "glob"
+
version = "0.3.1"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
+
+
[[package]]
+
name = "hashbrown"
+
version = "0.14.5"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
+
+
[[package]]
+
name = "hermit-abi"
+
version = "0.1.19"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+
dependencies = [
+
"libc",
+
]
+
+
[[package]]
+
name = "hostname"
+
version = "0.3.1"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
+
dependencies = [
+
"libc",
+
"match_cfg",
+
"winapi",
+
]
+
+
[[package]]
+
name = "itoa"
+
version = "1.0.11"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+
+
[[package]]
+
name = "libc"
+
version = "0.2.154"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
+
+
[[package]]
+
name = "libdbus-sys"
+
version = "0.2.5"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "06085512b750d640299b79be4bad3d2fa90a9c00b1fd9e1b46364f66f0485c72"
+
dependencies = [
+
"pkg-config",
+
]
+
+
[[package]]
+
name = "log"
+
version = "0.4.21"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+
+
[[package]]
+
name = "match_cfg"
+
version = "0.1.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"
+
+
[[package]]
+
name = "memchr"
+
version = "2.7.2"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d"
+
+
[[package]]
+
name = "nix"
+
version = "0.28.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
+
dependencies = [
+
"bitflags 2.5.0",
+
"cfg-if",
+
"cfg_aliases",
+
"libc",
+
]
+
+
[[package]]
+
name = "num-conv"
+
version = "0.1.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
+
+
[[package]]
+
name = "num_threads"
+
version = "0.1.7"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9"
+
dependencies = [
+
"libc",
+
]
+
+
[[package]]
+
name = "once_cell"
+
version = "1.19.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+
+
[[package]]
+
name = "ordered-multimap"
+
version = "0.7.3"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79"
+
dependencies = [
+
"dlv-list",
+
"hashbrown",
+
]
+
+
[[package]]
+
name = "pkg-config"
+
version = "0.3.30"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
+
+
[[package]]
+
name = "powerfmt"
+
version = "0.2.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
+
+
[[package]]
+
name = "proc-macro2"
+
version = "1.0.81"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba"
+
dependencies = [
+
"unicode-ident",
+
]
+
+
[[package]]
+
name = "quote"
+
version = "1.0.36"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
+
dependencies = [
+
"proc-macro2",
+
]
+
+
[[package]]
+
name = "regex"
+
version = "1.10.4"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
+
dependencies = [
+
"aho-corasick",
+
"memchr",
+
"regex-automata",
+
"regex-syntax",
+
]
+
+
[[package]]
+
name = "regex-automata"
+
version = "0.4.6"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea"
+
dependencies = [
+
"aho-corasick",
+
"memchr",
+
"regex-syntax",
+
]
+
+
[[package]]
+
name = "regex-syntax"
+
version = "0.8.3"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
+
+
[[package]]
+
name = "rust-ini"
+
version = "0.21.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "0d625ed57d8f49af6cfa514c42e1a71fadcff60eb0b1c517ff82fe41aa025b41"
+
dependencies = [
+
"cfg-if",
+
"ordered-multimap",
+
"trim-in-place",
+
]
+
+
[[package]]
+
name = "serde"
+
version = "1.0.200"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f"
+
dependencies = [
+
"serde_derive",
+
]
+
+
[[package]]
+
name = "serde_derive"
+
version = "1.0.200"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb"
+
dependencies = [
+
"proc-macro2",
+
"quote",
+
"syn",
+
]
+
+
[[package]]
+
name = "strsim"
+
version = "0.8.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
+
+
[[package]]
+
name = "switch-to-configuration"
+
version = "0.1.0"
+
dependencies = [
+
"anyhow",
+
"dbus",
+
"dbus-codegen",
+
"glob",
+
"log",
+
"nix",
+
"regex",
+
"rust-ini",
+
"syslog",
+
]
+
+
[[package]]
+
name = "syn"
+
version = "2.0.60"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3"
+
dependencies = [
+
"proc-macro2",
+
"quote",
+
"unicode-ident",
+
]
+
+
[[package]]
+
name = "syslog"
+
version = "6.1.1"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "dfc7e95b5b795122fafe6519e27629b5ab4232c73ebb2428f568e82b1a457ad3"
+
dependencies = [
+
"error-chain",
+
"hostname",
+
"libc",
+
"log",
+
"time",
+
]
+
+
[[package]]
+
name = "textwrap"
+
version = "0.11.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
+
dependencies = [
+
"unicode-width",
+
]
+
+
[[package]]
+
name = "time"
+
version = "0.3.36"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
+
dependencies = [
+
"deranged",
+
"itoa",
+
"libc",
+
"num-conv",
+
"num_threads",
+
"powerfmt",
+
"serde",
+
"time-core",
+
"time-macros",
+
]
+
+
[[package]]
+
name = "time-core"
+
version = "0.1.2"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
+
+
[[package]]
+
name = "time-macros"
+
version = "0.2.18"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
+
dependencies = [
+
"num-conv",
+
"time-core",
+
]
+
+
[[package]]
+
name = "tiny-keccak"
+
version = "2.0.2"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
+
dependencies = [
+
"crunchy",
+
]
+
+
[[package]]
+
name = "trim-in-place"
+
version = "0.1.7"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "343e926fc669bc8cde4fa3129ab681c63671bae288b1f1081ceee6d9d37904fc"
+
+
[[package]]
+
name = "unicode-ident"
+
version = "1.0.12"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+
[[package]]
+
name = "unicode-width"
+
version = "0.1.12"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6"
+
+
[[package]]
+
name = "vec_map"
+
version = "0.8.2"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
+
+
[[package]]
+
name = "version_check"
+
version = "0.9.4"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+
[[package]]
+
name = "wasi"
+
version = "0.11.0+wasi-snapshot-preview1"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+
[[package]]
+
name = "winapi"
+
version = "0.3.9"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+
dependencies = [
+
"winapi-i686-pc-windows-gnu",
+
"winapi-x86_64-pc-windows-gnu",
+
]
+
+
[[package]]
+
name = "winapi-i686-pc-windows-gnu"
+
version = "0.4.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+
[[package]]
+
name = "winapi-x86_64-pc-windows-gnu"
+
version = "0.4.0"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+
[[package]]
+
name = "xml-rs"
+
version = "0.8.20"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193"
+19
pkgs/by-name/sw/switch-to-configuration-ng/Cargo.toml
···
[package]
name = "switch-to-configuration"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
anyhow = "1.0.82"
dbus = "0.9.7"
glob = "0.3.1"
log = "0.4.21"
nix = { version = "0.28.0", features = ["fs", "signal"] }
regex = "1.10.4"
rust-ini = "0.21.0"
syslog = "6.1.1"

[build-dependencies]
dbus-codegen = "0.11.0"
+30
pkgs/by-name/sw/switch-to-configuration-ng/build.rs
···
use std::io::Write;

fn code_for_dbus_xml(xml: impl AsRef<std::path::Path>) -> String {
    dbus_codegen::generate(
        &std::fs::read_to_string(xml).unwrap(),
        &dbus_codegen::GenOpts {
            methodtype: None,
            connectiontype: dbus_codegen::ConnectionType::Blocking,
            ..Default::default()
        },
    )
    .unwrap()
}

fn main() {
    let systemd_dbus_interface_dir = std::env::var("SYSTEMD_DBUS_INTERFACE_DIR").unwrap();
    let systemd_dbus_interface_dir = std::path::Path::new(systemd_dbus_interface_dir.as_str());

    let out_path = std::path::PathBuf::from(std::env::var("OUT_DIR").unwrap());

    let systemd_manager_code =
        code_for_dbus_xml(systemd_dbus_interface_dir.join("org.freedesktop.systemd1.Manager.xml"));
    let mut file = std::fs::File::create(out_path.join("systemd_manager.rs")).unwrap();
    file.write_all(systemd_manager_code.as_bytes()).unwrap();

    let logind_manager_code =
        code_for_dbus_xml(systemd_dbus_interface_dir.join("org.freedesktop.login1.Manager.xml"));
    let mut file = std::fs::File::create(out_path.join("logind_manager.rs")).unwrap();
    file.write_all(logind_manager_code.as_bytes()).unwrap();
}
+36
pkgs/by-name/sw/switch-to-configuration-ng/package.nix
···
+
{
+
buildPackages,
+
dbus,
+
lib,
+
pkg-config,
+
rustPlatform,
+
}:
+
+
rustPlatform.buildRustPackage {
+
pname = "switch-to-configuration";
+
version = "0.1.0";
+
+
src = lib.fileset.toSource {
+
root = ./.;
+
fileset = lib.fileset.unions [
+
./Cargo.lock
+
./Cargo.toml
+
./build.rs
+
./src
+
];
+
};
+
+
cargoLock.lockFile = ./Cargo.lock;
+
+
nativeBuildInputs = [ pkg-config ];
+
buildInputs = [ dbus ];
+
+
env.SYSTEMD_DBUS_INTERFACE_DIR = "${buildPackages.systemd}/share/dbus-1/interfaces";
+
+
meta = {
+
description = "NixOS switch-to-configuration program";
+
mainProgram = "switch-to-configuration";
+
maintainers = with lib.maintainers; [ jmbaur ];
+
license = lib.licenses.mit;
+
};
+
}
+2077
pkgs/by-name/sw/switch-to-configuration-ng/src/main.rs
···
+
use std::{
+
cell::RefCell,
+
collections::HashMap,
+
io::{BufRead, Write},
+
os::unix::{fs::PermissionsExt, process::CommandExt},
+
path::{Path, PathBuf},
+
rc::Rc,
+
str::FromStr,
+
sync::OnceLock,
+
time::Duration,
+
};
+
+
use anyhow::{anyhow, bail, Context, Result};
+
use dbus::{
+
blocking::{stdintf::org_freedesktop_dbus::Properties, LocalConnection, Proxy},
+
Message,
+
};
+
use glob::glob;
+
use ini::Ini;
+
use log::LevelFilter;
+
use nix::{
+
fcntl::{Flock, FlockArg, OFlag},
+
sys::{
+
signal::{self, SigHandler, Signal},
+
stat::Mode,
+
},
+
};
+
use regex::Regex;
+
use syslog::Facility;
+
+
mod systemd_manager {
+
#![allow(non_upper_case_globals)]
+
#![allow(non_camel_case_types)]
+
#![allow(non_snake_case)]
+
#![allow(unused)]
+
include!(concat!(env!("OUT_DIR"), "/systemd_manager.rs"));
+
}
+
+
mod logind_manager {
+
#![allow(non_upper_case_globals)]
+
#![allow(non_camel_case_types)]
+
#![allow(non_snake_case)]
+
#![allow(unused)]
+
include!(concat!(env!("OUT_DIR"), "/logind_manager.rs"));
+
}
+
+
use crate::systemd_manager::OrgFreedesktopSystemd1Manager;
+
use crate::{
+
logind_manager::OrgFreedesktopLogin1Manager,
+
systemd_manager::{
+
OrgFreedesktopSystemd1ManagerJobRemoved, OrgFreedesktopSystemd1ManagerReloading,
+
},
+
};
+
+
type UnitInfo = HashMap<String, HashMap<String, Vec<String>>>;
+
+
const SYSINIT_REACTIVATION_TARGET: &str = "sysinit-reactivation.target";
+
+
// To be robust against interruption, record what units need to be started etc. We read these files
+
// again every time this program starts to make sure we continue where the old (interrupted) script
+
// left off.
+
const START_LIST_FILE: &str = "/run/nixos/start-list";
+
const RESTART_LIST_FILE: &str = "/run/nixos/restart-list";
+
const RELOAD_LIST_FILE: &str = "/run/nixos/reload-list";
+
+
// Parse restart/reload requests by the activation script. Activation scripts may write
+
// newline-separated units to the restart file and switch-to-configuration will handle them. While
+
// `stopIfChanged = true` is ignored, switch-to-configuration will handle `restartIfChanged =
+
// false` and `reloadIfChanged = true`. This is the same as specifying a restart trigger in the
+
// NixOS module.
+
//
+
// The reload file asks this program to reload a unit. This is the same as specifying a reload
+
// trigger in the NixOS module and can be ignored if the unit is restarted in this activation.
+
const RESTART_BY_ACTIVATION_LIST_FILE: &str = "/run/nixos/activation-restart-list";
+
const RELOAD_BY_ACTIVATION_LIST_FILE: &str = "/run/nixos/activation-reload-list";
+
const DRY_RESTART_BY_ACTIVATION_LIST_FILE: &str = "/run/nixos/dry-activation-restart-list";
+
const DRY_RELOAD_BY_ACTIVATION_LIST_FILE: &str = "/run/nixos/dry-activation-reload-list";
+
+
#[derive(Debug, Clone, PartialEq)]
+
enum Action {
+
Switch,
+
Boot,
+
Test,
+
DryActivate,
+
}
+
+
impl std::str::FromStr for Action {
+
type Err = anyhow::Error;
+
+
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
+
Ok(match s {
+
"switch" => Self::Switch,
+
"boot" => Self::Boot,
+
"test" => Self::Test,
+
"dry-activate" => Self::DryActivate,
+
_ => bail!("invalid action {s}"),
+
})
+
}
+
}
+
+
impl Into<&'static str> for &Action {
+
fn into(self) -> &'static str {
+
match self {
+
Action::Switch => "switch",
+
Action::Boot => "boot",
+
Action::Test => "test",
+
Action::DryActivate => "dry-activate",
+
}
+
}
+
}
+
+
// Allow for this switch-to-configuration to remain consistent with the perl implementation.
+
// Perl's "die" uses errno to set the exit code: https://perldoc.perl.org/perlvar#%24%21
+
fn die() -> ! {
+
std::process::exit(std::io::Error::last_os_error().raw_os_error().unwrap_or(1));
+
}
+
+
fn parse_os_release() -> Result<HashMap<String, String>> {
+
Ok(std::fs::read_to_string("/etc/os-release")
+
.context("Failed to read /etc/os-release")?
+
.lines()
+
.into_iter()
+
.fold(HashMap::new(), |mut acc, line| {
+
if let Some((k, v)) = line.split_once('=') {
+
acc.insert(k.to_string(), v.to_string());
+
}
+
+
acc
+
}))
+
}
+
+
fn do_install_bootloader(command: &str, toplevel: &Path) -> Result<()> {
+
let mut cmd_split = command.split_whitespace();
+
let Some(argv0) = cmd_split.next() else {
+
bail!("missing first argument in install bootloader commands");
+
};
+
+
match std::process::Command::new(argv0)
+
.args(cmd_split.collect::<Vec<&str>>())
+
.arg(toplevel)
+
.spawn()
+
.map(|mut child| child.wait())
+
{
+
Ok(Ok(status)) if status.success() => {}
+
_ => {
+
eprintln!("Failed to install bootloader");
+
die();
+
}
+
}
+
+
Ok(())
+
}
+
+
extern "C" fn handle_sigpipe(_signal: nix::libc::c_int) {}
+
+
fn required_env(var: &str) -> anyhow::Result<String> {
+
std::env::var(var).with_context(|| format!("missing required environment variable ${var}"))
+
}
+
+
#[derive(Debug)]
+
struct UnitState {
+
state: String,
+
substate: String,
+
}
+
+
// Asks the currently running systemd instance via dbus which units are active. Returns a hash
+
// where the key is the name of each unit and the value is its state and substate.
+
fn get_active_units<'a>(
+
systemd_manager: &Proxy<'a, &LocalConnection>,
+
) -> Result<HashMap<String, UnitState>> {
+
let units = systemd_manager
+
.list_units_by_patterns(Vec::new(), Vec::new())
+
.context("Failed to list systemd units")?;
+
+
Ok(units
+
.into_iter()
+
.filter_map(
+
|(
+
id,
+
_description,
+
_load_state,
+
active_state,
+
sub_state,
+
following,
+
_unit_path,
+
_job_id,
+
_job_type,
+
_job_path,
+
)| {
+
if following == "" && active_state != "inactive" {
+
Some((id, active_state, sub_state))
+
} else {
+
None
+
}
+
},
+
)
+
.fold(HashMap::new(), |mut acc, (id, active_state, sub_state)| {
+
acc.insert(
+
id,
+
UnitState {
+
state: active_state,
+
substate: sub_state,
+
},
+
);
+
+
acc
+
}))
+
}
+
+
// This function takes a single INI file that specifies systemd configuration (such as a unit
+
// file) and parses it into a HashMap where the keys are the sections of the unit file and the
+
// values are HashMaps themselves. These HashMaps have the unit file keys as their keys (left
+
// side of =) and an array of all values that were set as their values. If a value is empty (for
+
// example `ExecStart=`), then all current definitions are removed.
+
//
+
// Instead of returning the HashMap, this function takes a mutable reference to a HashMap to return
+
// the data in. This allows calling the function multiple times with the same HashMap to parse
+
// override files.
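+
// For example, a drop-in that contains `ExecStart=` followed by `ExecStart=/some/new/command`
+
// (hypothetical value) ends up replacing any previously parsed ExecStart values with just the
+
// new command.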
+
fn parse_systemd_ini(data: &mut UnitInfo, unit_file: &Path) -> Result<()> {
+
let ini = Ini::load_from_file(unit_file)
+
.with_context(|| format!("Failed to load unit file {}", unit_file.display()))?;
+
+
// Copy over all sections
+
for (section, properties) in ini.iter() {
+
let Some(section) = section else {
+
continue;
+
};
+
+
if section == "Install" {
+
// Skip the [Install] section because it has no relevant keys for us
+
continue;
+
}
+
+
let section_map = if let Some(section_map) = data.get_mut(section) {
+
section_map
+
} else {
+
data.insert(section.to_string(), HashMap::new());
+
data.get_mut(section)
+
.ok_or(anyhow!("section name should exist in hashmap"))?
+
};
+
+
for (ini_key, _) in properties {
+
let values = properties.get_all(ini_key);
+
let values = values
+
.into_iter()
+
.map(String::from)
+
.collect::<Vec<String>>();
+
+
// Go over all values
+
let mut new_vals = Vec::new();
+
let mut clear_existing = false;
+
+
for val in values {
+
// If a value is empty, it's an override that tells us to clear the value
+
if val.is_empty() {
+
new_vals.clear();
+
clear_existing = true;
+
} else {
+
new_vals.push(val);
+
}
+
}
+
+
match (section_map.get_mut(ini_key), clear_existing) {
+
(Some(existing_vals), false) => existing_vals.extend(new_vals),
+
_ => _ = section_map.insert(ini_key.to_string(), new_vals),
+
};
+
}
+
}
+
+
Ok(())
+
}
+
+
// This function takes the path to a systemd configuration file (like a unit configuration) and
+
// parses it into a UnitInfo structure.
+
//
+
// If a directory with the same basename ending in .d exists next to the unit file, it will be
+
// assumed to contain override files which will be parsed as well and handled properly.
+
fn parse_unit(unit_file: &Path, base_unit_file: &Path) -> Result<UnitInfo> {
+
// Parse the main unit and all overrides
+
let mut unit_data = HashMap::new();
+
+
parse_systemd_ini(&mut unit_data, base_unit_file)?;
+
+
for entry in
+
glob(&format!("{}.d/*.conf", base_unit_file.display())).context("Invalid glob pattern")?
+
{
+
let Ok(entry) = entry else {
+
continue;
+
};
+
+
parse_systemd_ini(&mut unit_data, &entry)?;
+
}
+
+
// Handle drop-in template-unit instance overrides
+
if unit_file != base_unit_file {
+
for entry in
+
glob(&format!("{}.d/*.conf", unit_file.display())).context("Invalid glob pattern")?
+
{
+
let Ok(entry) = entry else {
+
continue;
+
};
+
+
parse_systemd_ini(&mut unit_data, &entry)?;
+
}
+
}
+
+
Ok(unit_data)
+
}
+
+
// Checks whether a specified boolean in a systemd unit is true or false, with a default that is
+
// applied when the value is not set.
+
fn parse_systemd_bool(
+
unit_data: Option<&UnitInfo>,
+
section_name: &str,
+
bool_name: &str,
+
default: bool,
+
) -> bool {
+
if let Some(Some(Some(Some(b)))) = unit_data.map(|data| {
+
data.get(section_name).map(|section| {
+
section.get(bool_name).map(|vals| {
+
vals.last()
+
.map(|last| matches!(last.as_str(), "1" | "yes" | "true" | "on"))
+
})
+
})
+
}) {
+
b
+
} else {
+
default
+
}
+
}
+
+
#[derive(Debug, PartialEq)]
+
enum UnitComparison {
+
Equal,
+
UnequalNeedsRestart,
+
UnequalNeedsReload,
+
}
+
+
// Compare the contents of two unit files and return whether the unit needs to be restarted or
+
// reloaded. If the units differ, the service is restarted unless the only difference is
+
// `X-Reload-Triggers` in the `Unit` section. If this is the only modification, the unit is
+
// reloaded instead of restarted. If the only difference is `Options` in the `[Mount]` section, the
+
// unit is reloaded rather than restarted.
+
fn compare_units(current_unit: &UnitInfo, new_unit: &UnitInfo) -> UnitComparison {
+
let mut ret = UnitComparison::Equal;
+
+
let unit_section_ignores = HashMap::from(
+
[
+
"X-Reload-Triggers",
+
"Description",
+
"Documentation",
+
"OnFailure",
+
"OnSuccess",
+
"OnFailureJobMode",
+
"IgnoreOnIsolate",
+
"StopWhenUnneeded",
+
"RefuseManualStart",
+
"RefuseManualStop",
+
"AllowIsolate",
+
"CollectMode",
+
"SourcePath",
+
]
+
.map(|name| (name, ())),
+
);
+
+
let mut section_cmp = new_unit.keys().fold(HashMap::new(), |mut acc, key| {
+
acc.insert(key.as_str(), ());
+
acc
+
});
+
+
// Iterate over the sections
+
for (section_name, section_val) in current_unit {
+
// Missing section in the new unit?
+
if !section_cmp.contains_key(section_name.as_str()) {
+
// If the [Unit] section was removed, make sure that only keys were in it that are
+
// ignored
+
if section_name == "Unit" {
+
for (ini_key, _ini_val) in section_val {
+
if !unit_section_ignores.contains_key(ini_key.as_str()) {
+
return UnitComparison::UnequalNeedsRestart;
+
}
+
}
+
continue; // check the next section
+
} else {
+
return UnitComparison::UnequalNeedsRestart;
+
}
+
}
+
+
section_cmp.remove(section_name.as_str());
+
+
// Comparison hash for the section contents
+
let mut ini_cmp = new_unit
+
.get(section_name)
+
.map(|section_val| {
+
section_val.keys().fold(HashMap::new(), |mut acc, ini_key| {
+
acc.insert(ini_key.as_str(), ());
+
acc
+
})
+
})
+
.unwrap_or_default();
+
+
// Iterate over the keys of the section
+
for (ini_key, current_value) in section_val {
+
ini_cmp.remove(ini_key.as_str());
+
let Some(Some(new_value)) = new_unit
+
.get(section_name)
+
.map(|section| section.get(ini_key))
+
else {
+
// If the key is missing in the new unit, they are different unless the key that is
+
// now missing is one of the ignored keys
+
if section_name == "Unit" && unit_section_ignores.contains_key(ini_key.as_str()) {
+
continue;
+
}
+
return UnitComparison::UnequalNeedsRestart;
+
};
+
+
// If the contents are different, the units are different
+
if current_value != new_value {
+
if section_name == "Unit" {
+
if ini_key == "X-Reload-Triggers" {
+
ret = UnitComparison::UnequalNeedsReload;
+
continue;
+
} else if unit_section_ignores.contains_key(ini_key.as_str()) {
+
continue;
+
}
+
}
+
+
// If this is a mount unit, check if it was only `Options`
+
if section_name == "Mount" && ini_key == "Options" {
+
ret = UnitComparison::UnequalNeedsReload;
+
continue;
+
}
+
+
return UnitComparison::UnequalNeedsRestart;
+
}
+
}
+
+
// A key was introduced that was missing in the previous unit
+
if !ini_cmp.is_empty() {
+
if section_name == "Unit" {
+
for (ini_key, _) in ini_cmp {
+
if ini_key == "X-Reload-Triggers" {
+
ret = UnitComparison::UnequalNeedsReload;
+
} else if unit_section_ignores.contains_key(ini_key) {
+
continue;
+
} else {
+
return UnitComparison::UnequalNeedsRestart;
+
}
+
}
+
} else {
+
return UnitComparison::UnequalNeedsRestart;
+
}
+
}
+
}
+
+
// A section was introduced that was missing in the previous unit
+
if !section_cmp.is_empty() {
+
if section_cmp.keys().len() == 1 && section_cmp.contains_key("Unit") {
+
if let Some(new_unit_unit) = new_unit.get("Unit") {
+
for (ini_key, _) in new_unit_unit {
+
if !unit_section_ignores.contains_key(ini_key.as_str()) {
+
return UnitComparison::UnequalNeedsRestart;
+
} else if ini_key == "X-Reload-Triggers" {
+
ret = UnitComparison::UnequalNeedsReload;
+
}
+
}
+
}
+
} else {
+
return UnitComparison::UnequalNeedsRestart;
+
}
+
}
+
+
ret
+
}
+
+
// Called when a unit exists in both the old and the new system and the units differ. This
+
// figures out which units are to be stopped, restarted, reloaded, started, or skipped.
+
fn handle_modified_unit(
+
toplevel: &Path,
+
unit: &str,
+
base_name: &str,
+
new_unit_file: &Path,
+
new_base_unit_file: &Path,
+
new_unit_info: Option<&UnitInfo>,
+
active_cur: &HashMap<String, UnitState>,
+
units_to_stop: &mut HashMap<String, ()>,
+
units_to_start: &mut HashMap<String, ()>,
+
units_to_reload: &mut HashMap<String, ()>,
+
units_to_restart: &mut HashMap<String, ()>,
+
units_to_skip: &mut HashMap<String, ()>,
+
) -> Result<()> {
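+
// `new_unit_info` is None when this is called for units requested via the activation restart
+
// lists; in that case the unit is recorded for a restart rather than a separate stop and start.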
+
let use_restart_as_stop_and_start = new_unit_info.is_none();
+
+
if matches!(
+
unit,
+
"sysinit.target" | "basic.target" | "multi-user.target" | "graphical.target"
+
) || unit.ends_with(".path")
+
|| unit.ends_with(".slice")
+
{
+
// Do nothing. These cannot be restarted directly.
+
+
// Slices and Paths don't have to be restarted since properties (resource limits and
+
// inotify watches) seem to get applied on daemon-reload.
+
} else if unit.ends_with(".mount") {
+
// Just restart the unit. We wouldn't have gotten into this function if only `Options`
+
// had changed, in which case the unit would be reloaded. The only exceptions are / and /nix,
+
// because it's very unlikely that we can safely unmount them, so we reload them instead. This
+
// means that we may not get all changes into the running system, but that's better than
+
// crashing it.
+
if unit == "-.mount" || unit == "nix.mount" {
+
units_to_reload.insert(unit.to_string(), ());
+
record_unit(RELOAD_LIST_FILE, unit);
+
} else {
+
units_to_restart.insert(unit.to_string(), ());
+
record_unit(RESTART_LIST_FILE, unit);
+
}
+
} else if unit.ends_with(".socket") {
+
// FIXME: do something?
+
// Attempt to fix this: https://github.com/NixOS/nixpkgs/pull/141192
+
// Revert of the attempt: https://github.com/NixOS/nixpkgs/pull/147609
+
// More details: https://github.com/NixOS/nixpkgs/issues/74899#issuecomment-981142430
+
} else {
+
let fallback = parse_unit(new_unit_file, new_base_unit_file)?;
+
let new_unit_info = if new_unit_info.is_some() {
+
new_unit_info
+
} else {
+
Some(&fallback)
+
};
+
+
if parse_systemd_bool(new_unit_info, "Service", "X-ReloadIfChanged", false)
+
&& !units_to_restart.contains_key(unit)
+
&& !(if use_restart_as_stop_and_start {
+
units_to_restart.contains_key(unit)
+
} else {
+
units_to_stop.contains_key(unit)
+
})
+
{
+
units_to_reload.insert(unit.to_string(), ());
+
record_unit(RELOAD_LIST_FILE, unit);
+
} else if !parse_systemd_bool(new_unit_info, "Service", "X-RestartIfChanged", true)
+
|| parse_systemd_bool(new_unit_info, "Unit", "RefuseManualStop", false)
+
|| parse_systemd_bool(new_unit_info, "Unit", "X-OnlyManualStart", false)
+
{
+
units_to_skip.insert(unit.to_string(), ());
+
} else {
+
// It doesn't make sense to stop and start non-services because they can't have
+
// ExecStop=
+
if !parse_systemd_bool(new_unit_info, "Service", "X-StopIfChanged", true)
+
|| !unit.ends_with(".service")
+
{
+
// This unit should be restarted instead of stopped and started.
+
units_to_restart.insert(unit.to_string(), ());
+
record_unit(RESTART_LIST_FILE, unit);
+
// Remove from units to reload so we don't restart and reload
+
if units_to_reload.contains_key(unit) {
+
units_to_reload.remove(unit);
+
unrecord_unit(RELOAD_LIST_FILE, unit);
+
}
+
} else {
+
// If this unit is socket-activated, then stop the socket unit(s) as well, and
+
// restart the socket(s) instead of the service.
+
let mut socket_activated = false;
+
if unit.ends_with(".service") {
+
let mut sockets = if let Some(Some(Some(sockets))) = new_unit_info.map(|info| {
+
info.get("Service")
+
.map(|service_section| service_section.get("Sockets"))
+
}) {
+
sockets
+
.join(" ")
+
.split_whitespace()
+
.into_iter()
+
.map(String::from)
+
.collect()
+
} else {
+
Vec::new()
+
};
+
+
if sockets.is_empty() {
+
sockets.push(format!("{}.socket", base_name));
+
}
+
+
for socket in &sockets {
+
if active_cur.contains_key(socket) {
+
// We can now be sure this is a socket-activated unit
+
+
if use_restart_as_stop_and_start {
+
units_to_restart.insert(socket.to_string(), ());
+
} else {
+
units_to_stop.insert(socket.to_string(), ());
+
}
+
+
// Only restart sockets that actually exist in new configuration:
+
if toplevel.join("etc/systemd/system").join(socket).exists() {
+
if use_restart_as_stop_and_start {
+
units_to_restart.insert(socket.to_string(), ());
+
record_unit(RESTART_LIST_FILE, socket);
+
} else {
+
units_to_start.insert(socket.to_string(), ());
+
record_unit(START_LIST_FILE, socket);
+
}
+
+
socket_activated = true;
+
}
+
+
// Remove from units to reload so we don't restart and reload
+
if units_to_reload.contains_key(unit) {
+
units_to_reload.remove(unit);
+
unrecord_unit(RELOAD_LIST_FILE, unit);
+
}
+
}
+
}
+
}
+
+
// If the unit is not socket-activated, record that this unit needs to be started
+
// below. We write this to a file to ensure that the service gets restarted if
+
// we're interrupted.
+
if !socket_activated {
+
if use_restart_as_stop_and_start {
+
units_to_restart.insert(unit.to_string(), ());
+
record_unit(RESTART_LIST_FILE, unit);
+
} else {
+
units_to_start.insert(unit.to_string(), ());
+
record_unit(START_LIST_FILE, unit);
+
}
+
}
+
+
if use_restart_as_stop_and_start {
+
units_to_restart.insert(unit.to_string(), ());
+
} else {
+
units_to_stop.insert(unit.to_string(), ());
+
}
+
// Remove from units to reload so we don't restart and reload
+
if units_to_reload.contains_key(unit) {
+
units_to_reload.remove(unit);
+
unrecord_unit(RELOAD_LIST_FILE, unit);
+
}
+
}
+
}
+
}
+
+
Ok(())
+
}
+
+
// Writes a unit name into a given file to be more resilient against crashes of the script. Does
+
// nothing when the action is dry-activate.
+
fn record_unit(p: impl AsRef<Path>, unit: &str) {
+
if ACTION.get() != Some(&Action::DryActivate) {
+
if let Ok(mut f) = std::fs::File::options().append(true).create(true).open(p) {
+
_ = writeln!(&mut f, "{unit}");
+
}
+
}
+
}
+
+
// The opposite of record_unit, removes a unit name from a file
+
fn unrecord_unit(p: impl AsRef<Path>, unit: &str) {
+
if ACTION.get() != Some(&Action::DryActivate) {
+
if let Ok(contents) = std::fs::read_to_string(&p) {
+
if let Ok(mut f) = std::fs::File::options()
+
.write(true)
+
.truncate(true)
+
.create(true)
+
.open(&p)
+
{
+
contents
+
.lines()
+
.into_iter()
+
.filter(|line| line != &unit)
+
.for_each(|line| _ = writeln!(&mut f, "{line}"))
+
}
+
}
+
}
+
}
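+
// Reads one of the persisted unit list files into a set of unit names (one unit per line, empty
+
// lines ignored). A missing file simply yields an empty set.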
+
+
fn map_from_list_file(p: impl AsRef<Path>) -> HashMap<String, ()> {
+
std::fs::read_to_string(p)
+
.unwrap_or_default()
+
.lines()
+
.filter(|line| !line.is_empty())
+
.into_iter()
+
.fold(HashMap::new(), |mut acc, line| {
+
acc.insert(line.to_string(), ());
+
acc
+
})
+
}
+
+
#[derive(Debug)]
+
struct Filesystem {
+
device: String,
+
fs_type: String,
+
options: String,
+
}
+
+
#[derive(Debug)]
+
#[allow(unused)]
+
struct Swap(String);
+
+
// Parse a fstab file, given its path. Returns a tuple of filesystems and swaps.
+
//
+
// Filesystems is a hash from mountpoint to { device, fsType, options }. Swaps is a hash from
+
// device to { options }.
+
fn parse_fstab(fstab: impl BufRead) -> (HashMap<String, Filesystem>, HashMap<String, Swap>) {
+
let mut filesystems = HashMap::new();
+
let mut swaps = HashMap::new();
+
+
for line in fstab.lines() {
+
let Ok(line) = line else {
+
break;
+
};
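+
// Skip comment lines. Note that this skips any line containing a '#', including lines that only
+
// carry a trailing comment.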
+
+
if line.contains('#') {
+
continue;
+
}
+
+
let mut split = line.split_whitespace();
+
let (Some(device), Some(mountpoint), Some(fs_type), options) = (
+
split.next(),
+
split.next(),
+
split.next(),
+
split.next().unwrap_or_default(),
+
) else {
+
continue;
+
};
+
+
if fs_type == "swap" {
+
swaps.insert(device.to_string(), Swap(options.to_string()));
+
} else {
+
filesystems.insert(
+
mountpoint.to_string(),
+
Filesystem {
+
device: device.to_string(),
+
fs_type: fs_type.to_string(),
+
options: options.to_string(),
+
},
+
);
+
}
+
}
+
+
(filesystems, swaps)
+
}
+
+
// Converts a path to the name of a systemd mount unit that would be responsible for mounting this
+
// path.
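+
// For example, "/" maps to "-.mount" and "/home" maps to "home.mount".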
+
fn path_to_unit_name(bin_path: &Path, path: &str) -> String {
+
let Ok(output) = std::process::Command::new(bin_path.join("systemd-escape"))
+
.arg("--suffix=mount")
+
.arg("-p")
+
.arg(path)
+
.output()
+
else {
+
eprintln!("Unable to escape {}!", path);
+
die();
+
};
+
+
let Ok(unit) = String::from_utf8(output.stdout) else {
+
eprintln!("Unable to convert systemd-espape output to valid UTF-8");
+
die();
+
};
+
+
unit.trim().to_string()
+
}
+
+
// Returns a HashMap containing the same contents as the passed in `units`, minus the units in
+
// `units_to_filter`.
+
fn filter_units(
+
units_to_filter: &HashMap<String, ()>,
+
units: &HashMap<String, ()>,
+
) -> HashMap<String, ()> {
+
let mut res = HashMap::new();
+
+
for (unit, _) in units {
+
if !units_to_filter.contains_key(unit) {
+
res.insert(unit.to_string(), ());
+
}
+
}
+
+
res
+
}
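+
// Asks systemd over dbus whether the given unit is currently active or activating.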
+
+
fn unit_is_active<'a>(conn: &LocalConnection, unit: &str) -> Result<bool> {
+
let unit_object_path = conn
+
.with_proxy(
+
"org.freedesktop.systemd1",
+
"/org/freedesktop/systemd1",
+
Duration::from_millis(5000),
+
)
+
.get_unit(unit)
+
.with_context(|| format!("Failed to get unit {unit}"))?;
+
+
let active_state: String = conn
+
.with_proxy(
+
"org.freedesktop.systemd1",
+
unit_object_path,
+
Duration::from_millis(5000),
+
)
+
.get("org.freedesktop.systemd1.Unit", "ActiveState")
+
.with_context(|| format!("Failed to get ExecMainStatus for {unit}"))?;
+
+
Ok(matches!(active_state.as_str(), "active" | "activating"))
+
}
+
+
static ACTION: OnceLock<Action> = OnceLock::new();
+
+
#[derive(Debug)]
+
enum Job {
+
Start,
+
Restart,
+
Reload,
+
Stop,
+
}
+
+
impl std::fmt::Display for Job {
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+
write!(
+
f,
+
"{}",
+
match self {
+
Job::Start => "start",
+
Job::Restart => "restart",
+
Job::Reload => "reload",
+
Job::Stop => "stop",
+
}
+
)
+
}
+
}
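+
// Creates dbus proxies for the systemd manager and logind objects, both with a 5 second timeout.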
+
+
fn new_dbus_proxies<'a>(
+
conn: &'a LocalConnection,
+
) -> (
+
Proxy<'a, &'a LocalConnection>,
+
Proxy<'a, &'a LocalConnection>,
+
) {
+
(
+
conn.with_proxy(
+
"org.freedesktop.systemd1",
+
"/org/freedesktop/systemd1",
+
Duration::from_millis(5000),
+
),
+
conn.with_proxy(
+
"org.freedesktop.login1",
+
"/org/freedesktop/login1",
+
Duration::from_millis(5000),
+
),
+
)
+
}
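+
// Processes dbus messages until all submitted jobs have been removed from the map by the
+
// JobRemoved signal handler, i.e. until every submitted job has finished.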
+
+
fn block_on_jobs(
+
conn: &LocalConnection,
+
submitted_jobs: &Rc<RefCell<HashMap<dbus::Path<'static>, Job>>>,
+
) {
+
while !submitted_jobs.borrow().is_empty() {
+
_ = conn.process(Duration::from_millis(500));
+
}
+
}
+
+
fn remove_file_if_exists(p: impl AsRef<Path>) -> std::io::Result<()> {
+
match std::fs::remove_file(p) {
+
Err(err) if err.kind() != std::io::ErrorKind::NotFound => Err(err),
+
_ => Ok(()),
+
}
+
}
+
+
/// Performs switch-to-configuration functionality for a single non-root user
+
fn do_user_switch(parent_exe: String) -> anyhow::Result<()> {
+
if Path::new(&parent_exe)
+
!= Path::new("/proc/self/exe")
+
.canonicalize()
+
.context("Failed to get full path to current executable")?
+
.as_path()
+
{
+
eprintln!(
+
r#"This program is not meant to be called from outside of switch-to-configuration."#
+
);
+
die();
+
}
+
+
let dbus_conn = LocalConnection::new_session().context("Failed to open dbus connection")?;
+
let (systemd, _) = new_dbus_proxies(&dbus_conn);
+
+
let nixos_activation_done = Rc::new(RefCell::new(false));
+
let _nixos_activation_done = nixos_activation_done.clone();
+
let jobs_token = systemd
+
.match_signal(
+
move |signal: OrgFreedesktopSystemd1ManagerJobRemoved,
+
_: &LocalConnection,
+
_: &Message| {
+
if signal.unit.as_str() == "nixos-activation.service" {
+
*_nixos_activation_done.borrow_mut() = true;
+
}
+
+
true
+
},
+
)
+
.context("Failed to add signal match for systemd removed jobs")?;
+
+
// The systemd user session seems to not send a Reloaded signal, so we don't have anything to
+
// wait on here.
+
_ = systemd.reexecute();
+
+
systemd
+
.restart_unit("nixos-activation.service", "replace")
+
.context("Failed to restart nixos-activation.service")?;
+
+
while !*nixos_activation_done.borrow() {
+
_ = dbus_conn
+
.process(Duration::from_millis(500))
+
.context("Failed to process dbus messages")?;
+
}
+
+
dbus_conn
+
.remove_match(jobs_token)
+
.context("Failed to remove jobs token")?;
+
+
Ok(())
+
}
+
+
/// Performs switch-to-configuration functionality for the entire system
+
fn do_system_switch() -> anyhow::Result<()> {
+
let out = PathBuf::from(required_env("OUT")?);
+
let toplevel = PathBuf::from(required_env("TOPLEVEL")?);
+
let distro_id = required_env("DISTRO_ID")?;
+
let install_bootloader = required_env("INSTALL_BOOTLOADER")?;
+
let locale_archive = required_env("LOCALE_ARCHIVE")?;
+
let new_systemd = PathBuf::from(required_env("SYSTEMD")?);
+
+
let mut args = std::env::args();
+
let argv0 = args.next().ok_or(anyhow!("no argv[0]"))?;
+
+
let Some(Ok(action)) = args.next().map(|a| Action::from_str(&a)) else {
+
eprintln!(
+
r#"Usage: {} [switch|boot|test|dry-activate]
+
switch: make the configuration the boot default and activate now
+
boot: make the configuration the boot default
+
test: activate the configuration, but don't make it the boot default
+
dry-activate: show what would be done if this configuration were activated
+
"#,
+
argv0
+
.split(std::path::MAIN_SEPARATOR_STR)
+
.last()
+
.unwrap_or("switch-to-configuration")
+
);
+
std::process::exit(1);
+
};
+
+
let action = ACTION.get_or_init(|| action);
+
+
// The action that is to be performed (switch, boot, test, or dry-activate). It is also exposed
+
// via an environment variable from now on.
+
std::env::set_var("NIXOS_ACTION", Into::<&'static str>::into(action));
+
+
// Expose the locale archive as an environment variable for systemctl and the activation script
+
if !locale_archive.is_empty() {
+
std::env::set_var("LOCALE_ARCHIVE", locale_archive);
+
}
+
+
let current_system_bin = std::path::PathBuf::from("/run/current-system/sw/bin")
+
.canonicalize()
+
.context("/run/current-system/sw/bin is missing")?;
+
+
let os_release = parse_os_release().context("Failed to parse os-release")?;
+
+
let distro_id_re = Regex::new(format!("^\"?{}\"?$", distro_id).as_str())
+
.context("Invalid regex for distro ID")?;
+
+
// This is a NixOS installation if it has /etc/NIXOS or a proper /etc/os-release.
+
if !Path::new("/etc/NIXOS").is_file()
+
&& !os_release
+
.get("ID")
+
.map(|id| distro_id_re.is_match(id))
+
.unwrap_or_default()
+
{
+
eprintln!("This is not a NixOS installation!");
+
die();
+
}
+
+
std::fs::create_dir_all("/run/nixos").context("Failed to create /run/nixos directory")?;
+
let perms = std::fs::Permissions::from_mode(0o755);
+
std::fs::set_permissions("/run/nixos", perms)
+
.context("Failed to set permissions on /run/nixos directory")?;
+
+
let Ok(lock) = std::fs::OpenOptions::new()
+
.append(true)
+
.create(true)
+
.open("/run/nixos/switch-to-configuration.lock")
+
else {
+
eprintln!("Could not open lock");
+
die();
+
};
+
+
let Ok(_lock) = Flock::lock(lock, FlockArg::LockExclusive) else {
+
eprintln!("Could not acquire lock");
+
die();
+
};
+
+
if syslog::init(Facility::LOG_USER, LevelFilter::Debug, Some("nixos")).is_err() {
+
bail!("Failed to initialize logger");
+
}
+
+
// Install or update the bootloader.
+
if matches!(action, Action::Switch | Action::Boot) {
+
do_install_bootloader(&install_bootloader, &toplevel)?;
+
}
+
+
// Just in case the new configuration hangs the system, do a sync now.
+
if std::env::var("NIXOS_NO_SYNC")
+
.as_deref()
+
.unwrap_or_default()
+
!= "1"
+
{
+
let fd = nix::fcntl::open("/nix/store", OFlag::O_NOCTTY, Mode::S_IROTH)
+
.context("Failed to open /nix/store")?;
+
nix::unistd::syncfs(fd).context("Failed to sync /nix/store")?;
+
}
+
+
if *action == Action::Boot {
+
std::process::exit(0);
+
}
+
+
let current_init_interface_version =
+
std::fs::read_to_string("/run/current-system/init-interface-version").unwrap_or_default();
+
+
let new_init_interface_version =
+
std::fs::read_to_string(toplevel.join("init-interface-version"))
+
.context("File init-interface-version should exist")?;
+
+
// Check if we can activate the new configuration.
+
if current_init_interface_version != new_init_interface_version {
+
eprintln!(
+
r#"Warning: the new NixOS configuration has an ‘init’ that is
+
incompatible with the current configuration. The new configuration
+
won't take effect until you reboot the system.
+
"#
+
);
+
std::process::exit(100);
+
}
+
+
// Ignore SIGHUP so that we're not killed if we're running on (say) virtual console 1 and we
+
// restart the "tty1" unit.
+
let handler = SigHandler::Handler(handle_sigpipe);
+
unsafe { signal::signal(Signal::SIGPIPE, handler) }.context("Failed to set SIGPIPE handler")?;
+
+
let mut units_to_stop = HashMap::new();
+
let mut units_to_skip = HashMap::new();
+
let mut units_to_filter = HashMap::new(); // units not shown
+
+
let mut units_to_start = map_from_list_file(START_LIST_FILE);
+
let mut units_to_restart = map_from_list_file(RESTART_LIST_FILE);
+
let mut units_to_reload = map_from_list_file(RELOAD_LIST_FILE);
+
+
let dbus_conn = LocalConnection::new_system().context("Failed to open dbus connection")?;
+
let (systemd, logind) = new_dbus_proxies(&dbus_conn);
+
+
let submitted_jobs = Rc::new(RefCell::new(HashMap::new()));
+
let finished_jobs = Rc::new(RefCell::new(HashMap::new()));
+
+
let systemd_reload_status = Rc::new(RefCell::new(false));
+
+
systemd
+
.subscribe()
+
.context("Failed to subscribe to systemd dbus messages")?;
+
+
// Wait for the system to have finished booting.
+
loop {
+
let system_state: String = systemd
+
.get("org.freedesktop.systemd1.Manager", "SystemState")
+
.context("Failed to get system state")?;
+
+
match system_state.as_str() {
+
"running" | "degraded" | "maintenance" => break,
+
_ => {
+
_ = dbus_conn
+
.process(Duration::from_millis(500))
+
.context("Failed to process dbus messages")?
+
}
+
}
+
}
+
+
let _systemd_reload_status = systemd_reload_status.clone();
+
let reloading_token = systemd
+
.match_signal(
+
move |signal: OrgFreedesktopSystemd1ManagerReloading,
+
_: &LocalConnection,
+
_msg: &Message| {
+
*_systemd_reload_status.borrow_mut() = signal.active;
+
+
true
+
},
+
)
+
.context("Failed to add systemd Reloading match")?;
+
+
let _submitted_jobs = submitted_jobs.clone();
+
let _finished_jobs = finished_jobs.clone();
+
let job_removed_token = systemd
+
.match_signal(
+
move |signal: OrgFreedesktopSystemd1ManagerJobRemoved,
+
_: &LocalConnection,
+
_msg: &Message| {
+
if let Some(old) = _submitted_jobs.borrow_mut().remove(&signal.job) {
+
let mut finished_jobs = _finished_jobs.borrow_mut();
+
finished_jobs.insert(signal.job, (signal.unit, old, signal.result));
+
}
+
+
true
+
},
+
)
+
.context("Failed to add systemd JobRemoved match")?;
+
+
let current_active_units = get_active_units(&systemd)?;
+
+
let template_unit_re = Regex::new(r"^(.*)@[^\.]*\.(.*)$")
+
.context("Invalid regex for matching systemd template units")?;
+
let unit_name_re = Regex::new(r"^(.*)\.[[:lower:]]*$")
+
.context("Invalid regex for matching systemd unit names")?;
+
+
for (unit, unit_state) in &current_active_units {
+
let current_unit_file = Path::new("/etc/systemd/system").join(&unit);
+
let new_unit_file = toplevel.join("etc/systemd/system").join(&unit);
+
+
let mut base_unit = unit.clone();
+
let mut current_base_unit_file = current_unit_file.clone();
+
let mut new_base_unit_file = new_unit_file.clone();
+
+
// Detect template instances
+
if let Some((Some(template_name), Some(template_instance))) =
+
template_unit_re.captures(&unit).map(|captures| {
+
(
+
captures.get(1).map(|c| c.as_str()),
+
captures.get(2).map(|c| c.as_str()),
+
)
+
})
+
{
+
if !current_unit_file.exists() && !new_unit_file.exists() {
+
base_unit = format!("{}@.{}", template_name, template_instance);
+
current_base_unit_file = Path::new("/etc/systemd/system").join(&base_unit);
+
new_base_unit_file = toplevel.join("etc/systemd/system").join(&base_unit);
+
}
+
}
+
+
let mut base_name = base_unit.as_str();
+
if let Some(Some(new_base_name)) = unit_name_re
+
.captures(&base_unit)
+
.map(|capture| capture.get(1).map(|first| first.as_str()))
+
{
+
base_name = new_base_name;
+
}
+
+
if current_base_unit_file.exists()
+
&& (unit_state.state == "active" || unit_state.state == "activating")
+
{
+
if new_base_unit_file
+
.canonicalize()
+
.map(|full_path| full_path == Path::new("/dev/null"))
+
.unwrap_or(true)
+
{
+
let current_unit_info = parse_unit(&current_unit_file, &current_base_unit_file)?;
+
if parse_systemd_bool(Some(&current_unit_info), "Unit", "X-StopOnRemoval", true) {
+
_ = units_to_stop.insert(unit.to_string(), ());
+
}
+
} else if unit.ends_with(".target") {
+
let new_unit_info = parse_unit(&new_unit_file, &new_base_unit_file)?;
+
+
// Cause all active target units to be restarted below. This should start most
+
// changed units we stop here as well as any new dependencies (including new mounts
+
// and swap devices). FIXME: the suspend target is sometimes active after the
+
// system has resumed, which probably should not be the case. Just ignore it.
+
if !matches!(
+
unit.as_str(),
+
"suspend.target" | "hibernate.target" | "hybrid-sleep.target"
+
) {
+
if !(parse_systemd_bool(
+
Some(&new_unit_info),
+
"Unit",
+
"RefuseManualStart",
+
false,
+
) || parse_systemd_bool(
+
Some(&new_unit_info),
+
"Unit",
+
"X-OnlyManualStart",
+
false,
+
)) {
+
units_to_start.insert(unit.to_string(), ());
+
record_unit(START_LIST_FILE, unit);
+
// Don't spam the user with target units that always get started.
+
if std::env::var("STC_DISPLAY_ALL_UNITS").as_deref() != Ok("1") {
+
units_to_filter.insert(unit.to_string(), ());
+
}
+
}
+
}
+
+
// Stop targets that have X-StopOnReconfiguration set. This is necessary to respect
+
// dependency orderings involving targets: if unit X starts after target Y and
+
// target Y starts after unit Z, then if X and Z have both changed, then X should
+
// be restarted after Z. However, if target Y is in the "active" state, X and Z
+
// will be restarted at the same time because X's dependency on Y is already
+
// satisfied. Thus, we need to stop Y first. Stopping a target generally has no
+
// effect on other units (unless there is a PartOf dependency), so this is just a
+
// bookkeeping thing to get systemd to do the right thing.
+
if parse_systemd_bool(
+
Some(&new_unit_info),
+
"Unit",
+
"X-StopOnReconfiguration",
+
false,
+
) {
+
units_to_stop.insert(unit.to_string(), ());
+
}
+
} else {
+
let current_unit_info = parse_unit(&current_unit_file, &current_base_unit_file)?;
+
let new_unit_info = parse_unit(&new_unit_file, &new_base_unit_file)?;
+
match compare_units(&current_unit_info, &new_unit_info) {
+
UnitComparison::UnequalNeedsRestart => {
+
handle_modified_unit(
+
&toplevel,
+
&unit,
+
base_name,
+
&new_unit_file,
+
&new_base_unit_file,
+
Some(&new_unit_info),
+
&current_active_units,
+
&mut units_to_stop,
+
&mut units_to_start,
+
&mut units_to_reload,
+
&mut units_to_restart,
+
&mut units_to_skip,
+
)?;
+
}
+
UnitComparison::UnequalNeedsReload if !units_to_restart.contains_key(unit) => {
+
units_to_reload.insert(unit.clone(), ());
+
record_unit(RELOAD_LIST_FILE, &unit);
+
}
+
_ => {}
+
}
+
}
+
}
+
}
+
+
// Compare the previous and new fstab to figure out which filesystems need a remount or need to
+
// be unmounted. New filesystems are mounted automatically by starting local-fs.target.
+
// FIXME: might be nicer if we generated units for all mounts; then we could unify this with
+
// the unit checking code above.
+
let (current_filesystems, current_swaps) = std::fs::read_to_string("/etc/fstab")
+
.map(|fstab| parse_fstab(std::io::Cursor::new(fstab)))
+
.unwrap_or_default();
+
let (new_filesystems, new_swaps) = std::fs::read_to_string(toplevel.join("etc/fstab"))
+
.map(|fstab| parse_fstab(std::io::Cursor::new(fstab)))
+
.unwrap_or_default();
+
+
for (mountpoint, current_filesystem) in current_filesystems {
+
// Use systemd-escape from the current system, before the daemon is re-executed.
+
let unit = path_to_unit_name(&current_system_bin, &mountpoint);
+
if let Some(new_filesystem) = new_filesystems.get(&mountpoint) {
+
if current_filesystem.fs_type != new_filesystem.fs_type
+
|| current_filesystem.device != new_filesystem.device
+
{
+
if matches!(mountpoint.as_str(), "/" | "/nix") {
+
if current_filesystem.options != new_filesystem.options {
+
// Mount options changed, so remount it.
+
units_to_reload.insert(unit.to_string(), ());
+
record_unit(RELOAD_LIST_FILE, &unit)
+
} else {
+
// Don't unmount / or /nix if the device changed
+
units_to_skip.insert(unit, ());
+
}
+
} else {
+
// Filesystem type or device changed, so unmount and mount it.
+
units_to_restart.insert(unit.to_string(), ());
+
record_unit(RESTART_LIST_FILE, &unit);
+
}
+
} else if current_filesystem.options != new_filesystem.options {
+
// Mount options changed, so remount it.
+
units_to_reload.insert(unit.to_string(), ());
+
record_unit(RELOAD_LIST_FILE, &unit)
+
}
+
} else {
+
// Filesystem entry disappeared, so unmount it.
+
units_to_stop.insert(unit, ());
+
}
+
}
+
+
// Also handles swap devices.
+
for (device, _) in current_swaps {
+
if new_swaps.get(&device).is_none() {
+
// Swap entry disappeared, so turn it off. Can't use "systemctl stop" here because
+
// systemd has lots of alias units that prevent a stop from actually calling "swapoff".
+
if *action == Action::DryActivate {
+
eprintln!("would stop swap device: {}", &device);
+
} else {
+
eprintln!("stopping swap device: {}", &device);
+
let c_device = std::ffi::CString::new(device.clone())
+
.context("failed to convert device to cstring")?;
+
if unsafe { nix::libc::swapoff(c_device.as_ptr()) } != 0 {
+
let err = std::io::Error::last_os_error();
+
eprintln!("Failed to stop swapping to {device}: {err}");
+
}
+
}
+
}
+
// FIXME: update swap options (i.e. its priority).
+
}
+
+
// Should we have systemd re-exec itself?
+
let current_pid1_path = Path::new("/proc/1/exe")
+
.canonicalize()
+
.unwrap_or_else(|_| PathBuf::from("/unknown"));
+
let current_systemd_system_config = Path::new("/etc/systemd/system.conf")
+
.canonicalize()
+
.unwrap_or_else(|_| PathBuf::from("/unknown"));
+
let Ok(new_pid1_path) = new_systemd.join("lib/systemd/systemd").canonicalize() else {
+
die();
+
};
+
let new_systemd_system_config = toplevel
+
.join("etc/systemd/system.conf")
+
.canonicalize()
+
.unwrap_or_else(|_| PathBuf::from("/unknown"));
+
+
let restart_systemd = current_pid1_path != new_pid1_path
+
|| current_systemd_system_config != new_systemd_system_config;
+
+
let units_to_stop_filtered = filter_units(&units_to_filter, &units_to_stop);
+
+
// Show dry-run actions.
+
if *action == Action::DryActivate {
+
if !units_to_stop_filtered.is_empty() {
+
let mut units = units_to_stop_filtered
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!("would stop the following units: {}", units.join(", "));
+
}
+
+
if !units_to_skip.is_empty() {
+
let mut units = units_to_skip
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!(
+
"would NOT stop the following changed units: {}",
+
units.join(", ")
+
);
+
}
+
+
eprintln!("would activate the configuration...");
+
_ = std::process::Command::new(out.join("dry-activate"))
+
.arg(&out)
+
.spawn()
+
.map(|mut child| child.wait());
+
+
// Handle the activation script requesting the restart or reload of a unit.
+
for unit in std::fs::read_to_string(DRY_RESTART_BY_ACTIVATION_LIST_FILE)
+
.unwrap_or_default()
+
.lines()
+
{
+
let current_unit_file = Path::new("/etc/systemd/system").join(unit);
+
let new_unit_file = toplevel.join("etc/systemd/system").join(unit);
+
let mut base_unit = unit.to_string();
+
let mut new_base_unit_file = new_unit_file.clone();
+
+
// Detect template instances.
+
if let Some((Some(template_name), Some(template_instance))) =
+
template_unit_re.captures(&unit).map(|captures| {
+
(
+
captures.get(1).map(|c| c.as_str()),
+
captures.get(2).map(|c| c.as_str()),
+
)
+
})
+
{
+
if !current_unit_file.exists() && !new_unit_file.exists() {
+
base_unit = format!("{}@.{}", template_name, template_instance);
+
new_base_unit_file = toplevel.join("etc/systemd/system").join(&base_unit);
+
}
+
}
+
+
let mut base_name = base_unit.as_str();
+
if let Some(Some(new_base_name)) = unit_name_re
+
.captures(&base_unit)
+
.map(|capture| capture.get(1).map(|first| first.as_str()))
+
{
+
base_name = new_base_name;
+
}
+
+
// Start units if they were not active previously
+
if !current_active_units.contains_key(unit) {
+
units_to_start.insert(unit.to_string(), ());
+
continue;
+
}
+
+
handle_modified_unit(
+
&toplevel,
+
unit,
+
base_name,
+
&new_unit_file,
+
&new_base_unit_file,
+
None,
+
&current_active_units,
+
&mut units_to_stop,
+
&mut units_to_start,
+
&mut units_to_reload,
+
&mut units_to_restart,
+
&mut units_to_skip,
+
)?;
+
}
+
+
remove_file_if_exists(DRY_RESTART_BY_ACTIVATION_LIST_FILE)
+
.with_context(|| format!("Failed to remove {}", DRY_RESTART_BY_ACTIVATION_LIST_FILE))?;
+
+
for unit in std::fs::read_to_string(DRY_RELOAD_BY_ACTIVATION_LIST_FILE)
+
.unwrap_or_default()
+
.lines()
+
{
+
if current_active_units.contains_key(unit)
+
&& !units_to_restart.contains_key(unit)
+
&& !units_to_stop.contains_key(unit)
+
{
+
units_to_reload.insert(unit.to_string(), ());
+
record_unit(RELOAD_LIST_FILE, unit);
+
}
+
}
+
+
remove_file_if_exists(DRY_RELOAD_BY_ACTIVATION_LIST_FILE)
+
.with_context(|| format!("Failed to remove {}", DRY_RELOAD_BY_ACTIVATION_LIST_FILE))?;
+
+
if restart_systemd {
+
eprintln!("would restart systemd");
+
}
+
+
if !units_to_reload.is_empty() {
+
let mut units = units_to_reload
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!("would reload the following units: {}", units.join(", "));
+
}
+
+
if !units_to_restart.is_empty() {
+
let mut units = units_to_restart
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!("would restart the following units: {}", units.join(", "));
+
}
+
+
let units_to_start_filtered = filter_units(&units_to_filter, &units_to_start);
+
if !units_to_start_filtered.is_empty() {
+
let mut units = units_to_start_filtered
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!("would start the following units: {}", units.join(", "));
+
}
+
+
std::process::exit(0);
+
}
+
+
log::info!("switching to system configuration {}", toplevel.display());
+
+
if !units_to_stop.is_empty() {
+
if !units_to_stop_filtered.is_empty() {
+
let mut units = units_to_stop_filtered
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!("stopping the following units: {}", units.join(", "));
+
}
+
+
for unit in units_to_stop.keys() {
+
match systemd.stop_unit(unit, "replace") {
+
Ok(job_path) => {
+
let mut j = submitted_jobs.borrow_mut();
+
j.insert(job_path.to_owned(), Job::Stop);
+
}
+
Err(_) => {}
+
};
+
}
+
+
block_on_jobs(&dbus_conn, &submitted_jobs);
+
}
+
+
if !units_to_skip.is_empty() {
+
let mut units = units_to_skip
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!(
+
"NOT restarting the following changed units: {}",
+
units.join(", "),
+
);
+
}
+
+
// Wait for all stop jobs to finish
+
block_on_jobs(&dbus_conn, &submitted_jobs);
+
+
let mut exit_code = 0;
+
+
// Activate the new configuration (i.e., update /etc, make accounts, and so on).
+
eprintln!("activating the configuration...");
+
match std::process::Command::new(out.join("activate"))
+
.arg(&out)
+
.spawn()
+
.map(|mut child| child.wait())
+
{
+
Ok(Ok(status)) if status.success() => {}
+
Err(_) => {
+
// allow toplevel to not have an activation script
+
}
+
_ => {
+
eprintln!("Failed to run activate script");
+
exit_code = 2;
+
}
+
}
+
+
// Handle the activation script requesting the restart or reload of a unit.
+
for unit in std::fs::read_to_string(RESTART_BY_ACTIVATION_LIST_FILE)
+
.unwrap_or_default()
+
.lines()
+
{
+
let new_unit_file = toplevel.join("etc/systemd/system").join(unit);
+
let mut base_unit = unit.to_string();
+
let mut new_base_unit_file = new_unit_file.clone();
+
+
// Detect template instances.
+
if let Some((Some(template_name), Some(template_instance))) =
+
template_unit_re.captures(&unit).map(|captures| {
+
(
+
captures.get(1).map(|c| c.as_str()),
+
captures.get(2).map(|c| c.as_str()),
+
)
+
})
+
{
+
if !new_unit_file.exists() {
+
base_unit = format!("{}@.{}", template_name, template_instance);
+
new_base_unit_file = toplevel.join("etc/systemd/system").join(&base_unit);
+
}
+
}
+
+
let mut base_name = base_unit.as_str();
+
if let Some(Some(new_base_name)) = unit_name_re
+
.captures(&base_unit)
+
.map(|capture| capture.get(1).map(|first| first.as_str()))
+
{
+
base_name = new_base_name;
+
}
+
+
// Start units if they were not active previously
+
if !current_active_units.contains_key(unit) {
+
units_to_start.insert(unit.to_string(), ());
+
record_unit(START_LIST_FILE, unit);
+
continue;
+
}
+
+
handle_modified_unit(
+
&toplevel,
+
unit,
+
base_name,
+
&new_unit_file,
+
&new_base_unit_file,
+
None,
+
&current_active_units,
+
&mut units_to_stop,
+
&mut units_to_start,
+
&mut units_to_reload,
+
&mut units_to_restart,
+
&mut units_to_skip,
+
)?;
+
}
+
+
// We can remove the file now because it has been propagated to the other restart/reload files
+
remove_file_if_exists(RESTART_BY_ACTIVATION_LIST_FILE)
+
.with_context(|| format!("Failed to remove {}", RESTART_BY_ACTIVATION_LIST_FILE))?;
+
+
for unit in std::fs::read_to_string(RELOAD_BY_ACTIVATION_LIST_FILE)
+
.unwrap_or_default()
+
.lines()
+
{
+
if current_active_units.contains_key(unit)
+
&& !units_to_restart.contains_key(unit)
+
&& !units_to_stop.contains_key(unit)
+
{
+
units_to_reload.insert(unit.to_string(), ());
+
record_unit(RELOAD_LIST_FILE, unit);
+
}
+
}
+
+
// We can remove the file now because it has been propagated to the other reload file
+
remove_file_if_exists(RELOAD_BY_ACTIVATION_LIST_FILE)
+
.with_context(|| format!("Failed to remove {}", RELOAD_BY_ACTIVATION_LIST_FILE))?;
+
+
// Restart systemd if necessary. Note that this is done using the current version of systemd,
+
// just in case the new one has trouble communicating with the running pid 1.
+
if restart_systemd {
+
eprintln!("restarting systemd...");
+
_ = systemd.reexecute(); // we don't get a dbus reply here
+
+
while !*systemd_reload_status.borrow() {
+
_ = dbus_conn
+
.process(Duration::from_millis(500))
+
.context("Failed to process dbus messages")?;
+
}
+
}
+
+
// Forget about previously failed services.
+
systemd
+
.reset_failed()
+
.context("Failed to reset failed units")?;
+
+
// Make systemd reload its units.
+
_ = systemd.reload(); // we don't get a dbus reply here
+
while !*systemd_reload_status.borrow() {
+
_ = dbus_conn
+
.process(Duration::from_millis(500))
+
.context("Failed to process dbus messages")?;
+
}
+
+
dbus_conn
+
.remove_match(reloading_token)
+
.context("Failed to cleanup systemd Reloading match")?;
+
+
// Reload user units
+
match logind.list_users() {
+
Err(err) => {
+
eprintln!("Unable to list users with logind: {err}");
+
die();
+
}
+
Ok(users) => {
+
for (uid, name, _) in users {
+
eprintln!("reloading user units for {}...", name);
+
let myself = Path::new("/proc/self/exe")
+
.canonicalize()
+
.context("Failed to get full path to /proc/self/exe")?;
+
+
std::process::Command::new(&myself)
+
.uid(uid)
+
.env("XDG_RUNTIME_DIR", format!("/run/user/{}", uid))
+
.env("__NIXOS_SWITCH_TO_CONFIGURATION_PARENT_EXE", &myself)
+
.spawn()
+
.map(|mut child| _ = child.wait())
+
.with_context(|| format!("Failed to run user activation for {name}"))?;
+
}
+
}
+
}
+
+
// Restart sysinit-reactivation.target. This target only exists to restart services ordered
+
// before sysinit.target. We cannot use X-StopOnReconfiguration to restart sysinit.target
+
// because then ALL services of the system would be restarted since all normal services have a
+
// default dependency on sysinit.target. sysinit-reactivation.target ensures that services
+
// ordered BEFORE sysinit.target get re-started in the correct order. Ordering between these
+
// services is respected.
+
eprintln!("restarting {SYSINIT_REACTIVATION_TARGET}");
+
match systemd.restart_unit(SYSINIT_REACTIVATION_TARGET, "replace") {
+
Ok(job_path) => {
+
let mut jobs = submitted_jobs.borrow_mut();
+
jobs.insert(job_path, Job::Restart);
+
}
+
Err(err) => {
+
eprintln!("Failed to restart {SYSINIT_REACTIVATION_TARGET}: {err}");
+
exit_code = 4;
+
}
+
}
+
+
// Wait for the restart job of sysinit-reactivation.target to finish
+
block_on_jobs(&dbus_conn, &submitted_jobs);
+
+
// Before reloading we need to ensure that the units are still active. They may have been
+
// deactivated because one of their requirements got stopped. If they are inactive but should
+
// have been reloaded, the user probably expects them to be started.
+
if !units_to_reload.is_empty() {
+
for (unit, _) in units_to_reload.clone() {
+
if !unit_is_active(&dbus_conn, &unit)? {
+
// Figure out if we need to start the unit
+
let unit_info = parse_unit(
+
toplevel.join("etc/systemd/system").join(&unit).as_path(),
+
toplevel.join("etc/systemd/system").join(&unit).as_path(),
+
)?;
+
if !parse_systemd_bool(Some(&unit_info), "Unit", "RefuseManualStart", false)
+
|| parse_systemd_bool(Some(&unit_info), "Unit", "X-OnlyManualStart", false)
+
{
+
units_to_start.insert(unit.clone(), ());
+
record_unit(START_LIST_FILE, &unit);
+
}
+
// Don't reload the unit, reloading would fail
+
units_to_reload.remove(&unit);
+
unrecord_unit(RELOAD_LIST_FILE, &unit);
+
}
+
}
+
}
+
+
// Reload units that need it. This includes remounting changed mount units.
+
if !units_to_reload.is_empty() {
+
let mut units = units_to_reload
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!("reloading the following units: {}", units.join(", "));
+
+
for unit in units {
+
match systemd.reload_unit(unit, "replace") {
+
Ok(job_path) => {
+
submitted_jobs
+
.borrow_mut()
+
.insert(job_path.clone(), Job::Reload);
+
}
+
Err(err) => {
+
eprintln!("Failed to reload {unit}: {err}");
+
exit_code = 4;
+
}
+
}
+
}
+
+
block_on_jobs(&dbus_conn, &submitted_jobs);
+
+
remove_file_if_exists(RELOAD_LIST_FILE)
+
.with_context(|| format!("Failed to remove {}", RELOAD_LIST_FILE))?;
+
}
+
+
// Restart changed services (those that have to be restarted rather than stopped and started).
+
if !units_to_restart.is_empty() {
+
let mut units = units_to_restart
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!("restarting the following units: {}", units.join(", "));
+
+
for unit in units {
+
match systemd.restart_unit(unit, "replace") {
+
Ok(job_path) => {
+
let mut jobs = submitted_jobs.borrow_mut();
+
jobs.insert(job_path, Job::Restart);
+
}
+
Err(err) => {
+
eprintln!("Failed to restart {unit}: {err}");
+
exit_code = 4;
+
}
+
}
+
}
+
+
block_on_jobs(&dbus_conn, &submitted_jobs);
+
+
remove_file_if_exists(RESTART_LIST_FILE)
+
.with_context(|| format!("Failed to remove {}", RESTART_LIST_FILE))?;
+
}
+
+
// Start all active targets, as well as changed units we stopped above. The latter is necessary
+
// because some may not be dependencies of the targets (i.e., they were manually started).
+
// FIXME: detect units that are symlinks to other units. We shouldn't start both at the same
+
// time because we'll get a "Failed to add path to set" error from systemd.
+
let units_to_start_filtered = filter_units(&units_to_filter, &units_to_start);
+
if !units_to_start_filtered.is_empty() {
+
let mut units = units_to_start_filtered
+
.keys()
+
.into_iter()
+
.map(String::as_str)
+
.collect::<Vec<&str>>();
+
units.sort_by_key(|name| name.to_lowercase());
+
eprintln!("starting the following units: {}", units.join(", "));
+
}
+
+
for unit in units_to_start.keys() {
+
match systemd.start_unit(unit, "replace") {
+
Ok(job_path) => {
+
let mut jobs = submitted_jobs.borrow_mut();
+
jobs.insert(job_path, Job::Start);
+
}
+
Err(err) => {
+
eprintln!("Failed to start {unit}: {err}");
+
exit_code = 4;
+
}
+
}
+
}
+
+
block_on_jobs(&dbus_conn, &submitted_jobs);
+
+
remove_file_if_exists(START_LIST_FILE)
+
.with_context(|| format!("Failed to remove {}", START_LIST_FILE))?;
+
+
for (unit, job, result) in finished_jobs.borrow().values() {
+
match result.as_str() {
+
"timeout" | "failed" | "dependency" => {
+
eprintln!("Failed to {} {}", job, unit);
+
exit_code = 4;
+
}
+
_ => {}
+
}
+
}
+
+
dbus_conn
+
.remove_match(job_removed_token)
+
.context("Failed to cleanup systemd job match")?;
+
+
// Print failed and new units.
+
let mut failed_units = Vec::new();
+
let mut new_units = Vec::new();
+
+
// NOTE: We want switch-to-configuration to be able to report to the user any units that failed
+
// to start or units that systemd had to restart due to having previously failed. This is
+
// inherently a race condition between how long our program takes to run and how long the unit
+
// in question takes to potentially fail. The amount of time we wait for new messages on the
+
// bus to settle is purely tuned so that this program is compatible with the Perl
+
// implementation.
+
//
+
// Wait for events from systemd to settle. process() will return true if we have received any
+
// messages on the bus.
+
while dbus_conn
+
.process(Duration::from_millis(250))
+
.unwrap_or_default()
+
{}
+
+
let new_active_units = get_active_units(&systemd)?;
+
+
for (unit, unit_state) in new_active_units {
+
if &unit_state.state == "failed" {
+
failed_units.push(unit);
+
continue;
+
}
+
+
if unit_state.substate == "auto-restart" && unit.ends_with(".service") {
+
// A unit in auto-restart substate is a failure *if* it previously failed to start
+
let unit_object_path = systemd
+
.get_unit(&unit)
+
.context("Failed to get unit info for {unit}")?;
+
let exec_main_status: i32 = dbus_conn
+
.with_proxy(
+
"org.freedesktop.systemd1",
+
unit_object_path,
+
Duration::from_millis(5000),
+
)
+
.get("org.freedesktop.systemd1.Service", "ExecMainStatus")
+
.context("Failed to get ExecMainStatus for {unit}")?;
+
+
if exec_main_status != 0 {
+
failed_units.push(unit);
+
continue;
+
}
+
}
+
+
// Ignore scopes since they are not managed by this script but rather created and managed
+
// by third-party services via the systemd dbus API. This only lists units that are not
+
// failed (including ones that are in auto-restart but have not failed previously)
+
if unit_state.state != "failed"
+
&& !current_active_units.contains_key(&unit)
+
&& !unit.ends_with(".scope")
+
{
+
new_units.push(unit);
+
}
+
}
+
+
if !new_units.is_empty() {
+
new_units.sort_by_key(|name| name.to_lowercase());
+
eprintln!(
+
"the following new units were started: {}",
+
new_units.join(", ")
+
);
+
}
+
+
if !failed_units.is_empty() {
+
failed_units.sort_by_key(|name| name.to_lowercase());
+
eprintln!(
+
"warning: the following units failed: {}",
+
failed_units.join(", ")
+
);
+
_ = std::process::Command::new(new_systemd.join("bin/systemctl"))
+
.arg("status")
+
.arg("--no-pager")
+
.arg("--full")
+
.args(failed_units)
+
.spawn()
+
.map(|mut child| child.wait());
+
+
exit_code = 4;
+
}
+
+
if exit_code == 0 {
+
log::info!(
+
"finished switching to system configuration {}",
+
toplevel.display()
+
);
+
} else {
+
log::error!(
+
"switching to system configuration {} failed (status {})",
+
toplevel.display(),
+
exit_code
+
);
+
}
+
+
std::process::exit(exit_code);
+
}
+
+
fn main() -> anyhow::Result<()> {
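+
// Root without the parent-exe marker performs the system switch; when the marker is set (by
+
// do_system_switch re-invoking this binary per user), the per-user switch runs instead; any
+
// other invocation is rejected.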
+
match (
+
unsafe { nix::libc::geteuid() },
+
std::env::var("__NIXOS_SWITCH_TO_CONFIGURATION_PARENT_EXE").ok(),
+
) {
+
(0, None) => do_system_switch(),
+
(1..=u32::MAX, None) => bail!("This program does not support being run outside of the switch-to-configuration environment"),
+
(_, Some(parent_exe)) => do_user_switch(parent_exe),
+
}
+
}
+
+
#[cfg(test)]
+
mod tests {
+
use std::collections::HashMap;
+
+
#[test]
+
fn parse_fstab() {
+
{
+
let (filesystems, swaps) = super::parse_fstab(std::io::Cursor::new(""));
+
assert!(filesystems.is_empty());
+
assert!(swaps.is_empty());
+
}
+
+
{
+
let (filesystems, swaps) = super::parse_fstab(std::io::Cursor::new(
+
r#"\
+
invalid
+
"#,
+
));
+
assert!(filesystems.is_empty());
+
assert!(swaps.is_empty());
+
}
+
+
{
+
let (filesystems, swaps) = super::parse_fstab(std::io::Cursor::new(
+
r#"\
+
# This is a generated file. Do not edit!
+
#
+
# To make changes, edit the fileSystems and swapDevices NixOS options
+
# in your /etc/nixos/configuration.nix file.
+
#
+
# <file system> <mount point> <type> <options> <dump> <pass>
+
+
# Filesystems.
+
/dev/mapper/root / btrfs x-initrd.mount,compress=zstd,noatime,defaults 0 0
+
/dev/disk/by-partlabel/BOOT /boot vfat x-systemd.automount 0 2
+
/dev/disk/by-partlabel/home /home ext4 defaults 0 2
+
/dev/mapper/usr /nix/.ro-store erofs x-initrd.mount,ro 0 2
+
+
+
# Swap devices.
+
"#,
+
));
+
assert_eq!(filesystems.len(), 4);
+
assert_eq!(swaps.len(), 0);
+
let home_fs = filesystems.get("/home").unwrap();
+
assert_eq!(home_fs.fs_type, "ext4");
+
assert_eq!(home_fs.device, "/dev/disk/by-partlabel/home");
+
assert_eq!(home_fs.options, "defaults");
+
}
+
}
+
+
#[test]
+
fn filter_units() {
+
assert_eq!(
+
super::filter_units(&HashMap::from([]), &HashMap::from([])),
+
HashMap::from([])
+
);
+
+
assert_eq!(
+
super::filter_units(
+
&HashMap::from([("foo".to_string(), ())]),
+
&HashMap::from([("foo".to_string(), ()), ("bar".to_string(), ())])
+
),
+
HashMap::from([("bar".to_string(), ())])
+
);
+
}
+
+
#[test]
+
fn compare_units() {
+
{
+
assert!(
+
super::compare_units(&HashMap::from([]), &HashMap::from([]))
+
== super::UnitComparison::Equal
+
);
+
+
assert!(
+
super::compare_units(
+
&HashMap::from([("Unit".to_string(), HashMap::from([]))]),
+
&HashMap::from([])
+
) == super::UnitComparison::Equal
+
);
+
+
assert!(
+
super::compare_units(
+
&HashMap::from([(
+
"Unit".to_string(),
+
HashMap::from([(
+
"X-Reload-Triggers".to_string(),
+
vec!["foobar".to_string()]
+
)])
+
)]),
+
&HashMap::from([])
+
) == super::UnitComparison::Equal
+
);
+
}
+
+
{
+
assert!(
+
super::compare_units(
+
&HashMap::from([("foobar".to_string(), HashMap::from([]))]),
+
&HashMap::from([])
+
) == super::UnitComparison::UnequalNeedsRestart
+
);
+
+
assert!(
+
super::compare_units(
+
&HashMap::from([(
+
"Mount".to_string(),
+
HashMap::from([("Options".to_string(), vec![])])
+
)]),
+
&HashMap::from([(
+
"Mount".to_string(),
+
HashMap::from([("Options".to_string(), vec!["ro".to_string()])])
+
)])
+
) == super::UnitComparison::UnequalNeedsReload
+
);
+
}
+
+
{
+
assert!(
+
super::compare_units(
+
&HashMap::from([]),
+
&HashMap::from([(
+
"Unit".to_string(),
+
HashMap::from([(
+
"X-Reload-Triggers".to_string(),
+
vec!["foobar".to_string()]
+
)])
+
)])
+
) == super::UnitComparison::UnequalNeedsReload
+
);
+
+
assert!(
+
super::compare_units(
+
&HashMap::from([(
+
"Unit".to_string(),
+
HashMap::from([(
+
"X-Reload-Triggers".to_string(),
+
vec!["foobar".to_string()]
+
)])
+
)]),
+
&HashMap::from([(
+
"Unit".to_string(),
+
HashMap::from([(
+
"X-Reload-Triggers".to_string(),
+
vec!["barfoo".to_string()]
+
)])
+
)])
+
) == super::UnitComparison::UnequalNeedsReload
+
);
+
+
assert!(
+
super::compare_units(
+
&HashMap::from([(
+
"Mount".to_string(),
+
HashMap::from([("Type".to_string(), vec!["ext4".to_string()])])
+
)]),
+
&HashMap::from([(
+
"Mount".to_string(),
+
HashMap::from([("Type".to_string(), vec!["btrfs".to_string()])])
+
)])
+
) == super::UnitComparison::UnequalNeedsRestart
+
);
+
}
+
}
+
}
+33
pkgs/by-name/tr/treefmt2/package.nix
···
+
{ lib, buildGoModule, fetchFromGitHub }:
+
buildGoModule rec {
+
pname = "treefmt";
+
version = "2.0.0-rc1";
+
+
src = fetchFromGitHub {
+
owner = "numtide";
+
repo = "treefmt";
+
rev = "v${version}";
+
hash = "sha256-iRjd7iYd3617XZrGD6Bi6d1SoE8dgATMbT4AMXklfgM=";
+
};
+
+
vendorHash = "sha256-xbXy5Csl2JD5/F5mtvh8J36VZqrUIfO3OBV/LE+KzWA=";
+
+
subPackages = [ "." ];
+
+
CGO_ENABLED = 1;
+
+
ldflags = [
+
"-s"
+
"-w"
+
"-X git.numtide.com/numtide/treefmt/build.Name=${pname}"
+
"-X git.numtide.com/numtide/treefmt/build.Version=v${version}"
+
];
+
+
meta = {
+
description = "one CLI to format the code tree";
+
homepage = "https://github.com/numtide/treefmt";
+
license = lib.licenses.mit;
+
maintainers = [ lib.maintainers.brianmcgee lib.maintainers.zimbatm ];
+
mainProgram = "treefmt";
+
};
+
}
+49
pkgs/by-name/yd/ydotool/package.nix
···
+
{
+
lib,
+
stdenv,
+
fetchFromGitHub,
+
cmake,
+
scdoc,
+
util-linux,
+
xorg,
+
nixosTests,
+
}:
+
+
stdenv.mkDerivation (finalAttrs: {
+
pname = "ydotool";
+
version = "1.0.4";
+
+
src = fetchFromGitHub {
+
owner = "ReimuNotMoe";
+
repo = "ydotool";
+
rev = "v${finalAttrs.version}";
+
hash = "sha256-MtanR+cxz6FsbNBngqLE+ITKPZFHmWGsD1mBDk0OVng=";
+
};
+
+
postPatch = ''
+
substituteInPlace Daemon/ydotoold.c \
+
--replace "/usr/bin/xinput" "${xorg.xinput}/bin/xinput"
+
substituteInPlace Daemon/ydotool.service.in \
+
--replace "/usr/bin/kill" "${util-linux}/bin/kill"
+
'';
+
+
strictDeps = true;
+
nativeBuildInputs = [
+
cmake
+
scdoc
+
];
+
+
passthru.tests.basic = nixosTests.ydotool;
+
+
meta = {
+
description = "Generic Linux command-line automation tool";
+
homepage = "https://github.com/ReimuNotMoe/ydotool";
+
license = lib.licenses.agpl3Plus;
+
mainProgram = "ydotool";
+
maintainers = with lib.maintainers; [
+
willibutz
+
kraem
+
];
+
platforms = lib.platforms.linux;
+
};
+
})
+2 -2
pkgs/data/themes/colloid-gtk-theme/default.nix
···
stdenvNoCC.mkDerivation rec {
inherit pname;
-
version = "2024-04-14";
+
version = "2024-05-13";
src = fetchFromGitHub {
owner = "vinceliuice";
repo = pname;
rev = version;
-
hash = "sha256-DQYnR2V86KbIJfAA4ObRtKQq1IuECie1qdOBYVu8NtA=";
+
hash = "sha256-24U1iMByy+cFQuLUWYPBSuvJwYzwS0rCr7L6OWyMUz0=";
};
nativeBuildInputs = [
+2 -2
pkgs/development/libraries/cjson/default.nix
···
stdenv.mkDerivation rec {
pname = "cjson";
-
version = "1.7.17";
+
version = "1.7.18";
src = fetchFromGitHub {
owner = "DaveGamble";
repo = "cJSON";
rev = "v${version}";
-
sha256 = "sha256-jU9UbXvdXiNXFh7c9p/LppMsuqryFK40NTTyQGbNU84=";
+
sha256 = "sha256-UgUWc/+Zie2QNijxKK5GFe4Ypk97EidG8nTiiHhn5Ys=";
};
nativeBuildInputs = [ cmake ];
+2 -2
pkgs/development/libraries/science/math/openspecfun/default.nix
···
stdenv.mkDerivation rec {
pname = "openspecfun";
-
version = "0.5.6";
+
version = "0.5.7";
src = fetchFromGitHub {
owner = "JuliaLang";
repo = "openspecfun";
rev = "v${version}";
-
sha256 = "sha256-4MPoRMtDTkdvDfhNXKk/80pZjXRNEPcysLNTb5ohxWk=";
+
sha256 = "sha256-fx9z6bbU2V4x6Pr7/vmlSxkWxZ6qTYuPxnfqKLv08CA=";
};
makeFlags = [ "prefix=$(out)" ];
+2 -2
pkgs/development/python-modules/aiounifi/default.nix
···
buildPythonPackage rec {
pname = "aiounifi";
-
version = "77";
+
version = "78";
pyproject = true;
disabled = pythonOlder "3.11";
···
owner = "Kane610";
repo = "aiounifi";
rev = "refs/tags/v${version}";
-
hash = "sha256-c3UR/AwnQLm6h1jsM6mk6MOii2/xQzFcrci+oG4BsDs=";
+
hash = "sha256-fY3VsXJfs/uqqQjR6Sp03XlkCOk+8GvXq476dgagJQ8=";
};
postPatch = ''
+16 -19
pkgs/development/python-modules/androidtvremote2/default.nix
···
-
{ lib
-
, aiofiles
-
, buildPythonPackage
-
, cryptography
-
, fetchFromGitHub
-
, protobuf
-
, pythonOlder
-
, setuptools
+
{
+
lib,
+
aiofiles,
+
buildPythonPackage,
+
cryptography,
+
fetchFromGitHub,
+
protobuf,
+
pythonOlder,
+
setuptools,
}:
buildPythonPackage rec {
pname = "androidtvremote2";
-
version = "0.0.15";
-
format = "pyproject";
+
version = "0.1.1";
+
pyproject = true;
-
disabled = pythonOlder "3.7";
+
disabled = pythonOlder "3.10";
src = fetchFromGitHub {
owner = "tronikos";
repo = "androidtvremote2";
rev = "refs/tags/v${version}";
-
hash = "sha256-aLNmuMuwaMhl/Utv6bVU4fR8zmhj0fUiInL4RHT4dVw=";
+
hash = "sha256-Zem2IWBUWmyVdBjqoVKFk+/lg5T7CPXCKFXhFusQFLY=";
};
-
nativeBuildInputs = [
-
setuptools
-
];
+
build-system = [ setuptools ];
-
propagatedBuildInputs = [
+
dependencies = [
aiofiles
cryptography
protobuf
];
-
pythonImportsCheck = [
-
"androidtvremote2"
-
];
+
pythonImportsCheck = [ "androidtvremote2" ];
# Module only has a dummy test
doCheck = false;
+2 -2
pkgs/development/python-modules/b2sdk/default.nix
···
buildPythonPackage rec {
pname = "b2sdk";
-
version = "2.1.0";
+
version = "2.2.1";
pyproject = true;
disabled = pythonOlder "3.7";
···
owner = "Backblaze";
repo = "b2-sdk-python";
rev = "refs/tags/v${version}";
-
hash = "sha256-/TuPT+Y0asGGtfRNxxvpX0WkS4O4asFmIuqSdQ4I9fQ=";
+
hash = "sha256-ENEAynUd66sjS+/Qoy9qyffPpSvxdnY1Nwdi+JTE96I=";
};
build-system = [ pdm-backend ];
+2 -2
pkgs/development/python-modules/bc-detect-secrets/default.nix
···
buildPythonPackage rec {
pname = "bc-detect-secrets";
-
version = "1.5.9";
+
version = "1.5.10";
pyproject = true;
disabled = pythonOlder "3.8";
···
owner = "bridgecrewio";
repo = "detect-secrets";
rev = "refs/tags/${version}";
-
hash = "sha256-3O1taxOxVI+36h2Qz+dzOuilmapm9QO4X4XjRgX4kUc=";
+
hash = "sha256-b0t5xv4fWiErQsYvDKTJuweiGLqS2WpR9ECGo/cpvQ8=";
};
build-system = [ setuptools ];
+6 -2
pkgs/development/python-modules/blackjax/default.nix
···
buildPythonPackage rec {
pname = "blackjax";
-
version = "1.2.0";
+
version = "1.2.1";
pyproject = true;
disabled = pythonOlder "3.9";
···
owner = "blackjax-devs";
repo = "blackjax";
rev = "refs/tags/${version}";
-
hash = "sha256-vXyxK3xALKG61YGK7fmoqQNGfOiagHFrvnU02WKZThw=";
+
hash = "sha256-VoWBCjFMyE5LVJyf7du/pKlnvDHj22lguiP6ZUzH9ak=";
};
build-system = [
···
disabledTests = [
# too slow
"test_adaptive_tempered_smc"
+
] ++ lib.optionals (stdenv.isLinux && stdenv.isAarch64) [
+
# Numerical test (AssertionError)
+
# https://github.com/blackjax-devs/blackjax/issues/668
+
"test_chees_adaptation"
];
pythonImportsCheck = [
-100
pkgs/development/python-modules/cadquery/default.nix
···
-
{ lib
-
, buildPythonPackage
-
, toPythonModule
-
, pythonOlder
-
, pythonAtLeast
-
, fetchFromGitHub
-
, pyparsing
-
, opencascade-occt
-
, stdenv
-
, python
-
, cmake
-
, swig
-
, freetype
-
, libGL
-
, libGLU
-
, libX11
-
, six
-
, pytest
-
, makeFontsConf
-
, freefont_ttf
-
, Cocoa
-
}:
-
-
let
-
pythonocc-core-cadquery = toPythonModule (stdenv.mkDerivation {
-
pname = "pythonocc-core-cadquery";
-
version = "0.18.2";
-
format = "setuptools";
-
-
src = fetchFromGitHub {
-
owner = "CadQuery";
-
repo = "pythonocc-core";
-
# no proper release to to use, this commit copied from the Anaconda receipe
-
rev = "701e924ae40701cbe6f9992bcbdc2ef22aa9b5ab";
-
sha256 = "07zmiiw74dyj4v0ar5vqkvk30wzcpjjzbi04nsdk5mnlzslmyi6c";
-
};
-
-
nativeBuildInputs = [
-
cmake
-
swig
-
];
-
-
buildInputs = [
-
python
-
opencascade-occt
-
freetype
-
libGL
-
libGLU
-
libX11
-
] ++ lib.optionals stdenv.isDarwin [ Cocoa ];
-
-
propagatedBuildInputs = [
-
six
-
];
-
-
cmakeFlags = [
-
"-Wno-dev"
-
"-DPYTHONOCC_INSTALL_DIRECTORY=${placeholder "out"}/${python.sitePackages}/OCC"
-
];
-
});
-
-
in
-
buildPythonPackage rec {
-
pname = "cadquery";
-
version = "2.0";
-
-
src = fetchFromGitHub {
-
owner = "CadQuery";
-
repo = pname;
-
rev = version;
-
sha256 = "1n63b6cjjrdwdfmwq0zx1xabjnhndk9mgfkm4w7z9ardcfpvg84l";
-
};
-
-
buildInputs = [
-
opencascade-occt
-
];
-
-
propagatedBuildInputs = [
-
pyparsing
-
pythonocc-core-cadquery
-
];
-
-
FONTCONFIG_FILE = makeFontsConf {
-
fontDirectories = [ freefont_ttf ];
-
};
-
-
nativeCheckInputs = [
-
pytest
-
];
-
-
disabled = pythonOlder "3.6" || pythonAtLeast "3.8";
-
-
meta = with lib; {
-
description = "Parametric scripting language for creating and traversing CAD models";
-
homepage = "https://github.com/CadQuery/cadquery";
-
license = licenses.asl20;
-
maintainers = with maintainers; [ marcus7070 ];
-
broken = true;
-
};
-
}
-27
pkgs/development/python-modules/cryptacular/default.nix
···
-
{ lib, buildPythonPackage, fetchPypi, isPy27, pythonAtLeast
-
, coverage, nose, pbkdf2 }:
-
-
buildPythonPackage rec {
-
pname = "cryptacular";
-
version = "1.6.2";
-
format = "setuptools";
-
-
src = fetchPypi {
-
inherit pname version;
-
sha256 = "7b529cb2b8a3c7e5be77921bf1ebc653d4d3a8f791375cc6f971b20db2404176";
-
};
-
-
buildInputs = [ coverage nose ];
-
propagatedBuildInputs = [ pbkdf2 ];
-
-
# TODO: tests fail: TypeError: object of type 'NoneType' has no len()
-
doCheck = false;
-
-
# Python >=2.7.15, >=3.6.5 are incompatible:
-
# https://bitbucket.org/dholth/cryptacular/issues/11
-
disabled = isPy27 || pythonAtLeast "3.6";
-
-
meta = with lib; {
-
maintainers = with maintainers; [ domenkozar ];
-
};
-
}
+19 -5
pkgs/development/python-modules/devito/default.nix
···
, nbval
, psutil
, py-cpuinfo
-
, pyrevolve
, pytest-xdist
, pytestCheckHook
, pythonOlder
···
buildPythonPackage rec {
pname = "devito";
-
version = "4.8.3";
+
version = "4.8.6";
format = "setuptools";
disabled = pythonOlder "3.7";
···
owner = "devitocodes";
repo = "devito";
rev = "refs/tags/v${version}";
-
hash = "sha256-g9rRJF1JrZ6+s3tj4RZHuGOjt5LJjtK9I5CJmq4CJL4=";
+
hash = "sha256-unuJLp+zTyGpOk5O78xYbW6Zrzp60WyqgT9mf2YpTG4=";
};
pythonRemoveDeps = [
···
pythonRelaxDepsHook
];
-
propagatedBuildInputs = [
+
dependencies = [
anytree
cached-property
cgen
···
multidict
psutil
py-cpuinfo
-
pyrevolve
scipy
sympy
] ++ lib.optionals stdenv.cc.isClang [
···
"test_setupWOverQ"
"test_shortcuts"
"test_subdomainset_mpi"
+
"test_subdomains_mpi"
+
] ++ lib.optionals (stdenv.isLinux && stdenv.isAarch64) [
+
# FAILED tests/test_unexpansion.py::Test2Pass::test_v0 - assert False
+
"test_v0"
+
] ++ lib.optionals stdenv.isDarwin [
+
# FAILED tests/test_caching.py::TestCaching::test_special_symbols - ValueError: not enough values to unpack (expected 3, got 2)
+
"test_special_symbols"
+
+
# FAILED tests/test_unexpansion.py::Test2Pass::test_v0 - codepy.CompileError: module compilation failed
+
"test_v0"
+
] ++ lib.optionals (stdenv.isDarwin && stdenv.isAarch64) [
+
# Numerical tests
+
"test_lm_fb"
+
"test_lm_ds"
];
disabledTestPaths = [
···
"tests/test_data.py"
"tests/test_dse.py"
"tests/test_gradient.py"
+
] ++ lib.optionals ((stdenv.isLinux && stdenv.isAarch64) || stdenv.isDarwin) [
+
"tests/test_dle.py"
];
pythonImportsCheck = [
+2 -2
pkgs/development/python-modules/dirigera/default.nix
···
buildPythonPackage rec {
pname = "dirigera";
-
version = "1.1.6";
+
version = "1.1.7";
pyproject = true;
disabled = pythonOlder "3.7";
···
owner = "Leggin";
repo = "dirigera";
rev = "refs/tags/v${version}";
-
hash = "sha256-OXq8eJyZQBsJEK81GxstfMHqDShlZyOWSXLwP9Zfpqw=";
+
hash = "sha256-KAwXpcs0EvmaxBVgZ7TlT1hDI0m7f8hJigUTluVATsw=";
};
build-system = [ setuptools ];
+2 -2
pkgs/development/python-modules/dogpile-cache/default.nix
···
buildPythonPackage rec {
pname = "dogpile-cache";
-
version = "1.3.2";
+
version = "1.3.3";
format = "pyproject";
disabled = pythonOlder "3.6";
···
src = fetchPypi {
pname = "dogpile.cache";
inherit version;
-
hash = "sha256-T3HcAzOtNRycb3BPW6Kje/Ucbu0EN9Gt9W4HWVmv5js=";
+
hash = "sha256-+EuO0LD7KX0VEFVEf6jcr3uuVm1Nve/s3MHzdmKrWIs=";
};
nativeBuildInputs = [
+15 -2
pkgs/development/python-modules/equinox/default.nix
···
pythonImportsCheck = [ "equinox" ];
disabledTests = [
-
# Failed: DID NOT WARN. No warnings of type (<class 'UserWarning'>,) were emitted.
-
"test_tracetime"
+
# For simplicity, JAX has removed its internal frames from the traceback of the following exception.
+
# https://github.com/patrick-kidger/equinox/issues/716
+
"test_abstract"
+
"test_complicated"
+
"test_grad"
+
"test_jvp"
+
"test_mlp"
+
"test_num_traces"
+
"test_pytree_in"
+
"test_simple"
+
"test_vmap"
+
+
# AssertionError: assert 'foo:\n pri...pe=float32)\n' == 'foo:\n pri...pe=float32)\n'
+
# Also reported in patrick-kidger/equinox#716
+
"test_backward_nan"
];
meta = with lib; {
+4 -4
pkgs/development/python-modules/flax/default.nix
···
buildPythonPackage rec {
pname = "flax";
-
version = "0.8.2";
+
version = "0.8.3";
pyproject = true;
disabled = pythonOlder "3.9";
···
owner = "google";
repo = "flax";
rev = "refs/tags/v${version}";
-
hash = "sha256-UABgJGe1grUSkwOJpjeIoFqhXsqG//HlC1YyYPxXV+g=";
+
hash = "sha256-uDGTyksUZTTL6FiTJP+qteFLOjr75dcTj9yRJ6Jm8xU=";
};
-
nativeBuildInputs = [
+
build-system = [
jaxlib
pythonRelaxDepsHook
setuptools-scm
];
-
propagatedBuildInputs = [
+
dependencies = [
jax
msgpack
numpy
-29
pkgs/development/python-modules/globre/default.nix
···
-
{ lib
-
, pythonAtLeast
-
, buildPythonPackage
-
, fetchPypi
-
, nose
-
, coverage
-
}:
-
-
buildPythonPackage rec {
-
pname = "globre";
-
version = "0.1.5";
-
format = "setuptools";
-
# https://github.com/metagriffin/globre/issues/7
-
disabled = pythonAtLeast "3.7";
-
-
src = fetchPypi {
-
inherit pname version;
-
sha256 = "1qhjpg0722871dm5m7mmldf6c7mx58fbdvk1ix5i3s9py82448gf";
-
};
-
-
nativeCheckInputs = [ nose coverage ];
-
-
meta = with lib; {
-
homepage = "https://github.com/metagriffin/globre";
-
description = "A python glob-like regular expression generation library.";
-
maintainers = with maintainers; [ glittershark ];
-
license = licenses.gpl3;
-
};
-
}
+2 -2
pkgs/development/python-modules/hikari/default.nix
···
}:
buildPythonPackage rec {
pname = "hikari";
-
version = "2.0.0.dev124";
+
version = "2.0.0.dev125";
src = fetchFromGitHub {
owner = "hikari-py";
repo = "hikari";
rev = version;
-
hash = "sha256-zDgU3Ol/I3YNnwXm+aBh20KwonW746p5TObuwuWORog=";
+
hash = "sha256-qxgIYquXUWrm8bS8EamERMHOnjI2aPyK7bQieVG66uA=";
# The git commit is part of the `hikari.__git_sha1__` original output;
# leave that output the same in nixpkgs. Use the `.git` directory
# to retrieve the commit SHA, and remove the directory afterwards,
+10 -2
pkgs/development/python-modules/jax/default.nix
···
in
buildPythonPackage rec {
pname = "jax";
-
version = "0.4.25";
+
version = "0.4.28";
pyproject = true;
disabled = pythonOlder "3.9";
···
repo = "jax";
# google/jax contains tags for jax and jaxlib. Only use jax tags!
rev = "refs/tags/jax-v${version}";
-
hash = "sha256-poQQo2ZgEhPYzK3aCs+BjaHTNZbezJAECd+HOdY1Yok=";
+
hash = "sha256-qSHPwi3is6Ts7pz5s4KzQHBMbcjGp+vAOsejW3o36Ek=";
};
nativeBuildInputs = [
···
"-W ignore::DeprecationWarning"
"tests/"
];
+
+
# Prevents `tests/export_back_compat_test.py::CompatTest::test_*` tests from failing on darwin with
+
# PermissionError: [Errno 13] Permission denied: '/tmp/back_compat_testdata/test_*.py'
+
# See https://github.com/google/jax/blob/jaxlib-v0.4.27/jax/_src/internal_test_util/export_back_compat_test_util.py#L240-L241
+
# NOTE: this doesn't seem to be an issue on linux
+
preCheck = lib.optionalString stdenv.isDarwin ''
+
export TEST_UNDECLARED_OUTPUTS_DIR=$(mktemp -d)
+
'';
disabledTests = [
# Exceeds tolerance when the machine is busy
+22 -38
pkgs/development/python-modules/jaxlib/bin.nix
···
, stdenv
# Options:
, cudaSupport ? config.cudaSupport
-
, cudaPackagesGoogle
+
, cudaPackages
}:
let
-
inherit (cudaPackagesGoogle) cudaVersion;
+
inherit (cudaPackages) cudaVersion;
-
version = "0.4.24";
+
version = "0.4.28";
inherit (python) pythonVersion;
-
cudaLibPath = lib.makeLibraryPath (with cudaPackagesGoogle; [
+
cudaLibPath = lib.makeLibraryPath (with cudaPackages; [
cuda_cudart.lib # libcudart.so
cuda_cupti.lib # libcupti.so
cudnn.lib # libcudnn.so
···
"3.9-x86_64-linux" = getSrcFromPypi {
platform = "manylinux2014_x86_64";
dist = "cp39";
-
hash = "sha256-6P5ArMoLZiUkHUoQ/mJccbNj5/7el/op+Qo6cGQ33xE=";
+
hash = "sha256-Slbr8FtKTBeRaZ2HTgcvP4CPCYa0AQsU+1SaackMqdw=";
};
"3.9-aarch64-darwin" = getSrcFromPypi {
platform = "macosx_11_0_arm64";
dist = "cp39";
-
hash = "sha256-23JQZRwMLtt7sK/JlCBqqRyfTVIAVJFN2sL+nAkQgvU=";
+
hash = "sha256-sBVi7IrXVxm30DiXUkiel+trTctMjBE75JFjTVKCrTw=";
};
"3.9-x86_64-darwin" = getSrcFromPypi {
platform = "macosx_10_14_x86_64";
dist = "cp39";
-
hash = "sha256-OgMedn9GHGs5THZf3pkP3Aw/jJ0vL5qK1b+Lzf634Ik=";
+
hash = "sha256-T5jMg3srbG3P4Kt/+esQkxSSCUYRmqOvn6oTlxj/J4c=";
};
"3.10-x86_64-linux" = getSrcFromPypi {
platform = "manylinux2014_x86_64";
dist = "cp310";
-
hash = "sha256-/VwUIIa7mTs/wLz0ArsEfNrz2pGriVVT5GX9XRFRxfY=";
+
hash = "sha256-47zcb45g+FVPQVwU2TATTmAuPKM8OOVGJ0/VRfh1dps=";
};
"3.10-aarch64-darwin" = getSrcFromPypi {
platform = "macosx_11_0_arm64";
dist = "cp310";
-
hash = "sha256-LgICOyDGts840SQQJh+yOMobMASb62llvJjpGvhzrSw=";
+
hash = "sha256-8Djmi9ENGjVUcisLvjbmpEg4RDenWqnSg/aW8O2fjAk=";
};
"3.10-x86_64-darwin" = getSrcFromPypi {
platform = "macosx_10_14_x86_64";
dist = "cp310";
-
hash = "sha256-vhyULw+zBpz1UEi2tqgBMQEzY9a6YBgEIg6A4PPh3bQ=";
+
hash = "sha256-pCHSN/jCXShQFm0zRgPGc925tsJvUrxJZwS4eCKXvWY=";
};
"3.11-x86_64-linux" = getSrcFromPypi {
platform = "manylinux2014_x86_64";
dist = "cp311";
-
hash = "sha256-VJO/VVwBFkOEtq4y/sLVgAV8Cung01JULiuT6W96E/8=";
+
hash = "sha256-Rc4PPIQM/4I2z/JsN/Jsn/B4aV+T4MFiwyDCgfUEEnU=";
};
"3.11-aarch64-darwin" = getSrcFromPypi {
platform = "macosx_11_0_arm64";
dist = "cp311";
-
hash = "sha256-VtuwXxurpSp1KI8ty1bizs5cdy8GEBN2MgS227sOCmE=";
+
hash = "sha256-eThX+vN/Nxyv51L+pfyBH0NeQ7j7S1AgWERKf17M+Ck=";
};
"3.11-x86_64-darwin" = getSrcFromPypi {
platform = "macosx_10_14_x86_64";
dist = "cp311";
-
hash = "sha256-4Dj5dEGKb9hpg3HlVogNO1Gc9UibJhy1eym2mjivxAQ=";
+
hash = "sha256-L/gpDtx7ksfq5SUX9lSSYz4mey6QZ7rT5MMj0hPnfPU=";
};
"3.12-x86_64-linux" = getSrcFromPypi {
platform = "manylinux2014_x86_64";
dist = "cp312";
-
hash = "sha256-TlrGVtb3NTLmhnILWPLJR+jISCZ5SUV4wxNFpSfkCBo=";
+
hash = "sha256-RqGqhX9P7uikP8upXA4Kti1AwmzJcwtsaWVZCLo1n40=";
};
"3.12-aarch64-darwin" = getSrcFromPypi {
platform = "macosx_11_0_arm64";
dist = "cp312";
-
hash = "sha256-FIwK5CGykQjteuWzLZnbtAggIxLQeGV96bXlZGEytN0=";
+
hash = "sha256-jdi//jhTcC9jzZJNoO4lc0pNGc1ckmvgM9dyun0cF10=";
};
"3.12-x86_64-darwin" = getSrcFromPypi {
platform = "macosx_10_14_x86_64";
dist = "cp312";
-
hash = "sha256-9/jw/wr6oUD9pOadVAaMRL086iVMUXwVgnUMcG1UNvE=";
+
hash = "sha256-1sCaVFMpciRhrwVuc1FG0sjHTCKsdCaoRetp8ya096A=";
};
};
···
gpuSrcs = {
"cuda12.2-3.9" = fetchurl {
url = "https://storage.googleapis.com/jax-releases/cuda12/jaxlib-${version}+cuda12.cudnn89-cp39-cp39-manylinux2014_x86_64.whl";
-
hash = "sha256-xdJKLPtx+CIza2CrWKM3M0cZJzyNFVTTTsvlgh38bfM=";
+
hash = "sha256-d8LIl22gIvmWfoyKfXKElZJXicPQIZxdS4HumhwQGCw=";
};
"cuda12.2-3.10" = fetchurl {
url = "https://storage.googleapis.com/jax-releases/cuda12/jaxlib-${version}+cuda12.cudnn89-cp310-cp310-manylinux2014_x86_64.whl";
-
hash = "sha256-QCjrOczD2mp+CDwVXBc0/4rJnAizeV62AK0Dpx9X6TE=";
+
hash = "sha256-PXtWv+UEcMWF8LhWe6Z1UGkf14PG3dkJ0Iop0LiimnQ=";
};
"cuda12.2-3.11" = fetchurl {
url = "https://storage.googleapis.com/jax-releases/cuda12/jaxlib-${version}+cuda12.cudnn89-cp311-cp311-manylinux2014_x86_64.whl";
-
hash = "sha256-Ipy3vk1yUplpNzECAFt63aOIhgEWgXG7hkoeTIk9bQQ=";
+
hash = "sha256-QO2WSOzmJ48VaCha596mELiOfPsAGLpGctmdzcCHE/o=";
};
"cuda12.2-3.12" = fetchurl {
url = "https://storage.googleapis.com/jax-releases/cuda12/jaxlib-${version}+cuda12.cudnn89-cp312-cp312-manylinux2014_x86_64.whl";
-
hash = "sha256-LSnZHaUga/8Z65iKXWBnZDk4yUpNykFTu3vukCchO6Q=";
-
};
-
"cuda11.8-3.9" = fetchurl {
-
url = "https://storage.googleapis.com/jax-releases/cuda11/jaxlib-${version}+cuda11.cudnn86-cp39-cp39-manylinux2014_x86_64.whl";
-
hash = "sha256-UmyugL0VjlXkiD7fuDPWgW8XUpr/QaP5ggp6swoZTzU=";
-
};
-
"cuda11.8-3.10" = fetchurl {
-
url = "https://storage.googleapis.com/jax-releases/cuda11/jaxlib-${version}+cuda11.cudnn86-cp310-cp310-manylinux2014_x86_64.whl";
-
hash = "sha256-luKULEiV1t/sO6eckDxddJTiOFa0dtJeDlrvp+WYmHk=";
-
};
-
"cuda11.8-3.11" = fetchurl {
-
url = "https://storage.googleapis.com/jax-releases/cuda11/jaxlib-${version}+cuda11.cudnn86-cp311-cp311-manylinux2014_x86_64.whl";
-
hash = "sha256-4+uJ8Ij6mFGEmjFEgi3fLnSLZs+v18BRoOt7mZuqydw=";
-
};
-
"cuda11.8-3.12" = fetchurl {
-
url = "https://storage.googleapis.com/jax-releases/cuda11/jaxlib-${version}+cuda11.cudnn86-cp312-cp312-manylinux2014_x86_64.whl";
-
hash = "sha256-bUDFb94Ar/65SzzR9RLIs/SL/HdjaPT1Su5whmjkS00=";
+
hash = "sha256-ixWMaIChy4Ammsn23/3cCoala0lFibuUxyUr3tjfFKU=";
};
};
···
# for more info.
postInstall = lib.optional cudaSupport ''
mkdir -p $out/${python.sitePackages}/jaxlib/cuda/bin
-
ln -s ${lib.getExe' cudaPackagesGoogle.cuda_nvcc "ptxas"} $out/${python.sitePackages}/jaxlib/cuda/bin/ptxas
+
ln -s ${lib.getExe' cudaPackages.cuda_nvcc "ptxas"} $out/${python.sitePackages}/jaxlib/cuda/bin/ptxas
'';
inherit (jaxlib-build) pythonImportsCheck;
···
platforms = [ "aarch64-darwin" "x86_64-linux" "x86_64-darwin" ];
broken =
!(cudaSupport -> lib.versionAtLeast cudaVersion "11.1")
-
|| !(cudaSupport -> lib.versionAtLeast cudaPackagesGoogle.cudnn.version "8.2")
+
|| !(cudaSupport -> lib.versionAtLeast cudaPackages.cudnn.version "8.2")
|| !(cudaSupport -> stdenv.isLinux)
|| !(cudaSupport -> (gpuSrcs ? "cuda${cudaVersion}-${pythonVersion}"))
# Fails at pythonImportsCheckPhase:
+15 -25
pkgs/development/python-modules/jaxlib/default.nix
···
, curl
, cython
, fetchFromGitHub
-
, fetchpatch
, git
, IOKit
, jsoncpp
···
, config
# CUDA flags:
, cudaSupport ? config.cudaSupport
-
, cudaPackagesGoogle
+
, cudaPackages
# MKL:
, mklSupport ? true
}@inputs:
let
-
inherit (cudaPackagesGoogle) cudaFlags cudaVersion cudnn nccl;
+
inherit (cudaPackages) cudaFlags cudaVersion cudnn nccl;
pname = "jaxlib";
-
version = "0.4.24";
+
version = "0.4.28";
# It's necessary to consistently use backendStdenv when building with CUDA
# support, otherwise we get libstdc++ errors downstream
stdenv = throw "Use effectiveStdenv instead";
-
effectiveStdenv = if cudaSupport then cudaPackagesGoogle.backendStdenv else inputs.stdenv;
+
effectiveStdenv = if cudaSupport then cudaPackages.backendStdenv else inputs.stdenv;
meta = with lib; {
description = "JAX is Autograd and XLA, brought together for high-performance machine learning research.";
···
# These are necessary at build time and run time.
cuda_libs_joined = symlinkJoin {
name = "cuda-joined";
-
paths = with cudaPackagesGoogle; [
+
paths = with cudaPackages; [
cuda_cudart.lib # libcudart.so
cuda_cudart.static # libcudart_static.a
cuda_cupti.lib # libcupti.so
···
# These are only necessary at build time.
cuda_build_deps_joined = symlinkJoin {
name = "cuda-build-deps-joined";
-
paths = with cudaPackagesGoogle; [
+
paths = with cudaPackages; [
cuda_libs_joined
# Binaries
-
cudaPackagesGoogle.cuda_nvcc.bin # nvcc
+
cudaPackages.cuda_nvcc.bin # nvcc
# Headers
cuda_cccl.dev # block_load.cuh
···
owner = "openxla";
repo = "xla";
# Update this according to https://github.com/google/jax/blob/jaxlib-v${version}/third_party/xla/workspace.bzl.
-
rev = "12eee889e1f2ad41e27d7b0e970cb92d282d3ec5";
-
hash = "sha256-68kjjgwYjRlcT0TVJo9BN6s+WTkdu5UMJqQcfHpBT90=";
+
rev = "e8247c3ea1d4d7f31cf27def4c7ac6f2ce64ecd4";
+
hash = "sha256-ZhgMIVs3Z4dTrkRWDqaPC/i7yJz2dsYXrZbjzqvPX3E=";
};
-
patches = [
-
# Resolves "could not convert ‘result’ from ‘SmallVector<[...],6>’ to
-
# ‘SmallVector<[...],4>’" compilation error. See https://github.com/google/jax/issues/19814#issuecomment-1945141259.
-
(fetchpatch {
-
url = "https://github.com/openxla/xla/commit/7a614cd346594fc7ea2fe75570c9c53a4a444f60.patch";
-
hash = "sha256-RtuQTH8wzNiJcOtISLhf+gMlH1gg8hekvxEB+4wX6BM=";
-
})
-
];
-
dontBuild = true;
# This is necessary for patchShebangs to know the right path to use.
···
repo = "jax";
# google/jax contains tags for jax and jaxlib. Only use jaxlib tags!
rev = "refs/tags/${pname}-v${version}";
-
hash = "sha256-hmx7eo3pephc6BQfoJ3U0QwWBWmhkAc+7S4QmW32qQs=";
+
hash = "sha256-qSHPwi3is6Ts7pz5s4KzQHBMbcjGp+vAOsejW3o36Ek=";
};
nativeBuildInputs = [
···
];
sha256 = (if cudaSupport then {
-
x86_64-linux = "sha256-8JilAoTbqOjOOJa/Zc/n/quaEDcpdcLXCNb34mfB+OM=";
+
x86_64-linux = "sha256-VGNMf5/DgXbgsu1w5J1Pmrukw+7UO31BNU+crKVsX5k=";
} else {
-
x86_64-linux = "sha256-iqS+I1FQLNWXNMsA20cJp7YkyGUeshee5b2QfRBNZtk=";
-
aarch64-linux = "sha256-qmJ0Fm/VGMTmko4PhKs1P8/GLEJmVxb8xg+ss/HsakY==";
+
x86_64-linux = "sha256-uOoAyMBLHPX6jzdN43b5wZV5eW0yI8sCDD7BSX2h4oQ=";
+
aarch64-linux = "sha256-+SnGKY9LIT1Qhu/x6Uh7sHRaAEjlc//qyKj1m4t16PA=";
}).${effectiveStdenv.system} or (throw "jaxlib: unsupported system: ${effectiveStdenv.system}");
};
···
# for more info.
postInstall = lib.optionalString cudaSupport ''
mkdir -p $out/bin
-
ln -s ${cudaPackagesGoogle.cuda_nvcc.bin}/bin/ptxas $out/bin/ptxas
+
ln -s ${cudaPackages.cuda_nvcc.bin}/bin/ptxas $out/bin/ptxas
find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
patchelf --add-rpath "${lib.makeLibraryPath [cuda_libs_joined cudnn nccl]}" "$lib"
···
nativeBuildInputs = lib.optionals cudaSupport [ autoAddDriverRunpath ];
-
propagatedBuildInputs = [
+
dependencies = [
absl-py
curl
double-conversion
+17 -3
pkgs/development/python-modules/jaxopt/default.nix
···
, fetchpatch
, pytest-xdist
, pytestCheckHook
+
, setuptools
, absl-py
, cvxpy
, jax
···
buildPythonPackage rec {
pname = "jaxopt";
version = "0.8.3";
-
format = "setuptools";
+
pyproject = true;
disabled = pythonOlder "3.8";
···
})
];
-
propagatedBuildInputs = [
+
build-system = [
+
setuptools
+
];
+
+
dependencies = [
absl-py
jax
jaxlib
···
"jaxopt.tree_util"
];
-
disabledTests = lib.optionals (stdenv.isLinux && stdenv.isAarch64) [
+
disabledTests = [
+
# https://github.com/google/jaxopt/issues/592
+
"test_solve_sparse"
+
] ++ lib.optionals (stdenv.isLinux && stdenv.isAarch64) [
# https://github.com/google/jaxopt/issues/577
"test_binary_logit_log_likelihood"
"test_solve_sparse"
"test_logreg_with_intercept_manual_loop3"
+
+
# https://github.com/google/jaxopt/issues/593
+
# Makes the test suite crash
+
"test_dtype_consistency"
+
# AssertionError: Array(0.01411963, dtype=float32) not less than or equal to 0.01
+
"test_multiclass_logreg6"
];
meta = with lib; {
+4 -2
pkgs/development/python-modules/nanobind/default.nix
···
scipy
torch
tensorflow
-
jax
-
jaxlib
+
# Uncomment at next release (1.9.3)
+
# See https://github.com/wjakob/nanobind/issues/578
+
# jax
+
# jaxlib
];
meta = with lib; {
+7 -3
pkgs/development/python-modules/objax/default.nix
···
{ lib
, buildPythonPackage
, fetchFromGitHub
-
, fetchpatch
, jax
, jaxlib
, keras
···
hash = "sha256-WD+pmR8cEay4iziRXqF3sHUzCMBjmLJ3wZ3iYOD+hzk=";
};
-
nativeBuildInputs = [
+
patches = [
+
# Issue reported upstream: https://github.com/google/objax/issues/270
+
./replace-deprecated-device_buffers.patch
+
];
+
+
build-system = [
setuptools
];
···
jaxlib
];
-
propagatedBuildInputs = [
+
dependencies = [
jax
numpy
parameterized
+14
pkgs/development/python-modules/objax/replace-deprecated-device_buffers.patch
···
+
diff --git a/objax/util/util.py b/objax/util/util.py
+
index c31a356..344cf9a 100644
+
--- a/objax/util/util.py
+
+++ b/objax/util/util.py
+
@@ -117,7 +117,8 @@ def get_local_devices():
+
if _local_devices is None:
+
x = jn.zeros((jax.local_device_count(), 1), dtype=jn.float32)
+
sharded_x = map_to_device(x)
+
- _local_devices = [b.device() for b in sharded_x.device_buffers]
+
+ device_buffers = [buf.data for buf in sharded_x.addressable_shards]
+
+ _local_devices = [list(b.devices())[0] for b in device_buffers]
+
return _local_devices
+
+
+2 -2
pkgs/development/python-modules/opower/default.nix
···
buildPythonPackage rec {
pname = "opower";
-
version = "0.4.4";
+
version = "0.4.5";
pyproject = true;
disabled = pythonOlder "3.9";
···
owner = "tronikos";
repo = "opower";
rev = "refs/tags/v${version}";
-
hash = "sha256-GG7r0/rZcB4cuog3m0qCOrmcYiscPQggQhRC9sQGG40=";
+
hash = "sha256-PBxxLbVOvJSFmDXgKeI5sICUR7NJGUEUUahK9eBsvbE=";
};
build-system = [ setuptools ];
-27
pkgs/development/python-modules/pathlib/default.nix
···
-
{ lib
-
, buildPythonPackage
-
, fetchPypi
-
, unittestCheckHook
-
, pythonAtLeast
-
}:
-
-
buildPythonPackage rec {
-
pname = "pathlib";
-
version = "1.0.1";
-
format = "setuptools";
-
disabled = pythonAtLeast "3.4"; # Was added to std library in Python 3.4
-
-
src = fetchPypi {
-
inherit pname version;
-
sha256 = "17zajiw4mjbkkv6ahp3xf025qglkj0805m9s41c45zryzj6p2h39";
-
};
-
-
nativeCheckInputs = [ unittestCheckHook ];
-
-
meta = {
-
description = "Object-oriented filesystem paths";
-
homepage = "https://pathlib.readthedocs.org/";
-
license = lib.licenses.mit;
-
maintainers = with lib.maintainers; [ ];
-
};
-
}
-37
pkgs/development/python-modules/pxml/default.nix
···
-
{ lib
-
, pythonAtLeast
-
, buildPythonPackage
-
, fetchPypi
-
, blessings
-
, six
-
, nose
-
, coverage
-
}:
-
-
buildPythonPackage rec {
-
pname = "pxml";
-
version = "0.2.13";
-
format = "setuptools";
-
disabled = pythonAtLeast "3.8";
-
-
src = fetchPypi {
-
inherit pname version;
-
sha256 = "0c9zzfv6ciyf9qm7556wil45xxgykg1cj8isp1b88gimwcb2hxg4";
-
};
-
-
propagatedBuildInputs = [ blessings six ];
-
nativeCheckInputs = [ nose coverage ];
-
-
# test_prefixedWhitespace fails due to a python3 StringIO issue requiring
-
# bytes rather than str
-
checkPhase = ''
-
nosetests -e 'test_prefixedWhitespace'
-
'';
-
-
meta = with lib; {
-
homepage = "https://github.com/metagriffin/pxml";
-
description = ''A python library and command-line tool to "prettify" and colorize XML.'';
-
maintainers = with maintainers; [ glittershark ];
-
license = licenses.gpl3;
-
};
-
}
+2 -2
pkgs/development/python-modules/pycrdt-websocket/default.nix
···
buildPythonPackage rec {
pname = "pycrdt-websocket";
-
version = "0.13.3";
+
version = "0.13.4";
pyproject = true;
disabled = pythonOlder "3.8";
···
owner = "jupyter-server";
repo = "pycrdt-websocket";
rev = "refs/tags/v${version}";
-
hash = "sha256-4kxPRPb8XfbQHYmQqnSNfqywUZxQy5b0qWIryLDtP8w=";
+
hash = "sha256-nkm1ZQ6bYBRDDoREovhEthDZoHApYxzAnwVgDgpWW/s=";
};
build-system = [
+2 -2
pkgs/development/python-modules/pyexploitdb/default.nix
···
buildPythonPackage rec {
pname = "pyexploitdb";
-
version = "0.2.16";
+
version = "0.2.17";
pyproject = true;
disabled = pythonOlder "3.7";
···
src = fetchPypi {
pname = "pyExploitDb";
inherit version;
-
hash = "sha256-PP9dR8Jl4eWsky3vO9Pgraw0plik/5aWvmOAEc2/Qpo=";
+
hash = "sha256-I7uDmKvHVx8mpqlq+kOyp57wCii8fy9ecJTSm7uHbZ0=";
};
build-system = [ setuptools ];
+19 -1
pkgs/development/python-modules/pymilter/default.nix
···
-
{ lib, python, buildPythonPackage, fetchFromGitHub, libmilter, bsddb3, pydns, iana-etc, libredirect }:
+
{ lib
+
, python
+
, buildPythonPackage
+
, fetchFromGitHub
+
, fetchpatch
+
, libmilter
+
, bsddb3
+
, pydns
+
, iana-etc
+
, libredirect
+
, pyasyncore
+
}:
buildPythonPackage rec {
pname = "pymilter";
···
};
buildInputs = [ libmilter ];
+
nativeCheckInputs = [ pyasyncore ];
propagatedBuildInputs = [ bsddb3 pydns ];
+
patches = [ (fetchpatch {
+
name = "Remove-calls-to-the-deprecated-method-assertEquals";
+
url = "https://github.com/sdgathman/pymilter/pull/57.patch";
+
hash = "sha256-/5LlDR15nMR3l7rkVjT3w4FbDTFAAgNdERWlPNL2TVg=";
+
})
+
];
preBuild = ''
sed -i 's/import thread/import _thread as thread/' Milter/greylist.py
+21 -21
pkgs/development/python-modules/pynws/default.nix
···
-
{ lib
-
, aiohttp
-
, buildPythonPackage
-
, fetchFromGitHub
-
, freezegun
-
, metar
-
, pytest-aiohttp
-
, pytest-asyncio
-
, pytest-cov
-
, pytestCheckHook
-
, pythonOlder
-
, setuptools
-
, setuptools-scm
-
, tenacity
+
{
+
lib,
+
aiohttp,
+
buildPythonPackage,
+
fetchFromGitHub,
+
freezegun,
+
metar,
+
pytest-aiohttp,
+
pytest-asyncio,
+
pytest-cov,
+
pytestCheckHook,
+
pythonOlder,
+
setuptools,
+
setuptools-scm,
+
tenacity,
}:
buildPythonPackage rec {
pname = "pynws";
-
version = "1.7.0";
+
version = "1.8.0";
pyproject = true;
-
disabled = pythonOlder "3.6";
+
disabled = pythonOlder "3.8";
src = fetchFromGitHub {
owner = "MatthewFlamm";
-
repo = pname;
+
repo = "pynws";
rev = "refs/tags/v${version}";
-
hash = "sha256-JjXGDjLITzJxEmCIv7RPvb+Jqe9hm++ptpJOryuK9M0=";
+
hash = "sha256-KUCylHYng6mn2TWKf8C7k0IoerM22OIQ7pJMKi5SF3A=";
};
build-system = [
···
metar
];
-
optional-dependencies.retry = [
-
tenacity
-
];
+
optional-dependencies.retry = [ tenacity ];
nativeCheckInputs = [
freezegun
···
meta = with lib; {
description = "Python library to retrieve data from NWS/NOAA";
homepage = "https://github.com/MatthewFlamm/pynws";
+
changelog = "https://github.com/MatthewFlamm/pynws/releases/tag/v${version}";
license = with licenses; [ mit ];
maintainers = with maintainers; [ fab ];
};
+1 -5
pkgs/development/python-modules/pyscss/default.nix
···
, fetchFromGitHub
, pytestCheckHook
, six
-
, enum34
-
, pathlib
-
, pythonOlder
}:
buildPythonPackage rec {
···
nativeCheckInputs = [ pytestCheckHook ];
-
propagatedBuildInputs = [ six ]
-
++ lib.optionals (pythonOlder "3.4") [ enum34 pathlib ];
+
propagatedBuildInputs = [ six ];
# Test suite is broken.
# See https://github.com/Kronuz/pyScss/issues/415
+2 -2
pkgs/development/python-modules/pysigma-backend-insightidr/default.nix
···
buildPythonPackage rec {
pname = "pysigma-backend-insightidr";
-
version = "0.2.2";
+
version = "0.2.3";
format = "pyproject";
disabled = pythonOlder "3.8";
···
owner = "SigmaHQ";
repo = "pySigma-backend-insightidr";
rev = "refs/tags/v${version}";
-
hash = "sha256-B42MADteF0+GC/CPJPLaTGdGcQjC8KEsK9u3tBmtObg=";
+
hash = "sha256-wQMnnJ0KU+53MS3PIBkwIhUiyUdCrDbdUT6upk2Pp/8=";
};
nativeBuildInputs = [
+2 -2
pkgs/development/python-modules/pytrydan/default.nix
···
buildPythonPackage rec {
pname = "pytrydan";
-
version = "0.6.0";
+
version = "0.6.1";
pyproject = true;
disabled = pythonOlder "3.10";
···
owner = "dgomes";
repo = "pytrydan";
rev = "refs/tags/v${version}";
-
hash = "sha256-+hFwBFYtRseVwesZtSrL3J/ZnsMAjD2ZAhTlk41hfqU=";
+
hash = "sha256-5sTHfxNV4JEonGke8ZZ/pXoLA15iCuJ/iSW1XwFMltg=";
};
postPatch = ''
+5 -5
pkgs/development/python-modules/qtile/default.nix
···
buildPythonPackage rec {
pname = "qtile";
version = "0.25.0";
-
format = "setuptools";
+
pyproject = true;
src = fetchFromGitHub {
owner = "qtile";
···
--replace /usr/include/libdrm ${lib.getDev libdrm}/include/libdrm
'';
-
nativeBuildInputs = [
-
pkg-config
+
build-system = [
setuptools
setuptools-scm
+
pkg-config
];
-
propagatedBuildInputs = [
+
dependencies = [
(cairocffi.override { withXcffib = true; })
dbus-next
dbus-python
···
description = "A small, flexible, scriptable tiling window manager written in Python";
mainProgram = "qtile";
platforms = platforms.linux;
-
maintainers = with maintainers; [ arjan-s ];
+
maintainers = with maintainers; [ arjan-s sigmanificient ];
};
}
+1 -4
pkgs/development/python-modules/slicedimage/default.nix
···
, fetchFromGitHub
, boto3
, diskcache
-
, enum34
, packaging
-
, pathlib
, numpy
, requests
, scikit-image
, six
, pytestCheckHook
-
, isPy27
, tifffile
}:
···
scikit-image
six
tifffile
-
] ++ lib.optionals isPy27 [ pathlib enum34 ];
+
];
nativeCheckInputs = [
pytestCheckHook
+2 -2
pkgs/development/python-modules/snakemake-storage-plugin-s3/default.nix
···
buildPythonPackage rec {
pname = "snakemake-storage-plugin-s3";
-
version = "0.2.10";
+
version = "0.2.11";
format = "pyproject";
src = fetchFromGitHub {
owner = "snakemake";
repo = pname;
rev = "refs/tags/v${version}";
-
hash = "sha256-k21DRQdSUFkdwNb7MZJmClhIg+pdSc7H6FkDrbf4DT8=";
+
hash = "sha256-pAMrWJe4+PWHglZ/C83Af+uHBg9wupfSlH4W8CvO9as=";
};
postPatch = ''
+2 -2
pkgs/development/python-modules/sphinxcontrib-confluencebuilder/default.nix
···
buildPythonPackage rec {
pname = "sphinxcontrib-confluencebuilder";
-
version = "2.5.1";
+
version = "2.5.2";
format = "pyproject";
src = fetchPypi {
pname = "sphinxcontrib_confluencebuilder";
inherit version;
-
hash = "sha256-PQpkwQ95UVJwDGTAq1xdcSvd07FZpZfA/4jq3ywlMas=";
+
hash = "sha256-FwjjlMTIhAD/v4Ig+uqrJJybdPqpPG+7OMuJwSqWo84=";
};
nativeBuildInputs = [
+2 -5
pkgs/development/python-modules/sqlbag/default.nix
···
{ lib
, buildPythonPackage
, fetchFromGitHub
-
, isPy27
, psycopg2
, pymysql
, sqlalchemy
-
, pathlib
, six
, flask
, pendulum
···
, pytest-sugar
, postgresql
, postgresqlTestHook
-
,
}:
buildPythonPackage rec {
pname = "sqlbag";
···
pymysql
setuptools # needed for 'pkg_resources'
-
]
-
++ lib.optional isPy27 pathlib;
+
];
nativeCheckInputs = [
pytestCheckHook
···
homepage = "https://github.com/djrobstep/sqlbag";
license = with licenses; [ unlicense ];
maintainers = with maintainers; [ soispha ];
+
broken = true; # Fails to build against the current flask version
};
}
-23
pkgs/development/python-modules/sqlsoup/default.nix
···
-
{ buildPythonPackage, fetchPypi, lib, sqlalchemy, nose }:
-
-
buildPythonPackage rec {
-
pname = "sqlsoup";
-
version = "0.9.1";
-
format = "setuptools";
-
-
src = fetchPypi {
-
inherit pname version;
-
sha256 = "1mj00fhxj75ac3i8xk9jmm7hvcjz9p4x2r3yndcwsgb659rvgbrg";
-
};
-
-
propagatedBuildInputs = [ sqlalchemy ];
-
nativeCheckInputs = [ nose ];
-
-
meta = with lib; {
-
description = "A one step database access tool, built on the SQLAlchemy ORM";
-
homepage = "https://github.com/zzzeek/sqlsoup";
-
license = licenses.mit;
-
maintainers = [];
-
broken = true; # incompatible with sqlalchemy>=1.4 and unmaintained since 2016
-
};
-
}
+9
pkgs/development/python-modules/stem/default.nix
···
, buildPythonPackage
, pythonOlder
, fetchFromGitHub
+
, fetchpatch
, setuptools
, cryptography
, mock
···
rev = "9a9c7d43a7fdcde6d4a9cf95b831fb5e5923a160";
hash = "sha256-Oc73Jx31SLzuhT9Iym5HHszKfflKZ+3aky5flXudvmI=";
};
+
+
patches = [
+
# fixes deprecated test assertion, assertRaisesRegexp in python 3
+
(fetchpatch {
+
url = "https://github.com/trishtzy/stem/commit/d5012a1039f05c69ebe832723ce96ecbe8f79fe1.patch";
+
hash = "sha256-ozOTx4/c86sW/9Ss5eZ6ZxX63ByJT5x7JF6wBBd+VFY=";
+
})
+
];
nativeBuildInputs = [
setuptools
+2 -2
pkgs/development/python-modules/tencentcloud-sdk-python/default.nix
···
buildPythonPackage rec {
pname = "tencentcloud-sdk-python";
-
version = "3.0.1143";
+
version = "3.0.1144";
pyproject = true;
disabled = pythonOlder "3.9";
···
owner = "TencentCloud";
repo = "tencentcloud-sdk-python";
rev = "refs/tags/${version}";
-
hash = "sha256-xUjOgqhHk1Yj0WNcx051kqRmpg/YqVVvA40ueIDkr10=";
+
hash = "sha256-cUuigY67wgYj6DvFiplXJbqcLNKxH9odeG5a8bSZ7/8=";
};
build-system = [ setuptools ];
+2 -6
pkgs/development/python-modules/tensorflow/bin.nix
···
, tensorboard
, config
, cudaSupport ? config.cudaSupport
-
, cudaPackagesGoogle
+
, cudaPackages
, zlib
, python
, keras-applications
···
let
packages = import ./binary-hashes.nix;
-
inherit (cudaPackagesGoogle) cudatoolkit cudnn;
+
inherit (cudaPackages) cudatoolkit cudnn;
in buildPythonPackage {
pname = "tensorflow" + lib.optionalString cudaSupport "-gpu";
inherit (packages) version;
···
"tensorflow.python"
"tensorflow.python.framework"
];
-
-
passthru = {
-
cudaPackages = cudaPackagesGoogle;
-
};
meta = with lib; {
description = "Computation using data flow graphs for scalable machine learning";
+7 -8
pkgs/development/python-modules/tensorflow/default.nix
···
# https://groups.google.com/a/tensorflow.org/forum/#!topic/developers/iRCt5m4qUz0
, config
, cudaSupport ? config.cudaSupport
-
, cudaPackagesGoogle
-
, cudaCapabilities ? cudaPackagesGoogle.cudaFlags.cudaCapabilities
+
, cudaPackages
+
, cudaCapabilities ? cudaPackages.cudaFlags.cudaCapabilities
, mklSupport ? false, mkl
, tensorboardSupport ? true
# XLA without CUDA is broken
···
# __ZN4llvm11SmallPtrSetIPKNS_10AllocaInstELj8EED1Ev in any of the
# translation units, so the build fails at link time
stdenv =
-
if cudaSupport then cudaPackagesGoogle.backendStdenv
+
if cudaSupport then cudaPackages.backendStdenv
else if originalStdenv.isDarwin then llvmPackages.stdenv
else originalStdenv;
-
inherit (cudaPackagesGoogle) cudatoolkit nccl;
+
inherit (cudaPackages) cudatoolkit nccl;
# use compatible cuDNN (https://www.tensorflow.org/install/source#gpu)
# cudaPackages.cudnn led to this:
# https://github.com/tensorflow/tensorflow/issues/60398
cudnnAttribute = "cudnn_8_6";
-
cudnn = cudaPackagesGoogle.${cudnnAttribute};
+
cudnn = cudaPackages.${cudnnAttribute};
gentoo-patches = fetchzip {
url = "https://dev.gentoo.org/~perfinion/patches/tensorflow-patches-2.12.0.tar.bz2";
hash = "sha256-SCRX/5/zML7LmKEPJkcM5Tebez9vv/gmE4xhT/jyqWs=";
···
broken =
stdenv.isDarwin
|| !(xlaSupport -> cudaSupport)
-
|| !(cudaSupport -> builtins.hasAttr cudnnAttribute cudaPackagesGoogle)
-
|| !(cudaSupport -> cudaPackagesGoogle ? cudatoolkit);
+
|| !(cudaSupport -> builtins.hasAttr cudnnAttribute cudaPackages)
+
|| !(cudaSupport -> cudaPackages ? cudatoolkit);
} // lib.optionalAttrs stdenv.isDarwin {
timeout = 86400; # 24 hours
maxSilent = 14400; # 4h, double the default of 7200s
···
# Regression test for #77626 removed because not more `tensorflow.contrib`.
passthru = {
-
cudaPackages = cudaPackagesGoogle;
deps = bazel-build.deps;
libtensorflow = bazel-build.out;
};
+4 -2
pkgs/development/ruby-modules/gem-config/default.nix
···
];
};
-
nokogiri = attrs: {
+
nokogiri = attrs: ({
buildFlags = [
"--use-system-libraries"
"--with-zlib-lib=${zlib.out}/lib"
···
"--with-iconv-dir=${libiconv}"
"--with-opt-include=${libiconv}/include"
];
-
};
+
} // lib.optionalAttrs stdenv.isDarwin {
+
buildInputs = [ libxml2 ];
+
});
openssl = attrs: {
# https://github.com/ruby/openssl/issues/369
+2 -2
pkgs/development/tools/analysis/checkov/default.nix
···
python3.pkgs.buildPythonApplication rec {
pname = "checkov";
-
version = "3.2.90";
+
version = "3.2.91";
pyproject = true;
src = fetchFromGitHub {
owner = "bridgecrewio";
repo = "checkov";
rev = "refs/tags/${version}";
-
hash = "sha256-I/fSguqs5h4YTP/6whBFYceYd4tC8wrh+PfW8i7+kpY=";
+
hash = "sha256-lw89E7xD1tSAxgf1bwuPojOikOqIBfisePAtTGHXr4c=";
};
patches = [ ./flake8-compat-5.x.patch ];
+3 -3
pkgs/development/tools/changie/default.nix
···
buildGoModule rec {
pname = "changie";
-
version = "1.18.0";
+
version = "1.19.0";
src = fetchFromGitHub {
owner = "miniscruff";
repo = "changie";
rev = "v${version}";
-
hash = "sha256-pZe9T/WALFX5xwCiZKbf8fpaG3wmBJbqgM7FTPqlN2k=";
+
hash = "sha256-3GQ9C9UteJG3LfJmDsBbFJ9hnz7ouQ/6bZkQ/8CZ8PI=";
};
-
vendorHash = "sha256-SdaDu9LXgelSEXdOCAbtvt1LnrSVpAIoN6MDSjTeEOs=";
+
vendorHash = "sha256-2SkHId5BDAv525PISLjlrP862Z2fJDN4L839rz8rWaw=";
nativeBuildInputs = [
installShellFiles
+3 -3
pkgs/development/tools/language-servers/fortls/default.nix
···
buildPythonApplication rec {
pname = "fortls";
-
version = "2.13.0";
+
version = "3.0.0";
src = fetchFromGitHub {
owner = "fortran-lang";
repo = pname;
-
rev = "v${version}";
-
hash = "sha256-kFk2Dlnb0FXM3Ysvsy+g2AAMgpWmwzxuyJPovDm/FJU=";
+
rev = "refs/tags/v${version}";
+
hash = "sha256-kRL4kLX1T2Sontl8f3VO8Hb7uI41JwhZBiH//gdcmNE=";
};
nativeBuildInputs = [ setuptools-scm ];
+5 -5
pkgs/development/tools/mysql-shell/default.nix
···
in
stdenv.mkDerivation (finalAttrs: {
pname = "mysql-shell";
-
version = "8.0.36";
+
version = "8.0.37";
srcs = [
(fetchurl {
-
url = "https://cdn.mysql.com//Downloads/MySQL-${lib.versions.majorMinor finalAttrs.version}/mysql-${finalAttrs.version}.tar.gz";
-
hash = "sha256-9PJwa5WKinOA72yVjdlyMHvb7qRR76/DQuTEbim36d0=";
+
url = "https://dev.mysql.com/get/Downloads/MySQL-${lib.versions.majorMinor finalAttrs.version}/mysql-${finalAttrs.version}.tar.gz";
+
hash = "sha256-4GOgkazZ7EC7BfLATfZPiZan5OJuiDu2UChJ1fa0pho=";
})
(fetchurl {
-
url = "https://cdn.mysql.com//Downloads/MySQL-Shell/mysql-shell-${finalAttrs.version}-src.tar.gz";
-
hash = "sha256-s0+7dbcLcgS8u/6p7vpVAV9sR2gf2j9VDnSCJvw77fQ=";
+
url = "https://dev.mysql.com/get/Downloads/MySQL-Shell/mysql-shell-${finalAttrs.version}-src.tar.gz";
+
hash = "sha256-UtZ7/Ip5h9CXKy3lkSt8/TXJgbPPUO73rMSIFPfX0Is=";
})
];
pkgs/development/tools/treefmt/default.nix → pkgs/by-name/tr/treefmt/package.nix
+20
pkgs/kde/gear/kdeconnect-kde/default.nix
···
{
+
lib,
mkKdeDerivation,
+
substituteAll,
+
sshfs,
qtconnectivity,
qtmultimedia,
qtwayland,
···
}:
mkKdeDerivation {
pname = "kdeconnect-kde";
+
+
patches = [
+
(substituteAll {
+
src = ./hardcode-sshfs-path.patch;
+
sshfs = lib.getExe sshfs;
+
})
+
# We build OpenSSH without ssh-dss support, so sshfs explodes at runtime.
+
# See: https://github.com/NixOS/nixpkgs/commit/6ee4b8c8bf815567f7d0fa131576d2b8c0a18167
+
# FIXME: upstream?
+
./remove-ssh-dss.patch
+
];
+
+
# Hardcoded as a QString, which is UTF-16 so Nix can't pick it up automatically
+
postFixup = ''
+
mkdir -p $out/nix-support
+
echo "${sshfs}" > $out/nix-support/depends
+
'';
extraNativeBuildInputs = [pkg-config];
extraBuildInputs = [qtconnectivity qtmultimedia qtwayland wayland wayland-protocols libfakekey];
+13
pkgs/kde/gear/kdeconnect-kde/hardcode-sshfs-path.patch
···
+
diff --git a/plugins/sftp/mounter.cpp b/plugins/sftp/mounter.cpp
+
index 29e94f3b..c71e552f 100644
+
--- a/plugins/sftp/mounter.cpp
+
+++ b/plugins/sftp/mounter.cpp
+
@@ -94,7 +94,7 @@ void Mounter::onPacketReceived(const NetworkPacket &np)
+
+
QDir().mkpath(m_mountPoint);
+
+
- const QString program = QStringLiteral("sshfs");
+
+ const QString program = QStringLiteral("@sshfs@");
+
+
QString path;
+
if (np.has(QStringLiteral("multiPaths")))
+13
pkgs/kde/gear/kdeconnect-kde/remove-ssh-dss.patch
···
+
diff --git a/plugins/sftp/mounter.cpp b/plugins/sftp/mounter.cpp
+
index 29e94f3b..0f300c63 100644
+
--- a/plugins/sftp/mounter.cpp
+
+++ b/plugins/sftp/mounter.cpp
+
@@ -122,7 +122,7 @@ void Mounter::onPacketReceived(const NetworkPacket &np)
+
<< QStringLiteral("-o") << QStringLiteral("IdentityFile=") + KdeConnectConfig::instance().privateKeyPath()
+
<< QStringLiteral("-o") << QStringLiteral("StrictHostKeyChecking=no") // Do not ask for confirmation because it is not a known host
+
<< QStringLiteral("-o") << QStringLiteral("UserKnownHostsFile=/dev/null") // Prevent storing as a known host
+
- << QStringLiteral("-o") << QStringLiteral("HostKeyAlgorithms=+ssh-dss\\,ssh-rsa") // https://bugs.kde.org/show_bug.cgi?id=351725
+
+ << QStringLiteral("-o") << QStringLiteral("HostKeyAlgorithms=+ssh-rsa") // https://bugs.kde.org/show_bug.cgi?id=351725
+
<< QStringLiteral("-o") << QStringLiteral("PubkeyAcceptedKeyTypes=+ssh-rsa") // https://bugs.kde.org/show_bug.cgi?id=443155
+
<< QStringLiteral("-o") << QStringLiteral("uid=") + QString::number(getuid())
+
<< QStringLiteral("-o") << QStringLiteral("gid=") + QString::number(getgid())
+2 -2
pkgs/os-specific/darwin/rectangle/default.nix
···
stdenvNoCC.mkDerivation rec {
pname = "rectangle";
-
version = "0.77";
+
version = "0.79";
src = fetchurl {
url = "https://github.com/rxhanson/Rectangle/releases/download/v${version}/Rectangle${version}.dmg";
-
hash = "sha256-wViFngw6iwlOG7KsW+zqxjm9ZRJahscX0TYz7/7T5nw=";
+
hash = "sha256-XczwgLONTt7wL+oW1ruw6wBwZTMd5VyN+79xJy0NUIg=";
};
sourceRoot = ".";
+1 -5
pkgs/os-specific/linux/mstflint_access/default.nix
···
src = fetchurl {
url = "https://github.com/Mellanox/mstflint/releases/download/v${version}/kernel-mstflint-${version}.tar.gz";
-
hash = "sha256-rfZts0m8x6clVazpbAa2xK+dYgRU9Us5rbcWa0uHJ1M=";
+
hash = "sha256-bWYglHJUNCPT13N7aBdjbLPMZIk7vjvF+o9W3abDNr0=";
};
nativeBuildInputs = [ kmod ] ++ kernel.moduleBuildDependencies;
···
];
enableParallelBuilding = true;
-
-
preConfigure = lib.optionals (lib.versionAtLeast kernel.version "6.4") ''
-
sed -i "s/class_create(THIS_MODULE, dev->name)/class_create(dev->name)/g" mst_main.c
-
'';
installPhase = ''
runHook preInstall
+11 -9
pkgs/servers/home-assistant/custom-components/xiaomi_gateway3/default.nix
···
-
{ lib
-
, buildHomeAssistantComponent
-
, fetchFromGitHub
-
, zigpy
+
{
+
lib,
+
buildHomeAssistantComponent,
+
fetchFromGitHub,
+
zigpy,
+
nix-update-script,
}:
buildHomeAssistantComponent rec {
owner = "AlexxIT";
domain = "xiaomi_gateway3";
-
version = "4.0.3";
+
version = "4.0.5";
src = fetchFromGitHub {
owner = "AlexxIT";
repo = "XiaomiGateway3";
rev = "v${version}";
-
hash = "sha256-YGaVQaz3A0yM8AIC02CvMKWMJ3tW3OADYgKY8ViIt5U=";
+
hash = "sha256-C8aY23e6iWANbhCRQYNHx+3fomVO+7qdxj+qfv+K3JM=";
};
-
propagatedBuildInputs = [
-
zigpy
-
];
+
propagatedBuildInputs = [ zigpy ];
dontBuild = true;
+
+
passthru.updateScript = nix-update-script { };
meta = with lib; {
changelog = "https://github.com/AlexxIT/XiaomiGateway3/releases/tag/v{version}";
+13 -9
pkgs/servers/home-assistant/custom-components/xiaomi_miot/default.nix
···
-
{ lib
-
, buildHomeAssistantComponent
-
, fetchFromGitHub
-
, hap-python
-
, micloud
-
, pyqrcode
-
, python-miio
+
{
+
lib,
+
buildHomeAssistantComponent,
+
fetchFromGitHub,
+
hap-python,
+
micloud,
+
pyqrcode,
+
python-miio,
+
nix-update-script,
}:
buildHomeAssistantComponent rec {
owner = "al-one";
domain = "xiaomi_miot";
-
version = "0.7.17";
+
version = "0.7.18";
src = fetchFromGitHub {
owner = "al-one";
repo = "hass-xiaomi-miot";
rev = "v${version}";
-
hash = "sha256-IpL4e2mKCdtNu8NtI+xpx4FPW/uj1M5Rk6DswXmSJBk=";
+
hash = "sha256-/Zn2jEjwkCHiz48nVjKEPcCpPVajLaZ81DCTNpx1vbk=";
};
propagatedBuildInputs = [
···
];
dontBuild = true;
+
+
passthru.updateScript = nix-update-script { };
meta = with lib; {
changelog = "https://github.com/al-one/hass-xiaomi-miot/releases/tag/${version}";
+2 -2
pkgs/servers/http/nginx/stable.nix
···
{ callPackage, ... } @ args:
callPackage ./generic.nix args {
-
version = "1.24.0";
-
hash = "sha256-d6JUFje5KmIePudndsi3tAz21wfmm6U6lAKD4w/y9V0=";
+
version = "1.26.0";
+
hash = "sha256-0ubIQ51sbbUBXY6qskcKtSrvhae/NjGCh5l34IQ3BJc=";
}
+2 -2
pkgs/servers/knxd/default.nix
···
stdenv.mkDerivation rec {
pname = "knxd";
-
version = "0.14.60";
+
version = "0.14.61";
src = fetchFromGitHub {
owner = "knxd";
repo = "knxd";
rev = version;
-
hash = "sha256-djcp3b0OSnyrNOkvaacjQ3Nw9H54HOfKeBo26tkz/Ew=";
+
hash = "sha256-b8svjGaxW8YqonhXewebDUitezKoMcZxcUFGd2EKZQ4=";
};
postPatch = ''
+2 -2
pkgs/shells/hishtory/default.nix
···
buildGoModule rec {
pname = "hishtory";
-
version = "0.293";
+
version = "0.294";
src = fetchFromGitHub {
owner = "ddworken";
repo = pname;
rev = "v${version}";
-
hash = "sha256-5I61ey7GJ78dhSgRMkbRcKf3zk0j7zW2MyN2QSbAnpE=";
+
hash = "sha256-vwjzJTBgD4XzZekxGk02WW9/A7wLlLBsIUyvt0VCkOg=";
};
vendorHash = "sha256-zTwZ/sMhQdlf7RYfR2/K/m08U1Il0VQmYFyNNiYsWhc=";
-1
pkgs/test/cuda/default.nix
···
recurseIntoAttrs,
cudaPackages,
-
cudaPackagesGoogle,
cudaPackages_10_0,
cudaPackages_10_1,
+3 -3
pkgs/tools/admin/aliyun-cli/default.nix
···
buildGoModule rec {
pname = "aliyun-cli";
-
version = "3.0.204";
+
version = "3.0.205";
src = fetchFromGitHub {
rev = "v${version}";
owner = "aliyun";
repo = pname;
fetchSubmodules = true;
-
sha256 = "sha256-Wi35FcuDaTgGG7KgCcU/m6UIKzUuAaMF8J96YvjWgJc=";
+
sha256 = "sha256-fUInyAJKMvHZ13sWjqWr4KPge/hpeDSkJl69nnWJkPc=";
};
-
vendorHash = "sha256-AvlDqaJ5w9oDV1slwArr12KA1d3FKVU9H5WK4s3ePtU=";
+
vendorHash = "sha256-4jGwhcWANYUXuzFjXmFKzMVQXqFtPJt/y3IrjveRNYA=";
subPackages = [ "main" ];
+1 -1
pkgs/tools/admin/google-cloud-sdk/default.nix
···
meta = with lib; {
description = "Tools for the google cloud platform";
-
longDescription = "The Google Cloud SDK. This package has the programs: gcloud, gsutil, and bq";
+
longDescription = "The Google Cloud SDK for GCE hosts. Used by `google-cloud-sdk` only on GCE guests.";
sourceProvenance = with sourceTypes; [
fromSource
binaryNativeCode # anthoscli and possibly more
+32 -30
pkgs/tools/admin/google-cloud-sdk/withExtraComponents.nix
···
-
{ lib, google-cloud-sdk, runCommand, components }:
+
{ lib, google-cloud-sdk, symlinkJoin, components }:
comps_:
···
defaultComponents = with components; [ alpha beta ];
comps = [ google-cloud-sdk ] ++ filterPreInstalled (findDepsRecursive (defaultComponents ++ comps_));
-
in
-
# Components are installed by copying the `google-cloud-sdk` package, along
-
# with each component, over to a new location, and then patching that location
-
# with `sed` to ensure the proper paths are used.
-
# For some reason, this does not work properly with a `symlinkJoin`: the
-
# `gcloud` binary doesn't seem able to find the installed components.
-
runCommand "google-cloud-sdk-${google-cloud-sdk.version}"
-
{
-
inherit (google-cloud-sdk) meta;
-
inherit comps;
-
passAsFile = [ "comps" ];
-
doInstallCheck = true;
-
disallowedRequisites = [ google-cloud-sdk ];
-
installCheckPhase =
+
installCheck =
let
-
compNames = builtins.map (drv: drv.name) comps_;
+
compNames = builtins.map lib.getName comps_;
in
''
-
$out/bin/gcloud components list > component_list.txt
+
$out/bin/gcloud components list --only-local-state --format 'value(id)' > component_list.txt
for comp in ${builtins.toString compNames}; do
-
if [ ! grep ... component_list.txt | grep "Not Installed" ]; then
+
snapshot_file="$out/google-cloud-sdk/.install/$comp.snapshot.json"
+
+
if ! [ -f "$snapshot_file" ]; then
+
echo "Failed to install component '$comp'"
+
exit 1
+
fi
+
+
if grep --quiet '"is_hidden":true' "$snapshot_file"; then
+
continue
+
fi
+
+
if ! grep --quiet "^$comp$" component_list.txt; then
echo "Failed to install component '$comp'"
exit 1
fi
done
'';
-
}
-
''
-
mkdir -p $out
+
in
+
# The `gcloud` entrypoint script has some custom logic to determine the "real" cloud sdk
+
# root. In order to not trip up this logic and still have the symlink joined root we copy
+
# over this file. Since this file also has a Python wrapper, we need to copy that as well.
+
symlinkJoin {
+
name = "google-cloud-sdk-${google-cloud-sdk.version}";
+
inherit (google-cloud-sdk) meta;
-
# Install each component
-
for comp in $(cat $compsPath); do
-
echo "installing component $comp"
-
cp -dRf $comp/. $out
-
find $out -type d -exec chmod 744 {} +
-
done
+
paths = [
+
google-cloud-sdk
+
] ++ comps;
-
# Replace references to the original google-cloud-sdk with this one
-
find $out/google-cloud-sdk -type f -exec sed -i -e "s#${google-cloud-sdk}#$out#" {} \;
-
''
+
postBuild = ''
+
sed -i ';' $out/google-cloud-sdk/bin/.gcloud-wrapped
+
sed -i -e "s#${google-cloud-sdk}#$out#" "$out/google-cloud-sdk/bin/gcloud"
+
${installCheck}
+
'';
+
}
+2 -2
pkgs/tools/misc/diffoscope/default.nix
···
# Note: when upgrading this package, please run the list-missing-tools.sh script as described below!
python.pkgs.buildPythonApplication rec {
pname = "diffoscope";
-
version = "265";
+
version = "266";
src = fetchurl {
url = "https://diffoscope.org/archive/diffoscope-${version}.tar.bz2";
-
hash = "sha256-YE81R8lTOM3wmv/GIaIBqUq2O6UvnUaHjuXZ00yDU8U=";
+
hash = "sha256-whEFBUFl8yFsZOtOWHbDm6Tx6i6UJYmQ5Fz7svLqGgs=";
};
outputs = [
+2 -2
pkgs/tools/misc/graylog/5.2.nix
···
let
buildGraylog = callPackage ./graylog.nix {};
in buildGraylog {
-
version = "5.2.4";
-
sha256 = "sha256-TbZMRMLpYlg6wrsC+tDEk8sLYJ1nwJum/rL30CEGQcw=";
+
version = "5.2.7";
+
sha256 = "sha256-so9IHX0r3dmj5wLrLtQgrcXk+hu6E8/1d7wJu1XDcVA=";
maintainers = [ lib.maintainers.f2k1de ];
license = lib.licenses.sspl;
}
+2 -11
pkgs/tools/misc/mstflint/default.nix
···
stdenv.mkDerivation rec {
pname = "mstflint";
-
version = "4.26.0-1";
+
version = "4.28.0-1";
src = fetchurl {
url = "https://github.com/Mellanox/mstflint/releases/download/v${version}/mstflint-${version}.tar.gz";
-
hash = "sha256-P8XACcz6d8UTOhFFeTijfFOthBqnUghGlDj9K145sZ8=";
+
hash = "sha256-zvCDc/9wAqT3XBI9A5kOprnnm52Ek8oGe2Je3dKHti0=";
};
-
-
patches = [
-
# needed to introduce this with GCC 13. Remove, when https://github.com/Mellanox/mstflint/pull/916 is upstream.
-
(fetchpatch {
-
name = "elf.patch";
-
url = "https://patch-diff.githubusercontent.com/raw/Mellanox/mstflint/pull/916.patch";
-
hash = "sha256-quBdmiuzwThu4MkAaT74eJDlZwIcUZMrLZa8OIcO96w=";
-
})
-
];
nativeBuildInputs = [
autoconf
+2 -2
pkgs/tools/misc/ttyplot/default.nix
···
stdenv.mkDerivation rec {
pname = "ttyplot";
-
version = "1.6.2";
+
version = "1.6.4";
src = fetchFromGitHub {
owner = "tenox7";
repo = "ttyplot";
rev = version;
-
hash = "sha256-HBJvTDhp1CA96gRU2Q+lMxcFaZ+txXcmNb8Cg1BFiH4=";
+
hash = "sha256-yxAFqi3TgiKiZYgR891ahkwUqZLk/JDsjujOYmBjUtk=";
};
nativeBuildInputs = [
+22 -11
pkgs/tools/misc/undocker/default.nix
···
{ lib
, buildGoModule
-
, fetchFromSourcehut
+
, fetchFromGitea
+
, gnumake
}:
-
buildGoModule rec {
+
let
+
version = "1.2.2";
+
hash = "sha256-kBqNopcHpldU5oD6zoVjPjP84t12QFcbWBSNNgwImKg=";
+
src = fetchFromGitea {
+
domain = "git.jakstys.lt";
+
owner = "motiejus";
+
repo = "undocker";
+
rev = "v${version}";
+
hash = hash;
+
};
+
in
+
buildGoModule {
pname = "undocker";
-
version = "1.0.4";
+
inherit version src;
-
src = fetchFromSourcehut {
-
owner = "~motiejus";
-
repo = pname;
-
rev = "v${version}";
-
hash = "sha256-I+pTbr1lKELyYlyHrx2gB+aeZ3/PmcePQfXu1ckhKAk=";
-
};
+
nativeBuildInputs = [ gnumake ];
+
+
buildPhase = "make VSN=v${version} VSNHASH=${hash} undocker";
+
+
installPhase = "install -D undocker $out/bin/undocker";
vendorHash = null;
meta = with lib; {
-
homepage = "https://git.sr.ht/~motiejus/undocker";
+
homepage = "https://git.jakstys.lt/motiejus/undocker";
description = "A CLI tool to convert a Docker image to a flattened rootfs tarball";
license = licenses.asl20;
-
maintainers = with maintainers; [ jordanisaacs ];
+
maintainers = with maintainers; [ jordanisaacs motiejus ];
mainProgram = "undocker";
};
}
+12 -3
pkgs/tools/security/qdigidoc/default.nix
···
{ lib
, mkDerivation
, fetchurl
+
, fetchpatch
, cmake
, flatbuffers
, gettext
···
mkDerivation rec {
pname = "qdigidoc";
-
version = "4.4.0";
+
version = "4.5.1";
src = fetchurl {
url =
"https://github.com/open-eid/DigiDoc4-Client/releases/download/v${version}/qdigidoc4-${version}.tar.gz";
-
hash = "sha256-5zo0yoY0wufm9DWRIccxJ5g4DXn75nT4fd2h+5QP4oQ=";
+
hash = "sha256-grhSuexp5yd/s8h5AdmdSLBmQY85l9HKZ15oTTvC6PI=";
};
tsl = fetchurl {
···
sha256 = "1cikz36w9phgczcqnwk4k3mx3kk919wy2327jksmfa4cjfjq4a8d";
};
+
patches = [
+
# https://github.com/open-eid/DigiDoc4-Client/pull/1251
+
(fetchpatch {
+
url = "https://github.com/open-eid/DigiDoc4-Client/commit/30281d14c5fb5582832eafbc254b56f8d685227d.patch";
+
hash = "sha256-nv23NbPUogOhS8No3SMIrAcPChl+d1HkxnePpCKIoUw=";
+
})
+
];
+
nativeBuildInputs = [ cmake gettext pkg-config qttools ];
postPatch = ''
···
homepage = "https://www.id.ee/";
license = licenses.lgpl21Plus;
platforms = platforms.linux;
-
maintainers = with maintainers; [ mmahut yana ];
+
maintainers = with maintainers; [ flokli mmahut yana ];
};
}
+2 -2
pkgs/tools/security/web-eid-app/default.nix
···
mkDerivation rec {
pname = "web-eid-app";
-
version = "2.4.0";
+
version = "2.5.0";
src = fetchFromGitHub {
owner = "web-eid";
repo = "web-eid-app";
rev = "v${version}";
-
sha256 = "sha256-xWwguxs/121BFF1zhb/HxS9b1vTwQRemhPKOfHEXVZQ=";
+
sha256 = "sha256-CaMf7cRhZ8K6YAUG38B+ijNOKaOmaACqNabNfHZGT68=";
fetchSubmodules = true;
};
+3 -3
pkgs/tools/text/mdbook-open-on-gh/default.nix
···
rustPlatform.buildRustPackage rec {
pname = "mdbook-open-on-gh";
-
version = "2.4.2";
+
version = "2.4.3";
src = fetchFromGitHub {
owner = "badboy";
repo = pname;
rev = version;
-
hash = "sha256-ZExmOHvQApGZaepOuf3yXYe8NV3FpMtCqCR1KE6q4no=";
+
hash = "sha256-73738Vei7rQ67LQIOrHPGOtsBnHClaXClRWDmA5pP58=";
};
-
cargoHash = "sha256-WLCcYgkrH5fZvv3LZNEolBQUcTZC2URs6bIgzf4BtWU=";
+
cargoHash = "sha256-TQBjgQaoI88xGdhkffNWRH6aZ99WWbkkpiPu4LqBD3g=";
meta = with lib; {
description = "mdbook preprocessor to add a open-on-github link on every page";
+2 -2
pkgs/tools/wayland/mpvpaper/default.nix
···
stdenv.mkDerivation rec {
pname = "mpvpaper";
-
version = "1.5";
+
version = "1.6";
src = fetchFromGitHub {
owner = "GhostNaN";
repo = pname;
rev = version;
-
sha256 = "sha256-TlA2hmpHGe4PWb+Pe3cq2Hhce4NXVI1DnknseGmuFeY=";
+
sha256 = "sha256-/A2C6T7gP+VGON3Peaz2Y4rNC63UT+zYr4RNM2gdLUY=";
};
strictDeps = true;
-31
pkgs/tools/wayland/ydotool/default.nix
···
-
{ lib, stdenv, fetchFromGitHub, cmake, scdoc, util-linux, xorg }:
-
-
stdenv.mkDerivation rec {
-
pname = "ydotool";
-
version = "1.0.4";
-
-
src = fetchFromGitHub {
-
owner = "ReimuNotMoe";
-
repo = "ydotool";
-
rev = "v${version}";
-
hash = "sha256-MtanR+cxz6FsbNBngqLE+ITKPZFHmWGsD1mBDk0OVng=";
-
};
-
-
postPatch = ''
-
substituteInPlace Daemon/ydotoold.c \
-
--replace "/usr/bin/xinput" "${xorg.xinput}/bin/xinput"
-
substituteInPlace Daemon/ydotool.service.in \
-
--replace "/usr/bin/kill" "${util-linux}/bin/kill"
-
'';
-
-
strictDeps = true;
-
nativeBuildInputs = [ cmake scdoc ];
-
-
meta = with lib; {
-
homepage = "https://github.com/ReimuNotMoe/ydotool";
-
description = "Generic Linux command-line automation tool";
-
license = licenses.agpl3Plus;
-
maintainers = with maintainers; [ willibutz kraem ];
-
platforms = with platforms; linux;
-
};
-
}
+1
pkgs/top-level/aliases.nix
···
clang_10 = throw "clang_10 has been removed from nixpkgs"; # Added 2024-01-26
clang_11 = throw "clang_11 has been removed from nixpkgs"; # Added 2023-01-24
+
cq-editor = throw "cq-editor has been removed, as it uses a dependency that has been disabled since Python 3.8 and was last updated in 2021"; # Added 2024-05-13
### D ###
dagger = throw "'dagger' has been removed from nixpkgs, as the trademark policy of the upstream project is incompatible"; # Added 2023-10-16
+1 -11
pkgs/top-level/all-packages.nix
···
antlr = antlr4_10;
boost = boost177; # Configure checks for specific version.
icu = icu73;
-
protobuf = protobuf_21;
+
protobuf = protobuf_24;
};
mysql-shell-innovation = callPackage ../development/tools/mysql-shell/innovation.nix {
···
wtype = callPackage ../tools/wayland/wtype { };
-
ydotool = callPackage ../tools/wayland/ydotool { };
-
cambalache = callPackage ../development/tools/cambalache { };
cambrinary = python3Packages.callPackage ../applications/misc/cambrinary { };
···
cudaPackages_12_2 = callPackage ./cuda-packages.nix { cudaVersion = "12.2"; };
cudaPackages_12_3 = callPackage ./cuda-packages.nix { cudaVersion = "12.3"; };
cudaPackages_12 = cudaPackages_12_2; # Latest supported by cudnn
-
-
# Use the older cudaPackages for tensorflow and jax, as determined by cudnn
-
# compatibility: https://www.tensorflow.org/install/source#gpu
-
cudaPackagesGoogle = cudaPackages_11;
cudaPackages = recurseIntoAttrs cudaPackages_12;
···
cplay-ng = callPackage ../applications/audio/cplay-ng { };
-
cq-editor = libsForQt5.callPackage ../applications/graphics/cq-editor { };
-
cqrlog = callPackage ../applications/radio/cqrlog {
hamlib = hamlib_4;
···
fac-build = callPackage ../development/tools/build-managers/fac {
inherit (darwin.apple_sdk.frameworks) CoreServices;
-
-
treefmt = callPackage ../development/tools/treefmt { };
nufmt = callPackage ../development/tools/nufmt { };
+6
pkgs/top-level/python-aliases.nix
···
bt_proximity = bt-proximity; # added 2021-07-02
BTrees = btrees; # added 2023-02-19
cacheyou = throw "cacheyou has been removed, as it was no longer used for the only consumer pdm"; # added 2023-12-21
+
cadquery = throw "cadquery was removed, because it was disabled on all Python versions since 3.8 and marked as broken"; # added 2024-05-13
carrot = throw "carrot has been removed, as its development was discontinued in 2012"; # added 2022-01-18
cchardet = faust-cchardet; # added 2023-03-02
cepa = throw "cepa has been removed, as onionshare switched back to stem"; # added 2024-05-07
···
coronavirus = throw "coronavirus was removed, because the source is not providing the data anymore."; # added 2023-05-04
covCore = cov-core; # added 2024-01-03
cozy = throw "cozy was removed because it was not actually https://pypi.org/project/Cozy/."; # added 2022-01-14
+
cryptacular = throw "cryptacular was removed, because it was disabled on all Python versions since 3.6 and last updated in 2021"; # Added 2024-05-13
cryptography_vectors = "cryptography_vectors is no longer exposed in python*Packages because it is used for testing cryptography only."; # Added 2022-03-23
cx_Freeze = cx-freeze; # added 2023-08-02
cx_oracle = cx-oracle; # added 2024-01-03
···
GitPython = gitpython; # added 2022-10-28
glances = throw "glances has moved to pkgs.glances"; # added 2020-20-28
glasgow = throw "glasgow has been promoted to a top-level attribute name: `pkgs.glasgow`"; # added 2023-02-05
+
globre = throw "globre was removed, because it was disabled on all Python versions since 3.7 and last updated in 2020."; # added 2024-05-13
google_api_python_client = google-api-python-client; # added 2021-03-19
googleapis_common_protos = googleapis-common-protos; # added 2021-03-19
google-apitools = throw "google-apitools was removed because it is deprecated and unsupported by upstream"; # added 2023-02-25
···
pafy = throw "pafy has been removed because it is unmaintained and only a dependency of mps-youtube, itself superseded by yewtube"; # Added 2023-01-19
pam = python-pam; # added 2020-09-07.
PasteDeploy = pastedeploy; # added 2021-10-07
+
pathlib = throw "pathlib was removed, as it has been part of the Python standard library since version 3.4"; # added 2024-05-13
pathpy = path; # added 2022-04-12
pcbnew-transition = pcbnewtransition; # added 2024-03-21
pdfposter = throw "pdfposter was promoted to a top-level attribute"; # Added 2023-06-29
···
pur = throw "pur has been renamed to pkgs.pur"; # added 2021-11-08
pushbullet = pushbullet-py; # Added 2022-10-15
Pweave = pweave; # added 2023-02-19
+
pxml = throw "pxml was removed, because it was disabled on all Python versions since 3.8 and last updated in 2020."; # added 2024-05-13
py-radix = throw "py-radix has been removed, since it abandoned"; # added 2023-07-07
py_stringmatching = py-stringmatching; # added 2023-11-12
pyalmond = throw "pyalmond has been removed, since its API endpoints have been shutdown"; # added 2023-02-02
···
sphinx-navtree = throw "sphinx-navtree has been removed since it is not compatible with sphinx 3.3 and unmaintained"; # added 2023-07-03
sqlalchemy_migrate = sqlalchemy-migrate; # added 2021-10-28
SQLAlchemy-ImageAttach = throw "sqlalchemy-imageattach has been removed as it is incompatible with sqlalchemy 1.4 and unmaintained"; # added 2022-04-23
+
sqlsoup = throw "sqlsoup has been removed as it is incompatible with modern SQLAlchemy and unmaintained"; # added 2024-05-13
subdownloader = throw "subdownloader has been removed, because it depended on pyqt4"; # added 2022-06-09
suds-jurko = throw "suds-jurko has been removed, it was using setuptools 2to3 translation feature, which has been removed in setuptools 58"; # added 2023-02-27
supervise_api = supervise-api; # added 2023-10-11
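The entries added above follow the usual python-aliases.nix convention: the removed attribute stays defined but evaluates to a throw, so any expression that still references it aborts with the explanatory message rather than an opaque missing-attribute error. A hypothetical entry of the same shape:

  # Hypothetical module name, shown only to illustrate the alias shape.
  example-module = throw "example-module was removed, because it no longer builds on supported Python versions."; # added 2024-05-13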
+5 -14
pkgs/top-level/python-packages.nix
···
cachy = callPackage ../development/python-modules/cachy { };
-
cadquery = callPackage ../development/python-modules/cadquery {
-
inherit (pkgs.darwin.apple_sdk.frameworks) Cocoa;
-
};
-
caffe = toPythonModule (pkgs.caffe.override {
pythonSupport = true;
inherit (self) python numpy boost;
···
crownstone-sse = callPackage ../development/python-modules/crownstone-sse { };
crownstone-uart = callPackage ../development/python-modules/crownstone-uart { };
-
-
cryptacular = callPackage ../development/python-modules/cryptacular { };
cryptg = callPackage ../development/python-modules/cryptg { };
···
glfw = callPackage ../development/python-modules/glfw { };
glob2 = callPackage ../development/python-modules/glob2 { };
-
-
globre = callPackage ../development/python-modules/globre { };
globus-sdk = callPackage ../development/python-modules/globus-sdk { };
···
pathable = callPackage ../development/python-modules/pathable { };
pathlib2 = callPackage ../development/python-modules/pathlib2 { };
-
-
pathlib = callPackage ../development/python-modules/pathlib { };
pathlib-abc = callPackage ../development/python-modules/pathlib-abc { };
···
debugger = pkgs.gdb;
-
pxml = callPackage ../development/python-modules/pxml { };
-
py-air-control = callPackage ../development/python-modules/py-air-control { };
py-air-control-exporter = callPackage ../development/python-modules/py-air-control-exporter { };
···
sqlparse = callPackage ../development/python-modules/sqlparse { };
-
sqlsoup = callPackage ../development/python-modules/sqlsoup { };
-
sqltrie = callPackage ../development/python-modules/sqltrie { };
squarify = callPackage ../development/python-modules/squarify { };
···
tensorflow-bin = callPackage ../development/python-modules/tensorflow/bin.nix {
inherit (pkgs.config) cudaSupport;
+
# https://www.tensorflow.org/install/source#gpu
+
cudaPackages = pkgs.cudaPackages_11;
tensorflow-build = let
···
protobufTF = pkgs.protobuf_21.override {
abseil-cpp = pkgs.abseil-cpp_202301;
+
# https://www.tensorflow.org/install/source#gpu
+
cudaPackagesTF = pkgs.cudaPackages_11;
grpcTF = (pkgs.grpc.overrideAttrs (
oldAttrs: rec {
# nvcc fails on recent grpc versions, so we use the latest patch level
···
inherit (pkgs.darwin.apple_sdk.frameworks) Foundation Security;
flatbuffers-core = pkgs.flatbuffers;
flatbuffers-python = self.flatbuffers;
+
cudaPackages = compat.cudaPackagesTF;
protobuf-core = compat.protobufTF;
protobuf-python = compat.protobuf-pythonTF;
grpc = compat.grpcTF;
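With cudaPackagesGoogle removed above, the CUDA pin for the prebuilt TensorFlow wheels now lives in this callPackage argument. Downstream users who need a different toolkit can in principle override it; a minimal sketch, assuming bin.nix continues to accept the cudaSupport and cudaPackages arguments shown here and that the chosen cudaPackages set satisfies upstream's cuDNN requirements:

  let
    pkgs = import <nixpkgs> { };
  in
  # Sketch: pick the CUDA toolkit used by the prebuilt TensorFlow wheels.
  pkgs.python3Packages.tensorflow-bin.override {
    cudaSupport = true;
    cudaPackages = pkgs.cudaPackages_11;
  }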