{ config, lib, options, pkgs, utils, ... }:
#
# TODO: zfs tunables

with utils;
with lib;

let

  cfgZfs = config.boot.zfs;
  optZfs = options.boot.zfs;
  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
  cfgSnapshots = config.services.zfs.autoSnapshot;
  cfgSnapFlags = cfgSnapshots.flags;
  cfgScrub = config.services.zfs.autoScrub;
  cfgTrim = config.services.zfs.trim;
  cfgZED = config.services.zfs.zed;

  inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
  inSystem = any (fs: fs == "zfs") config.boot.supportedFilesystems;

  autosnapPkg = pkgs.zfstools.override {
    zfs = cfgZfs.package;
  };

  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";

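  # e.g. datasetToPool "rpool/safe/root" == "rpool"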
  datasetToPool = x: elemAt (splitString "/" x) 0;

  fsToPool = fs: datasetToPool fs.device;

  zfsFilesystems = filter (x: x.fsType == "zfs") config.system.build.fileSystems;

  allPools = unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);

  rootPools = unique (map fsToPool (filter fsNeededForBoot zfsFilesystems));

  dataPools = unique (filter (pool: !(elem pool rootPools)) allPools);

  snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];

  # When importing ZFS pools, there's one difficulty: These scripts may run
  # before the backing devices (physical HDDs, etc.) of the pool have been
  # scanned and initialized.
  #
  # An attempted import with all devices missing will just fail and can be
  # retried, but an import where e.g. two out of three disks in a three-way
  # mirror are missing will succeed. This is a problem: When the missing disks
  # are later discovered, they won't be automatically set online, rendering the
  # pool redundancy-less (and far slower) until such time as the system reboots.
  #
  # The solution is below: poolReady checks the status of an un-imported pool
  # to see if *every* device is available -- in which case the pool will be
  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
  #
  # The import scripts then loop over this, waiting until the pool is ready or a
  # sufficient amount of time has passed that we can assume it won't be. In the
  # latter case they make one last attempt at importing, allowing the system to
  # (eventually) boot even with a degraded pool.
  importLib = {zpoolCmd, awkCmd, cfgZfs}: ''
    for o in $(cat /proc/cmdline); do
      case $o in
        zfs_force|zfs_force=1|zfs_force=y)
          ZFS_FORCE="-f"
          ;;
      esac
    done
    poolReady() {
      pool="$1"
      state="$("${zpoolCmd}" import 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
      if [[ "$state" = "ONLINE" ]]; then
        return 0
      else
        echo "Pool $pool in state $state, waiting"
        return 1
      fi
    }
    poolImported() {
      pool="$1"
      "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
    }
    poolImport() {
      pool="$1"
      "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
    }
  '';

  getPoolFilesystems = pool:
    filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;

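  # Given a stage prefix and a pool name, list the systemd mount units for that
  # pool's filesystems; e.g. getPoolMounts "/sysroot" "rpool" yields names such as
  # "sysroot.mount" and "sysroot-home.mount" (assuming datasets mounted at "/" and "/home").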
  getPoolMounts = prefix: pool:
    let
      poolFSes = getPoolFilesystems pool;

      # Remove the "/" suffix because even though most mountpoints
      # won't have it, the "/" mountpoint will, and we can't have the
      # trailing slash in "/sysroot/" in stage 1.
      mountPoint = fs: escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));

      hasUsr = lib.any (fs: fs.mountPoint == "/usr") poolFSes;
    in
      map (x: "${mountPoint x}.mount") poolFSes
      ++ lib.optional hasUsr "sysusr-usr.mount";

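  # Build the `zfs list` invocation used by the import services below; it prints
  # tab-separated "name keylocation keystatus" lines for the datasets whose
  # encryption keys may need to be loaded.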
  getKeyLocations = pool: if isBool cfgZfs.requestEncryptionCredentials then {
    hasKeys = cfgZfs.requestEncryptionCredentials;
    command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus ${pool}";
  } else let
    keys = filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
  in {
    hasKeys = keys != [];
    command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus ${toString keys}";
  };

  createImportService = { pool, systemd, force, prefix ? "" }:
    nameValuePair "zfs-import-${pool}" {
      description = "Import ZFS pool \"${pool}\"";
      # We wait for systemd-udev-settle to ensure devices are available,
      # but don't *require* it, because mounts shouldn't be killed if it's stopped.
      # In the future, hopefully someone will complete this:
      # https://github.com/zfsonlinux/zfs/pull/4943
      wants = [ "systemd-udev-settle.service" ];
      after = [
        "systemd-udev-settle.service"
        "systemd-modules-load.service"
        "systemd-ask-password-console.service"
      ];
      requiredBy = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
      before = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
      unitConfig = {
        DefaultDependencies = "no";
      };
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
      environment.ZFS_FORCE = optionalString force "-f";
      script = let
        keyLocations = getKeyLocations pool;
      in (importLib {
        # See comments at importLib definition.
        zpoolCmd = "${cfgZfs.package}/sbin/zpool";
        awkCmd = "${pkgs.gawk}/bin/awk";
        inherit cfgZfs;
      }) + ''
        if ! poolImported "${pool}"; then
          echo -n "importing ZFS pool \"${pool}\"..."
          # Loop over the import until it succeeds, because the devices needed may not be discovered yet.
          for trial in `seq 1 60`; do
            poolReady "${pool}" && poolImport "${pool}" && break
            sleep 1
          done
          poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
        fi
        if poolImported "${pool}"; then
          ${optionalString keyLocations.hasKeys ''
            ${keyLocations.command} | while IFS=$'\t' read ds kl ks; do
              {
              if [[ "$ks" != unavailable ]]; then
                continue
              fi
              case "$kl" in
                none )
                  ;;
                prompt )
                  tries=3
                  success=false
                  while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
                    ${systemd}/bin/systemd-ask-password --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
                      && success=true \
                      || tries=$((tries - 1))
                  done
                  [[ $success = true ]]
                  ;;
                * )
                  ${cfgZfs.package}/sbin/zfs load-key "$ds"
                  ;;
              esac
              } < /dev/null # To protect the outer `while read` in case anything reads stdin
            done
          ''}
          echo "Successfully imported ${pool}"
        else
          exit 1
        fi
      '';
    };

  zedConf = generators.toKeyValue {
    mkKeyValue = generators.mkKeyValueDefault {
      mkValueString = v:
        if isInt v then toString v
        else if isString v then "\"${v}\""
        else if true == v then "1"
        else if false == v then "0"
        else if isList v then "\"" + (concatStringsSep " " v) + "\""
        else abort "zedConf: unsupported value ${toString v}";
    } "=";
  } cfgZED.settings;
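
  # As an illustration of the rendering above, settings such as
  #   { ZED_EMAIL_ADDR = [ "root" ]; ZED_NOTIFY_INTERVAL_SECS = 3600; ZED_NOTIFY_VERBOSE = false; }
  # produce a zed.rc along the lines of:
  #   ZED_EMAIL_ADDR="root"
  #   ZED_NOTIFY_INTERVAL_SECS=3600
  #   ZED_NOTIFY_VERBOSE=0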
in

{

  imports = [
    (mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
  ];

  ###### interface

  options = {
    boot.zfs = {
      package = mkOption {
        readOnly = true;
        type = types.package;
        default = if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs;
        defaultText = literalExpression "if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs";
        description = lib.mdDoc "Configured ZFS userland tools package.";
      };

      enabled = mkOption {
        readOnly = true;
        type = types.bool;
        default = inInitrd || inSystem;
        defaultText = literalMD "`true` if ZFS filesystem support is enabled";
        description = lib.mdDoc "True if ZFS filesystem support is enabled.";
      };

      enableUnstable = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Use the unstable zfs package. This might be an option if the latest
          kernel is not yet supported by a published release of ZFS. Enabling
          this option will install a development version of ZFS on Linux. The
          version will have already passed an extensive test suite, but it is
          more likely to hit an undiscovered bug compared to running a released
          version of ZFS on Linux.
        '';
      };

      allowHibernation = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Allow hibernation support. This may be an unsafe option depending on your
          setup; make sure NOT to use swap on ZFS.
        '';
      };

      extraPools = mkOption {
        type = types.listOf types.str;
        default = [];
        example = [ "tank" "data" ];
        description = lib.mdDoc ''
          Names or GUIDs of extra ZFS pools that you wish to import during boot.

          Usually this is not necessary. Instead, you should set the mountpoint property
          of ZFS filesystems to `legacy` and add the ZFS filesystems to
          NixOS's {option}`fileSystems` option, which makes NixOS automatically
          import the associated pool.

          However, in some cases (e.g. if you have many filesystems) it may be preferable
          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
          will not be managing those filesystems, you will need to specify the ZFS pool here
          so that NixOS automatically imports it on every boot.
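
          For reference, the usual alternative (hypothetical pool and dataset names): a
          dataset created with `zfs create -o mountpoint=legacy tank/data` is mounted
          through {option}`fileSystems` instead of being listed here:

          ```
          fileSystems."/data" = {
            device = "tank/data";
            fsType = "zfs";
          };
          ```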
        '';
      };

      devNodes = mkOption {
        type = types.path;
        default = "/dev/disk/by-id";
        description = lib.mdDoc ''
          Name of directory from which to import ZFS devices.

          This should be a path under /dev containing stable names for all devices needed, as
          import may fail if device nodes are renamed concurrently with a device failing.
        '';
      };

      forceImportRoot = mkOption {
        type = types.bool;
        default = true;
        description = lib.mdDoc ''
          Forcibly import the ZFS root pool(s) during early boot.

          This is enabled by default for backwards compatibility purposes, but it is highly
          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
          to protect your ZFS pools.

          If you set this option to `false` and NixOS subsequently fails to
          boot because it cannot import the root pool, you should boot with the
          `zfs_force=1` option as a kernel parameter (e.g. by manually
          editing the kernel params in grub during boot). You should only need to do this
          once.
        '';
      };

      forceImportAll = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Forcibly import all ZFS pool(s).

          If you set this option to `false` and NixOS subsequently fails to
          import your non-root ZFS pool(s), you should manually import each pool with
          "zpool import -f \<pool-name\>", and then reboot. You should only need to do
          this once.
        '';
      };

      requestEncryptionCredentials = mkOption {
        type = types.either types.bool (types.listOf types.str);
        default = true;
        example = [ "tank" "data" ];
        description = lib.mdDoc ''
          If true, the encryption keys or passwords for all encrypted datasets
          are requested on import. To only decrypt selected datasets, supply a list
          of dataset names instead. For root pools the encryption key can be supplied
          either via an interactive prompt (keylocation=prompt) or from a file
          (keylocation=file://).
        '';
      };

      passwordTimeout = mkOption {
        type = types.int;
        default = 0;
        description = lib.mdDoc ''
          Timeout in seconds to wait for password entry when decrypting datasets at boot.

          Defaults to 0, which waits forever.
        '';
      };

      removeLinuxDRM = lib.mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Linux 6.2 dropped some kernel symbols on aarch64 that are required by ZFS.
          Enabling this option will bring them back to allow this kernel version.
          Note that in some jurisdictions this may be illegal as it might be considered
          removing copyright protection from the code.
          See https://www.ifross.org/?q=en/artikel/ongoing-dispute-over-value-exportsymbolgpl-function for further information.

          If you configure your kernel package with `zfs.latestCompatibleLinuxPackages`, you will
          also need to pass removeLinuxDRM to that package like this:

          ```
          { pkgs, ... }: {
            boot.kernelPackages = (pkgs.zfs.override {
              removeLinuxDRM = pkgs.hostPlatform.isAarch64;
            }).latestCompatibleLinuxPackages;

            boot.zfs.removeLinuxDRM = true;
          }
          ```
        '';
      };
    };

    services.zfs.autoSnapshot = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = lib.mdDoc ''
          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
          Note that you must set the `com.sun:auto-snapshot`
          property to `true` on all datasets which you wish
          to auto-snapshot.

          You can override whether a child dataset is auto-snapshotted for a given
          interval by setting its flag, e.g.:
          `zfs set com.sun:auto-snapshot:weekly=false DATASET`
        '';
      };

      flags = mkOption {
        default = "-k -p";
        example = "-k -p --utc";
        type = types.str;
        description = lib.mdDoc ''
          Flags to pass to the zfs-auto-snapshot command.

          Run `zfs-auto-snapshot` (without any arguments) to
          see available flags.

          If it's not too inconvenient for snapshots to have timestamps in UTC,
          it is suggested that you append `--utc` to the list
          of default options (see example).

          Otherwise, snapshot names can cause name conflicts or apparent time
          reversals due to daylight saving time, timezone or other date/time changes.
        '';
      };

      frequent = mkOption {
        default = 4;
        type = types.int;
        description = lib.mdDoc ''
          Number of frequent (15-minute) auto-snapshots that you wish to keep.
        '';
      };

      hourly = mkOption {
        default = 24;
        type = types.int;
        description = lib.mdDoc ''
          Number of hourly auto-snapshots that you wish to keep.
        '';
      };

      daily = mkOption {
        default = 7;
        type = types.int;
        description = lib.mdDoc ''
          Number of daily auto-snapshots that you wish to keep.
        '';
      };

      weekly = mkOption {
        default = 4;
        type = types.int;
        description = lib.mdDoc ''
          Number of weekly auto-snapshots that you wish to keep.
        '';
      };

      monthly = mkOption {
        default = 12;
        type = types.int;
        description = lib.mdDoc ''
          Number of monthly auto-snapshots that you wish to keep.
        '';
      };
    };

    services.zfs.trim = {
      enable = mkOption {
        description = lib.mdDoc "Whether to enable periodic TRIM on all ZFS pools.";
        default = true;
        example = false;
        type = types.bool;
      };

      interval = mkOption {
        default = "weekly";
        type = types.str;
        example = "daily";
        description = lib.mdDoc ''
          How often we run trim. For most desktop and server systems
          a sufficient trimming frequency is once a week.

          The format is described in
          {manpage}`systemd.time(7)`.
        '';
      };
    };

    services.zfs.autoScrub = {
      enable = mkEnableOption (lib.mdDoc "periodic scrubbing of ZFS pools");

      interval = mkOption {
        default = "Sun, 02:00";
        type = types.str;
        example = "daily";
        description = lib.mdDoc ''
          Systemd calendar expression for when to scrub ZFS pools. See
          {manpage}`systemd.time(7)`.
        '';
      };

      pools = mkOption {
        default = [];
        type = types.listOf types.str;
        example = [ "tank" ];
        description = lib.mdDoc ''
          List of ZFS pools to periodically scrub. If empty, all pools
          will be scrubbed.
        '';
      };
    };

    services.zfs.expandOnBoot = mkOption {
      type = types.either (types.enum [ "disabled" "all" ]) (types.listOf types.str);
      default = "disabled";
      example = [ "tank" "dozer" ];
      description = lib.mdDoc ''
        After importing, expand each device in the specified pools.

        Set the value to the plain string "all" to expand all pools on boot:

            services.zfs.expandOnBoot = "all";

        or set the value to a list of pools to expand the disks of specific pools:

            services.zfs.expandOnBoot = [ "tank" "dozer" ];
      '';
    };

    services.zfs.zed = {
      enableMail = mkEnableOption (lib.mdDoc "ZED's ability to send emails") // {
        default = cfgZfs.package.enableMail;
        defaultText = literalExpression "config.${optZfs.package}.enableMail";
      };

      settings = mkOption {
        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
        example = literalExpression ''
          {
            ZED_DEBUG_LOG = "/tmp/zed.debug.log";

            ZED_EMAIL_ADDR = [ "root" ];
            ZED_EMAIL_PROG = "mail";
            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";

            ZED_NOTIFY_INTERVAL_SECS = 3600;
            ZED_NOTIFY_VERBOSE = false;

            ZED_USE_ENCLOSURE_LEDS = true;
            ZED_SCRUB_AFTER_RESILVER = false;
          }
        '';
        description = lib.mdDoc ''
          ZFS Event Daemon /etc/zfs/zed.d/zed.rc content

          See
          {manpage}`zed(8)`
          for details on ZED and the scripts in /etc/zfs/zed.d to find the possible variables
        '';
      };
    };
  };

  ###### implementation

  config = mkMerge [
    (mkIf cfgZfs.enabled {
      assertions = [
        {
          assertion = cfgZED.enableMail -> cfgZfs.package.enableMail;
          message = ''
            To allow ZED to send emails, ZFS needs to be configured to enable
            this. To do so, one must override the `zfs` package and set
            `enableMail` to true.
          '';
        }
        {
          assertion = config.networking.hostId != null;
          message = "ZFS requires networking.hostId to be set";
        }
        {
          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
        }
        {
          assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
          message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
        }
        {
          assertion = !(elem "" allPools);
          message = ''
            Automatic pool detection found an empty pool name, which can't be used.
            Hint: for `fileSystems` entries with `fsType = zfs`, the `device` attribute
            should be a zfs dataset name, like `device = "pool/data/set"`.
            This error can be triggered by using an absolute path, such as `"/dev/disk/..."`.
          '';
        }
      ];

      boot = {
        kernelModules = [ "zfs" ];
        # https://github.com/openzfs/zfs/issues/260
        # https://github.com/openzfs/zfs/issues/12842
        # https://github.com/NixOS/nixpkgs/issues/106093
        kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];

        extraModulePackages = let
          kernelPkg = if config.boot.zfs.enableUnstable then
            config.boot.kernelPackages.zfsUnstable
          else
            config.boot.kernelPackages.zfs;
        in [
          (kernelPkg.override { inherit (cfgZfs) removeLinuxDRM; })
        ];
      };

      boot.initrd = mkIf inInitrd {
        kernelModules = [ "zfs" ] ++ optional (!cfgZfs.enableUnstable) "spl";
        extraUtilsCommands =
          mkIf (!config.boot.initrd.systemd.enable) ''
            copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
            copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
            copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
          '';
        extraUtilsCommandsTest =
          mkIf (!config.boot.initrd.systemd.enable) ''
            $out/bin/zfs --help >/dev/null 2>&1
            $out/bin/zpool --help >/dev/null 2>&1
          '';
        postDeviceCommands = mkIf (!config.boot.initrd.systemd.enable) (concatStringsSep "\n" ([''
            ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"
          ''] ++ [(importLib {
            # See comments at importLib definition.
            zpoolCmd = "zpool";
            awkCmd = "awk";
            inherit cfgZfs;
          })] ++ (map (pool: ''
            echo -n "importing root ZFS pool \"${pool}\"..."
            # Loop over the import until it succeeds, because the devices needed may not be discovered yet.
            if ! poolImported "${pool}"; then
              for trial in `seq 1 60`; do
                poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
                sleep 1
                echo -n .
              done
              echo
              if [[ -n "$msg" ]]; then
                echo "$msg";
              fi
              poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
            fi
            ${if isBool cfgZfs.requestEncryptionCredentials
              then optionalString cfgZfs.requestEncryptionCredentials ''
                zfs load-key -a
              ''
              else concatMapStrings (fs: ''
                zfs load-key -- ${escapeShellArg fs}
              '') (filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)}
          '') rootPools)));

        # Systemd in stage 1
        systemd = mkIf config.boot.initrd.systemd.enable {
          packages = [cfgZfs.package];
          services = listToAttrs (map (pool: createImportService {
            inherit pool;
            systemd = config.boot.initrd.systemd.package;
            force = cfgZfs.forceImportRoot;
            prefix = "/sysroot";
          }) rootPools);
          targets.zfs-import.wantedBy = [ "zfs.target" ];
          targets.zfs.wantedBy = [ "initrd.target" ];
          extraBin = {
            zpool = "${cfgZfs.package}/sbin/zpool";
            zfs = "${cfgZfs.package}/sbin/zfs";
            awk = "${pkgs.gawk}/bin/awk";
          };
        };
      };

      systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source = pkgs.writeShellScript "zpool-sync-shutdown" ''
        exec ${cfgZfs.package}/bin/zpool sync
      '';
      systemd.shutdownRamfs.storePaths = ["${cfgZfs.package}/bin/zpool"];

      # TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567.
      # To avoid breaking people's bootloaders, and because probably not everybody reads
      # the release notes that thoroughly, also check inSystem here.
      boot.loader.grub = mkIf (inInitrd || inSystem) {
        zfsSupport = true;
      };

      services.zfs.zed.settings = {
        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault "${pkgs.mailutils}/bin/mail");
        PATH = lib.makeBinPath [
          cfgZfs.package
          pkgs.coreutils
          pkgs.curl
          pkgs.gawk
          pkgs.gnugrep
          pkgs.gnused
          pkgs.nettools
          pkgs.util-linux
        ];
      };

      # ZFS already has its own scheduler. Without this rule, my (@Artturin) computer
      # froze for a second whenever I ran `nix build` on something.
      services.udev.extraRules = ''
        ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
      '';

      environment.etc = genAttrs
        (map
          (file: "zfs/zed.d/${file}")
          [
            "all-syslog.sh"
            "pool_import-led.sh"
            "resilver_finish-start-scrub.sh"
            "statechange-led.sh"
            "vdev_attach-led.sh"
            "zed-functions.sh"
            "data-notify.sh"
            "resilver_finish-notify.sh"
            "scrub_finish-notify.sh"
            "statechange-notify.sh"
            "vdev_clear-led.sh"
          ]
        )
        (file: { source = "${cfgZfs.package}/etc/${file}"; })
      // {
        "zfs/zed.d/zed.rc".text = zedConf;
        "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
      };

      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
      environment.systemPackages = [ cfgZfs.package ]
        ++ optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags

      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
      systemd.packages = [ cfgZfs.package ];

      # Export kernel_neon_* symbols again.
      # This change is necessary until ZFS figures out a solution
      # with upstream or in their build system to fill the gap for
      # this symbol.
      # In the meantime, we restore what was once a working piece of code
      # in the kernel.
      boot.kernelPatches = lib.optional (cfgZfs.removeLinuxDRM && pkgs.stdenv.hostPlatform.system == "aarch64-linux") {
        name = "export-neon-symbols-as-gpl";
        patch = pkgs.fetchpatch {
          url = "https://github.com/torvalds/linux/commit/aaeca98456431a8d9382ecf48ac4843e252c07b3.patch";
          hash = "sha256-L2g4G1tlWPIi/QRckMuHDcdWBcKpObSWSRTvbHRIwIk=";
          revert = true;
        };
      };

      systemd.services = let
        createImportService' = pool: createImportService {
          inherit pool;
          systemd = config.systemd.package;
          force = cfgZfs.forceImportAll;
        };

        # This forces a sync of any ZFS pools prior to poweroff, even if they're set
        # to sync=disabled.
        createSyncService = pool:
          nameValuePair "zfs-sync-${pool}" {
            description = "Sync ZFS pool \"${pool}\"";
            wantedBy = [ "shutdown.target" ];
            unitConfig = {
              DefaultDependencies = false;
            };
            serviceConfig = {
              Type = "oneshot";
              RemainAfterExit = true;
            };
            script = ''
              ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
            '';
          };

        createZfsService = serv:
          nameValuePair serv {
            after = [ "systemd-modules-load.service" ];
            wantedBy = [ "zfs.target" ];
          };

      in listToAttrs (map createImportService' dataPools ++
                      map createSyncService allPools ++
                      map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);

      systemd.targets.zfs-import.wantedBy = [ "zfs.target" ];

      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
    })

    (mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
      systemd.services."zpool-expand@" = {
        description = "Expand ZFS pools";
        after = [ "zfs.target" ];

        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
        };

        scriptArgs = "%i";
        path = [ cfgZfs.package ];

        script = ''
          pool=$1

          echo "Expanding all devices for $pool."

          ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
        '';
      };

      systemd.services."zpool-expand-pools" =
        let
          # Create a string, to be interpolated into a bash script,
          # which enumerates all of the pools to expand.
          # If `expandOnBoot` is set to the string "all", we want to dynamically
          # expand every pool. Otherwise we want to enumerate
          # just the specifically provided list of pools.
          poolListProvider = if cfgExpandOnBoot == "all"
            then "$(zpool list -H -o name)"
            else lib.escapeShellArgs cfgExpandOnBoot;
        in
        {
          description = "Expand specified ZFS pools";
          wantedBy = [ "default.target" ];
          after = [ "zfs.target" ];

          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
          };

          path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];

          script = ''
            for pool in ${poolListProvider}; do
              systemctl start --no-block "zpool-expand@$pool"
            done
          '';
        };
    })

    (mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
      systemd.services = let
        descr = name: if name == "frequent" then "15 mins"
          else if name == "hourly" then "hour"
          else if name == "daily" then "day"
          else if name == "weekly" then "week"
          else if name == "monthly" then "month"
          else throw "unknown snapshot name";
        numSnapshots = name: builtins.getAttr name cfgSnapshots;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            description = "ZFS auto-snapshotting every ${descr snapName}";
            after = [ "zfs-import.target" ];
            serviceConfig = {
              Type = "oneshot";
              ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
            };
            restartIfChanged = false;
          };
        }) snapshotNames);

      systemd.timers = let
        timer = name: if name == "frequent" then "*:0,15,30,45" else name;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            wantedBy = [ "timers.target" ];
            timerConfig = {
              OnCalendar = timer snapName;
              Persistent = "yes";
            };
          };
        }) snapshotNames);
    })

    (mkIf (cfgZfs.enabled && cfgScrub.enable) {
      systemd.services.zfs-scrub = {
        description = "ZFS pools scrubbing";
        after = [ "zfs-import.target" ];
        serviceConfig = {
          Type = "simple";
        };
        script = ''
          ${cfgZfs.package}/bin/zpool scrub -w ${
            if cfgScrub.pools != [] then
              (concatStringsSep " " cfgScrub.pools)
            else
              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
          }
        '';
      };

      systemd.timers.zfs-scrub = {
        wantedBy = [ "timers.target" ];
        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
        timerConfig = {
          OnCalendar = cfgScrub.interval;
          Persistent = "yes";
        };
      };
    })

    (mkIf (cfgZfs.enabled && cfgTrim.enable) {
      systemd.services.zpool-trim = {
        description = "ZFS pools trim";
        after = [ "zfs-import.target" ];
        path = [ cfgZfs.package ];
        startAt = cfgTrim.interval;
        # By default we ignore errors returned by the trim command, in case:
        # - HDDs are mixed with SSDs
        # - an SSD is in a pool that is currently being trimmed
        # - there are only HDDs and we would otherwise put the system in a degraded state
        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool; done || true' ";
      };

      systemd.timers.zpool-trim.timerConfig.Persistent = "yes";
    })
  ];
}