{ config, lib, options, pkgs, utils, ... }:
#
# TODO: zfs tunables

with utils;
with lib;

let

  cfgZfs = config.boot.zfs;
  optZfs = options.boot.zfs;
  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
  cfgSnapshots = config.services.zfs.autoSnapshot;
  cfgSnapFlags = cfgSnapshots.flags;
  cfgScrub = config.services.zfs.autoScrub;
  cfgTrim = config.services.zfs.trim;
  cfgZED = config.services.zfs.zed;

  inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
  inSystem = any (fs: fs == "zfs") config.boot.supportedFilesystems;

  autosnapPkg = pkgs.zfstools.override {
    zfs = cfgZfs.package;
  };

  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";

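  # A pool name is the first component of a dataset name,
  # e.g. datasetToPool "rpool/safe/home" == "rpool".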
  datasetToPool = x: elemAt (splitString "/" x) 0;

  fsToPool = fs: datasetToPool fs.device;

  zfsFilesystems = filter (x: x.fsType == "zfs") config.system.build.fileSystems;

  allPools = unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);

  rootPools = unique (map fsToPool (filter fsNeededForBoot zfsFilesystems));

  dataPools = unique (filter (pool: !(elem pool rootPools)) allPools);
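  # As an illustration (names hypothetical): with ZFS fileSystems on
  # "rpool/root" and "tank/media", plus extraPools = [ "backup" ], this gives
  #   allPools  == [ "rpool" "tank" "backup" ]
  #   rootPools == [ "rpool" ]          (assuming only "/" is needed for boot)
  #   dataPools == [ "tank" "backup" ]  (imported later by systemd services)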

  snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];

  # When importing ZFS pools, there's one difficulty: these scripts may run
  # before the backing devices (physical HDDs, etc.) of the pool have been
  # scanned and initialized.
  #
  # An attempted import with all devices missing will just fail and can be
  # retried, but an import where e.g. two out of three disks in a three-way
  # mirror are missing will succeed. This is a problem: when the missing disks
  # are later discovered, they won't be automatically set online, leaving the
  # pool without redundancy (and far slower) until the system reboots.
  #
  # The solution is below. poolReady checks the status of an un-imported
  # pool to see if *every* device is available -- in which case the pool will be
  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
  #
  # The import scripts then loop over this, waiting until the pool is ready or
  # enough time has passed that we can assume it won't be. In the latter case
  # they make one last attempt at importing, allowing the system to
  # (eventually) boot even with a degraded pool.
  importLib = { zpoolCmd, awkCmd, cfgZfs }: ''
    for o in $(cat /proc/cmdline); do
      case $o in
        zfs_force|zfs_force=1|zfs_force=y)
          ZFS_FORCE="-f"
          ;;
      esac
    done
    poolReady() {
      pool="$1"
      state="$("${zpoolCmd}" import 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
      if [[ "$state" = "ONLINE" ]]; then
        return 0
      else
        echo "Pool $pool in state $state, waiting"
        return 1
      fi
    }
    poolImported() {
      pool="$1"
      "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
    }
    poolImport() {
      pool="$1"
      "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
    }
  '';
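
  # A sketch of how the generated helpers combine (pool name is a placeholder);
  # the real call sites are in createImportService and postDeviceCommands below:
  #   poolImported "rpool" || { poolReady "rpool" && poolImport "rpool"; }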

  getPoolFilesystems = pool:
    filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;

  getPoolMounts = prefix: pool:
    let
      # Remove the "/" suffix because even though most mountpoints
      # won't have it, the "/" mountpoint will, and we can't have the
      # trailing slash in "/sysroot/" in stage 1.
      mountPoint = fs: escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));
    in
      map (x: "${mountPoint x}.mount") (getPoolFilesystems pool);
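
  # For example (unit names illustrative): getPoolMounts "/sysroot" "rpool"
  # might evaluate to [ "sysroot.mount" "sysroot-home.mount" ].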

  getKeyLocations = pool:
    if isBool cfgZfs.requestEncryptionCredentials
    then "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus ${pool}"
    else "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus ${toString (filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)}";
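
  # The resulting command prints one line per dataset with tab-separated
  # name, keylocation and keystatus, e.g. (hypothetical output):
  #   rpool/safe    prompt    unavailable
  # The import services below read these fields as (ds, kl, ks).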

  createImportService = { pool, systemd, force, prefix ? "" }:
    nameValuePair "zfs-import-${pool}" {
      description = "Import ZFS pool \"${pool}\"";
      # We need systemd-udev-settle to ensure devices are available.
      # In the future, hopefully someone will complete this:
      # https://github.com/zfsonlinux/zfs/pull/4943
      requires = [ "systemd-udev-settle.service" ];
      after = [
        "systemd-udev-settle.service"
        "systemd-modules-load.service"
        "systemd-ask-password-console.service"
      ];
      wantedBy = (getPoolMounts prefix pool) ++ [ "local-fs.target" ];
      before = (getPoolMounts prefix pool) ++ [ "local-fs.target" ];
      unitConfig = {
        DefaultDependencies = "no";
      };
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
      environment.ZFS_FORCE = optionalString force "-f";
      script = (importLib {
        # See comments at importLib definition.
        zpoolCmd = "${cfgZfs.package}/sbin/zpool";
        awkCmd = "${pkgs.gawk}/bin/awk";
        inherit cfgZfs;
      }) + ''
        poolImported "${pool}" && exit
        echo -n "importing ZFS pool \"${pool}\"..."
        # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
        for trial in $(seq 1 60); do
          poolReady "${pool}" && poolImport "${pool}" && break
          sleep 1
        done
        poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
        if poolImported "${pool}"; then
          ${optionalString (if isBool cfgZfs.requestEncryptionCredentials
                            then cfgZfs.requestEncryptionCredentials
                            else cfgZfs.requestEncryptionCredentials != []) ''
            ${getKeyLocations pool} | while IFS=$'\t' read ds kl ks; do
              {
                if [[ "$ks" != unavailable ]]; then
                  continue
                fi
                case "$kl" in
                  none )
                    ;;
                  prompt )
                    tries=3
                    success=false
                    while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
                      ${systemd}/bin/systemd-ask-password "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
                        && success=true \
                        || tries=$((tries - 1))
                    done
                    [[ $success = true ]]
                    ;;
                  * )
                    ${cfgZfs.package}/sbin/zfs load-key "$ds"
                    ;;
                esac
              } < /dev/null # To protect the while-read loop in case anything reads stdin
            done
          ''}
          echo "Successfully imported ${pool}"
        else
          exit 1
        fi
      '';
    };

  zedConf = generators.toKeyValue {
    mkKeyValue = generators.mkKeyValueDefault {
      mkValueString = v:
        if isInt v then toString v
        else if isString v then "\"${v}\""
        else if true == v then "1"
        else if false == v then "0"
        else if isList v then "\"" + (concatStringsSep " " v) + "\""
        else throw "zedConf: unsupported value type: ${toString v}";
    } "=";
  } cfgZED.settings;
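
  # As a sketch: settings = { ZED_EMAIL_ADDR = [ "root" ]; ZED_NOTIFY_VERBOSE = false; }
  # renders in zed.rc as
  #   ZED_EMAIL_ADDR="root"
  #   ZED_NOTIFY_VERBOSE=0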
in

{

  imports = [
    (mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
  ];

  ###### interface

  options = {
    boot.zfs = {
      package = mkOption {
        readOnly = true;
        type = types.package;
        default = if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs;
        defaultText = literalExpression "if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs";
        description = lib.mdDoc "Configured ZFS userland tools package.";
      };

      enabled = mkOption {
        readOnly = true;
        type = types.bool;
        default = inInitrd || inSystem;
        defaultText = literalMD "`true` if ZFS filesystem support is enabled";
        description = lib.mdDoc "True if ZFS filesystem support is enabled.";
      };

      enableUnstable = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Use the unstable zfs package. This can be useful if the latest
          kernel is not yet supported by a published release of ZFS. Enabling
          this option will install a development version of ZFS on Linux. The
          version will have already passed an extensive test suite, but it is
          more likely to hit an undiscovered bug compared to running a released
          version of ZFS on Linux.
        '';
      };

      allowHibernation = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Allow hibernation support. This may be an unsafe option depending on your
          setup. Make sure NOT to use swap on ZFS.
        '';
      };

      extraPools = mkOption {
        type = types.listOf types.str;
        default = [];
        example = [ "tank" "data" ];
        description = lib.mdDoc ''
          Name or GUID of extra ZFS pools that you wish to import during boot.

          Usually this is not necessary. Instead, you should set the mountpoint property
          of ZFS filesystems to `legacy` and add the ZFS filesystems to
          NixOS's {option}`fileSystems` option, which makes NixOS automatically
          import the associated pool.

          However, in some cases (e.g. if you have many filesystems) it may be preferable
          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
          will not be managing those filesystems, you will need to specify the ZFS pool here
          so that NixOS automatically imports it on every boot.
        '';
      };
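
      # A minimal sketch of the two approaches (names hypothetical):
      #   boot.zfs.extraPools = [ "tank" ];   # let ZFS manage the pool's own mountpoints
      # versus the usually preferred
      #   fileSystems."/media".device = "tank/media";   # dataset with mountpoint=legacy
      #   fileSystems."/media".fsType = "zfs";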

      devNodes = mkOption {
        type = types.path;
        default = "/dev/disk/by-id";
        description = lib.mdDoc ''
          Name of the directory from which to import ZFS devices.

          This should be a path under /dev containing stable names for all devices needed, as
          import may fail if device nodes are renamed concurrently with a device failing.
        '';
      };

      forceImportRoot = mkOption {
        type = types.bool;
        default = true;
        description = lib.mdDoc ''
          Forcibly import the ZFS root pool(s) during early boot.

          This is enabled by default for backwards compatibility purposes, but it is highly
          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
          to protect your ZFS pools.

          If you set this option to `false` and NixOS subsequently fails to
          boot because it cannot import the root pool, you should boot with the
          `zfs_force=1` option as a kernel parameter (e.g. by manually
          editing the kernel params in grub during boot). You should only need to do this
          once.
        '';
      };

      forceImportAll = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Forcibly import all ZFS pool(s).

          If you set this option to `false` and NixOS subsequently fails to
          import your non-root ZFS pool(s), you should manually import each pool with
          "zpool import -f \<pool-name\>", and then reboot. You should only need to do
          this once.
        '';
      };

      requestEncryptionCredentials = mkOption {
        type = types.either types.bool (types.listOf types.str);
        default = true;
        example = [ "tank" "data" ];
        description = lib.mdDoc ''
          If true, encryption keys or passwords for all encrypted datasets
          are requested on import. To only decrypt selected datasets, supply a list of
          dataset names instead. For root pools the encryption key can be supplied via both
          an interactive prompt (keylocation=prompt) and from a file (keylocation=file://).
        '';
      };
    };

    services.zfs.autoSnapshot = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = lib.mdDoc ''
          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
          Note that you must set the `com.sun:auto-snapshot`
          property to `true` on all datasets which you wish
          to auto-snapshot.

          You can opt a child dataset in or out of auto-snapshotting
          for a given interval by setting its property, e.g.
          `zfs set com.sun:auto-snapshot:weekly=false DATASET`
        '';
      };

      flags = mkOption {
        default = "-k -p";
        example = "-k -p --utc";
        type = types.str;
        description = lib.mdDoc ''
          Flags to pass to the zfs-auto-snapshot command.

          Run `zfs-auto-snapshot` (without any arguments) to
          see available flags.

          If it's not too inconvenient for snapshots to have timestamps in UTC,
          it is suggested that you append `--utc` to the list
          of default options (see example).

          Otherwise, snapshot names can cause name conflicts or apparent time
          reversals due to daylight saving, timezone or other date/time changes.
        '';
      };

      frequent = mkOption {
        default = 4;
        type = types.int;
        description = lib.mdDoc ''
          Number of frequent (15-minute) auto-snapshots that you wish to keep.
        '';
      };

      hourly = mkOption {
        default = 24;
        type = types.int;
        description = lib.mdDoc ''
          Number of hourly auto-snapshots that you wish to keep.
        '';
      };

      daily = mkOption {
        default = 7;
        type = types.int;
        description = lib.mdDoc ''
          Number of daily auto-snapshots that you wish to keep.
        '';
      };

      weekly = mkOption {
        default = 4;
        type = types.int;
        description = lib.mdDoc ''
          Number of weekly auto-snapshots that you wish to keep.
        '';
      };

      monthly = mkOption {
        default = 12;
        type = types.int;
        description = lib.mdDoc ''
          Number of monthly auto-snapshots that you wish to keep.
        '';
      };
    };

    services.zfs.trim = {
      enable = mkOption {
        description = lib.mdDoc "Whether to enable periodic TRIM on all ZFS pools.";
        default = true;
        example = false;
        type = types.bool;
      };

      interval = mkOption {
        default = "weekly";
        type = types.str;
        example = "daily";
        description = lib.mdDoc ''
          How often we run trim. For most desktop and server systems
          a sufficient trimming frequency is once a week.

          The format is described in
          {manpage}`systemd.time(7)`.
        '';
      };
    };

    services.zfs.autoScrub = {
      enable = mkEnableOption (lib.mdDoc "periodic scrubbing of ZFS pools");

      interval = mkOption {
        default = "Sun, 02:00";
        type = types.str;
        example = "daily";
        description = lib.mdDoc ''
          Systemd calendar expression for when to scrub ZFS pools. See
          {manpage}`systemd.time(7)`.
        '';
      };

      pools = mkOption {
        default = [];
        type = types.listOf types.str;
        example = [ "tank" ];
        description = lib.mdDoc ''
          List of ZFS pools to periodically scrub. If empty, all pools
          will be scrubbed.
        '';
      };
    };

    services.zfs.expandOnBoot = mkOption {
      type = types.either (types.enum [ "disabled" "all" ]) (types.listOf types.str);
      default = "disabled";
      example = [ "tank" "dozer" ];
      description = lib.mdDoc ''
        After importing, expand each device in the specified pools.

        Set the value to the plain string "all" to expand all pools on boot:

            services.zfs.expandOnBoot = "all";

        or set the value to a list of pools to expand the disks of specific pools:

            services.zfs.expandOnBoot = [ "tank" "dozer" ];
      '';
    };

    services.zfs.zed = {
      enableMail = mkEnableOption (lib.mdDoc "ZED's ability to send emails") // {
        default = cfgZfs.package.enableMail;
        defaultText = literalExpression "config.${optZfs.package}.enableMail";
      };

      settings = mkOption {
        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
        example = literalExpression ''
          {
            ZED_DEBUG_LOG = "/tmp/zed.debug.log";

            ZED_EMAIL_ADDR = [ "root" ];
            ZED_EMAIL_PROG = "mail";
            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";

            ZED_NOTIFY_INTERVAL_SECS = 3600;
            ZED_NOTIFY_VERBOSE = false;

            ZED_USE_ENCLOSURE_LEDS = true;
            ZED_SCRUB_AFTER_RESILVER = false;
          }
        '';
        description = lib.mdDoc ''
          ZFS Event Daemon /etc/zfs/zed.d/zed.rc content.

          See
          {manpage}`zed(8)`
          for details on ZED and the scripts in /etc/zfs/zed.d to find the possible variables.
        '';
      };
    };
  };

  ###### implementation

  config = mkMerge [
    (mkIf cfgZfs.enabled {
      assertions = [
        {
          assertion = cfgZED.enableMail -> cfgZfs.package.enableMail;
          message = ''
            To allow ZED to send emails, ZFS needs to be configured to enable
            this. To do so, one must override the `zfs` package and set
            `enableMail` to true.
          '';
        }
        {
          assertion = config.networking.hostId != null;
          message = "ZFS requires networking.hostId to be set";
        }
        {
          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
        }
      ];

      boot = {
        kernelModules = [ "zfs" ];
        # https://github.com/openzfs/zfs/issues/260
        # https://github.com/openzfs/zfs/issues/12842
        # https://github.com/NixOS/nixpkgs/issues/106093
        kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];

        extraModulePackages = [
          (if config.boot.zfs.enableUnstable then
            config.boot.kernelPackages.zfsUnstable
          else
            config.boot.kernelPackages.zfs)
        ];
      };

      boot.initrd = mkIf inInitrd {
        kernelModules = [ "zfs" ] ++ optional (!cfgZfs.enableUnstable) "spl";
        extraUtilsCommands =
          ''
            copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
            copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
            copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
          '';
        extraUtilsCommandsTest = mkIf inInitrd
          ''
            $out/bin/zfs --help >/dev/null 2>&1
            $out/bin/zpool --help >/dev/null 2>&1
          '';
        postDeviceCommands = concatStringsSep "\n" ([''
            ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"
          ''] ++ [(importLib {
            # See comments at importLib definition.
            zpoolCmd = "zpool";
            awkCmd = "awk";
            inherit cfgZfs;
          })] ++ (map (pool: ''
            echo -n "importing root ZFS pool \"${pool}\"..."
            # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
            if ! poolImported "${pool}"; then
              for trial in $(seq 1 60); do
                poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
                sleep 1
                echo -n .
              done
              echo
              if [[ -n "$msg" ]]; then
                echo "$msg";
              fi
              poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
            fi
            ${if isBool cfgZfs.requestEncryptionCredentials
              then optionalString cfgZfs.requestEncryptionCredentials ''
                zfs load-key -a
              ''
              else concatMapStrings (fs: ''
                zfs load-key -- ${escapeShellArg fs}
              '') cfgZfs.requestEncryptionCredentials}
          '') rootPools));

        # Systemd in stage 1
        systemd = {
          packages = [cfgZfs.package];
          services = listToAttrs (map (pool: createImportService {
            inherit pool;
            systemd = config.boot.initrd.systemd.package;
            force = cfgZfs.forceImportRoot;
            prefix = "/sysroot";
          }) rootPools);
          extraBin = {
            # zpool and zfs are already available here thanks to fsPackages
            awk = "${pkgs.gawk}/bin/awk";
          };
        };
      };

      systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source = pkgs.writeShellScript "zpool-sync-shutdown" ''
        exec ${cfgZfs.package}/bin/zpool sync
      '';
      systemd.shutdownRamfs.storePaths = ["${cfgZfs.package}/bin/zpool"];

      # TODO FIXME: see https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567.
      # To avoid breaking people's bootloaders, and because probably not everybody
      # reads release notes that thoroughly, we also check inSystem here.
      boot.loader.grub = mkIf (inInitrd || inSystem) {
        zfsSupport = true;
      };

      services.zfs.zed.settings = {
        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault "${pkgs.mailutils}/bin/mail");
        PATH = lib.makeBinPath [
          cfgZfs.package
          pkgs.coreutils
          pkgs.curl
          pkgs.gawk
          pkgs.gnugrep
          pkgs.gnused
          pkgs.nettools
          pkgs.util-linux
        ];
      };

      environment.etc = genAttrs
        (map
          (file: "zfs/zed.d/${file}")
          [
            "all-syslog.sh"
            "pool_import-led.sh"
            "resilver_finish-start-scrub.sh"
            "statechange-led.sh"
            "vdev_attach-led.sh"
            "zed-functions.sh"
            "data-notify.sh"
            "resilver_finish-notify.sh"
            "scrub_finish-notify.sh"
            "statechange-notify.sh"
            "vdev_clear-led.sh"
          ]
        )
        (file: { source = "${cfgZfs.package}/etc/${file}"; })
      // {
        "zfs/zed.d/zed.rc".text = zedConf;
        "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
      };

      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
      environment.systemPackages = [ cfgZfs.package ]
        ++ optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags

      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
      systemd.packages = [ cfgZfs.package ];

      systemd.services = let
        createImportService' = pool: createImportService {
          inherit pool;
          systemd = config.systemd.package;
          force = cfgZfs.forceImportAll;
        };

        # This forces a sync of any ZFS pools prior to poweroff, even if they're set
        # to sync=disabled: setting any pool property dirties the pool, which makes
        # ZFS sync out a transaction group and flush outstanding writes to disk.
        createSyncService = pool:
          nameValuePair "zfs-sync-${pool}" {
            description = "Sync ZFS pool \"${pool}\"";
            wantedBy = [ "shutdown.target" ];
            unitConfig = {
              DefaultDependencies = false;
            };
            serviceConfig = {
              Type = "oneshot";
              RemainAfterExit = true;
            };
            script = ''
              ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
            '';
          };

        createZfsService = serv:
          nameValuePair serv {
            after = [ "systemd-modules-load.service" ];
            wantedBy = [ "zfs.target" ];
          };

      in listToAttrs (map createImportService' dataPools ++
                      map createSyncService allPools ++
                      map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);

      systemd.targets.zfs-import =
        let
          services = map (pool: "zfs-import-${pool}.service") dataPools;
        in
          {
            requires = services;
            after = services;
            wantedBy = [ "zfs.target" ];
          };

      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
    })

    (mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
      systemd.services."zpool-expand@" = {
        description = "Expand ZFS pools";
        after = [ "zfs.target" ];

        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
        };

        scriptArgs = "%i";
        path = [ cfgZfs.package ];

        script = ''
          pool=$1

          echo "Expanding all devices for $pool."

          ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
        '';
      };

      systemd.services."zpool-expand-pools" =
        let
          # Create a string, to be interpolated in a bash script
          # which enumerates all of the pools to expand.
          # If `expandOnBoot` is set to "all", we dynamically enumerate
          # every imported pool. Otherwise we enumerate just the
          # specifically provided list of pools.
          poolListProvider = if cfgExpandOnBoot == "all"
            then "$(zpool list -H -o name)"
            else lib.escapeShellArgs cfgExpandOnBoot;
        in
        {
          description = "Expand specified ZFS pools";
          wantedBy = [ "default.target" ];
          after = [ "zfs.target" ];

          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
          };

          path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];

          script = ''
            for pool in ${poolListProvider}; do
              systemctl start --no-block "zpool-expand@$pool"
            done
          '';
        };
    })

    (mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
      systemd.services = let
        descr = name: if name == "frequent" then "15 mins"
                 else if name == "hourly" then "hour"
                 else if name == "daily" then "day"
                 else if name == "weekly" then "week"
                 else if name == "monthly" then "month"
                 else throw "unknown snapshot name";
        numSnapshots = name: builtins.getAttr name cfgSnapshots;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            description = "ZFS auto-snapshotting every ${descr snapName}";
            after = [ "zfs-import.target" ];
            serviceConfig = {
              Type = "oneshot";
              ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
            };
            restartIfChanged = false;
          };
        }) snapshotNames);

      systemd.timers = let
        timer = name: if name == "frequent" then "*:0,15,30,45" else name;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            wantedBy = [ "timers.target" ];
            timerConfig = {
              OnCalendar = timer snapName;
              Persistent = "yes";
            };
          };
        }) snapshotNames);
    })

    (mkIf (cfgZfs.enabled && cfgScrub.enable) {
      systemd.services.zfs-scrub = {
        description = "ZFS pools scrubbing";
        after = [ "zfs-import.target" ];
        serviceConfig = {
          Type = "simple";
        };
        script = ''
          ${cfgZfs.package}/bin/zpool scrub -w ${
            if cfgScrub.pools != [] then
              (concatStringsSep " " cfgScrub.pools)
            else
              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
          }
        '';
      };

      systemd.timers.zfs-scrub = {
        wantedBy = [ "timers.target" ];
        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
        timerConfig = {
          OnCalendar = cfgScrub.interval;
          Persistent = "yes";
        };
      };
    })

    (mkIf (cfgZfs.enabled && cfgTrim.enable) {
      systemd.services.zpool-trim = {
        description = "ZFS pools trim";
        after = [ "zfs-import.target" ];
        path = [ cfgZfs.package ];
        startAt = cfgTrim.interval;
        # By default we ignore errors returned by the trim command, in case:
        # - HDDs are mixed with SSDs
        # - There is an SSD in a pool that is currently being trimmed.
        # - There are only HDDs, in which case a failing trim would otherwise
        #   put the unit in a failed state.
        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim \"$pool\" || true; done'";
      };

      systemd.timers.zpool-trim.timerConfig.Persistent = "yes";
    })
  ];
}