{ config, lib, options, pkgs, utils, ... }:
#
# TODO: zfs tunables

with utils;
with lib;

let

  cfgZfs = config.boot.zfs;
  optZfs = options.boot.zfs;
  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
  cfgSnapshots = config.services.zfs.autoSnapshot;
  cfgSnapFlags = cfgSnapshots.flags;
  cfgScrub = config.services.zfs.autoScrub;
  cfgTrim = config.services.zfs.trim;
  cfgZED = config.services.zfs.zed;

  inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
  inSystem = any (fs: fs == "zfs") config.boot.supportedFilesystems;

  autosnapPkg = pkgs.zfstools.override {
    zfs = cfgZfs.package;
  };

  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";

  datasetToPool = x: elemAt (splitString "/" x) 0;
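  # e.g. datasetToPool "rpool/safe/home" == "rpool" (the dataset name is illustrative)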

  fsToPool = fs: datasetToPool fs.device;

  zfsFilesystems = filter (x: x.fsType == "zfs") config.system.build.fileSystems;

  allPools = unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);

  rootPools = unique (map fsToPool (filter fsNeededForBoot zfsFilesystems));

  dataPools = unique (filter (pool: !(elem pool rootPools)) allPools);

  snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];

  # When importing ZFS pools, there's one difficulty: These scripts may run
  # before the backing devices (physical HDDs, etc.) of the pool have been
  # scanned and initialized.
  #
  # An attempted import with all devices missing will just fail and can be
  # retried, but an import where e.g. two out of three disks in a three-way
  # mirror are missing will succeed. This is a problem: When the missing disks
  # are later discovered, they won't be automatically set online, rendering the
  # pool redundancy-less (and far slower) until such time as the system reboots.
  #
  # The solution is below. poolReady checks the status of an un-imported
  # pool to see if *every* device is available -- in which case the pool will be
  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
  #
  # The import scripts then loop over this, waiting until the pool is ready or a
  # sufficient amount of time has passed that we can assume it won't be. In the
  # latter case they make one last attempt at importing, allowing the system to
  # (eventually) boot even with a degraded pool.
  importLib = {zpoolCmd, awkCmd, cfgZfs}: ''
    for o in $(cat /proc/cmdline); do
      case $o in
        zfs_force|zfs_force=1|zfs_force=y)
          ZFS_FORCE="-f"
          ;;
      esac
    done
    poolReady() {
      pool="$1"
      state="$("${zpoolCmd}" import 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
      if [[ "$state" = "ONLINE" ]]; then
        return 0
      else
        echo "Pool $pool in state $state, waiting"
        return 1
      fi
    }
    poolImported() {
      pool="$1"
      "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
    }
    poolImport() {
      pool="$1"
      "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
    }
  '';
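  # For reference: with no pool argument, `zpool import` prints one block per
  # pool that could be imported, roughly of the form
  #
  #    pool: tank
  #      id: 1234567890123456789
  #   state: ONLINE
  #     ...
  #
  # (pool name and id here are only illustrative). The awk program in poolReady
  # looks for the "pool:" line of the requested pool and prints the "state:"
  # value that follows it, falling back to MISSING if the pool does not show up
  # at all.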

  getPoolFilesystems = pool:
    filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;

  getPoolMounts = prefix: pool:
    let
      # Remove the "/" suffix because even though most mountpoints
      # won't have it, the "/" mountpoint will, and we can't have the
      # trailing slash in "/sysroot/" in stage 1.
      mountPoint = fs: escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));
    in
      map (x: "${mountPoint x}.mount") (getPoolFilesystems pool);
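  # e.g. with prefix "/sysroot" (stage 1), a filesystem mounted at "/home" maps to
  # the unit "sysroot-home.mount" and the root filesystem "/" maps to "sysroot.mount".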

  getKeyLocations = pool: if isBool cfgZfs.requestEncryptionCredentials then {
    hasKeys = cfgZfs.requestEncryptionCredentials;
    command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus ${pool}";
  } else let
    keys = filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
  in {
    hasKeys = keys != [];
    command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus ${toString keys}";
  };
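  # `zfs list -H` prints headerless, tab-separated lines, so the commands above
  # yield one "<dataset>\t<keylocation>\t<keystatus>" line per dataset; the
  # import service below reads these lines and loads keys for datasets whose
  # keystatus is "unavailable".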

  createImportService = { pool, systemd, force, prefix ? "" }:
    nameValuePair "zfs-import-${pool}" {
      description = "Import ZFS pool \"${pool}\"";
      # we need systemd-udev-settle to ensure devices are available
      # In the future, hopefully someone will complete this:
      # https://github.com/zfsonlinux/zfs/pull/4943
      requires = [ "systemd-udev-settle.service" ];
      after = [
        "systemd-udev-settle.service"
        "systemd-modules-load.service"
        "systemd-ask-password-console.service"
      ];
      wantedBy = (getPoolMounts prefix pool) ++ [ "local-fs.target" ];
      before = (getPoolMounts prefix pool) ++ [ "local-fs.target" ];
      unitConfig = {
        DefaultDependencies = "no";
      };
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
      environment.ZFS_FORCE = optionalString force "-f";
      script = let
        keyLocations = getKeyLocations pool;
      in (importLib {
        # See comments at importLib definition.
        zpoolCmd = "${cfgZfs.package}/sbin/zpool";
        awkCmd = "${pkgs.gawk}/bin/awk";
        inherit cfgZfs;
      }) + ''
        if ! poolImported "${pool}"; then
          echo -n "importing ZFS pool \"${pool}\"..."
          # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
          for trial in `seq 1 60`; do
            poolReady "${pool}" && poolImport "${pool}" && break
            sleep 1
          done
          poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
        fi
        if poolImported "${pool}"; then
          ${optionalString keyLocations.hasKeys ''
            ${keyLocations.command} | while IFS=$'\t' read ds kl ks; do
              {
              if [[ "$ks" != unavailable ]]; then
                continue
              fi
              case "$kl" in
                none )
                  ;;
                prompt )
                  tries=3
                  success=false
                  while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
                    ${systemd}/bin/systemd-ask-password --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
                      && success=true \
                      || tries=$((tries - 1))
                  done
                  [[ $success = true ]]
                  ;;
                * )
                  ${cfgZfs.package}/sbin/zfs load-key "$ds"
                  ;;
              esac
              } < /dev/null # To protect while read ds kl in case anything reads stdin
            done
          ''}
          echo "Successfully imported ${pool}"
        else
          exit 1
        fi
      '';
    };

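  # zedConf turns cfgZED.settings into the key=value lines written to
  # /etc/zfs/zed.d/zed.rc: integers are emitted bare, booleans as 1/0, strings
  # and lists (space-joined) double-quoted -- e.g. ZED_EMAIL_ADDR = [ "root" ]
  # becomes ZED_EMAIL_ADDR="root" and ZED_NOTIFY_VERBOSE = false becomes
  # ZED_NOTIFY_VERBOSE=0.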
  zedConf = generators.toKeyValue {
    mkKeyValue = generators.mkKeyValueDefault {
      mkValueString = v:
        if isInt v then toString v
        else if isString v then "\"${v}\""
        else if true == v then "1"
        else if false == v then "0"
        else if isList v then "\"" + (concatStringsSep " " v) + "\""
        else throw "unsupported value for a ZED setting: ${toString v}";
    } "=";
  } cfgZED.settings;
in

{

  imports = [
    (mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
  ];

  ###### interface

  options = {
    boot.zfs = {
      package = mkOption {
        readOnly = true;
        type = types.package;
        default = if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs;
        defaultText = literalExpression "if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs";
        description = lib.mdDoc "Configured ZFS userland tools package.";
      };

      enabled = mkOption {
        readOnly = true;
        type = types.bool;
        default = inInitrd || inSystem;
        defaultText = literalMD "`true` if ZFS filesystem support is enabled";
        description = lib.mdDoc "True if ZFS filesystem support is enabled";
      };

      enableUnstable = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Use the unstable zfs package. This might be an option if the latest
          kernel is not yet supported by a published release of ZFS. Enabling
          this option will install a development version of ZFS on Linux. The
          version will have already passed an extensive test suite, but it is
          more likely to hit an undiscovered bug compared to running a released
          version of ZFS on Linux.
        '';
      };

      allowHibernation = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Allow hibernation support. This may be an unsafe option depending on your
          setup; make sure NOT to use swap on ZFS.
        '';
      };

      extraPools = mkOption {
        type = types.listOf types.str;
        default = [];
        example = [ "tank" "data" ];
        description = lib.mdDoc ''
          Name or GUID of extra ZFS pools that you wish to import during boot.

          Usually this is not necessary. Instead, you should set the mountpoint property
          of ZFS filesystems to `legacy` and add the ZFS filesystems to
          NixOS's {option}`fileSystems` option, which makes NixOS automatically
          import the associated pool.
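          For example (the pool `tank`, dataset `tank/data`, and mount point here
          are purely illustrative), such a legacy-mounted dataset could be declared
          as:

          ```nix
          fileSystems."/mnt/data" = {
            device = "tank/data";
            fsType = "zfs";
          };
          ```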

          However, in some cases (e.g. if you have many filesystems) it may be preferable
          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
          will not be managing those filesystems, you will need to specify the ZFS pool here
          so that NixOS automatically imports it on every boot.
        '';
      };

      devNodes = mkOption {
        type = types.path;
        default = "/dev/disk/by-id";
        description = lib.mdDoc ''
          Name of directory from which to import ZFS devices.

          This should be a path under /dev containing stable names for all devices needed, as
          import may fail if device nodes are renamed concurrently with a device failing.
        '';
      };

      forceImportRoot = mkOption {
        type = types.bool;
        default = true;
        description = lib.mdDoc ''
          Forcibly import the ZFS root pool(s) during early boot.

          This is enabled by default for backwards compatibility purposes, but it is highly
          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
          to protect your ZFS pools.

          If you set this option to `false` and NixOS subsequently fails to
          boot because it cannot import the root pool, you should boot with the
          `zfs_force=1` option as a kernel parameter (e.g. by manually
          editing the kernel params in grub during boot). You should only need to do this
          once.
        '';
      };

      forceImportAll = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Forcibly import all ZFS pool(s).

          If you set this option to `false` and NixOS subsequently fails to
          import your non-root ZFS pool(s), you should manually import each pool with
          "zpool import -f \<pool-name\>", and then reboot. You should only need to do
          this once.
        '';
      };

      requestEncryptionCredentials = mkOption {
        type = types.either types.bool (types.listOf types.str);
        default = true;
        example = [ "tank" "data" ];
        description = lib.mdDoc ''
          If true, encryption keys or passwords for all encrypted datasets are
          requested on import. To only decrypt selected datasets, supply a list of
          dataset names instead. For root pools, the encryption key can be supplied
          via both an interactive prompt (keylocation=prompt) and from a file
          (keylocation=file://).
        '';
      };

      passwordTimeout = mkOption {
        type = types.int;
        default = 0;
        description = lib.mdDoc ''
          Timeout in seconds to wait for password entry when decrypting datasets at boot.

          Defaults to 0, which waits forever.
        '';
      };
    };

    services.zfs.autoSnapshot = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = lib.mdDoc ''
          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
          Note that you must set the `com.sun:auto-snapshot`
          property to `true` on all datasets which you wish
          to auto-snapshot.

          You can override a child dataset to use, or not use auto-snapshotting
          by setting its flag with the given interval:
          `zfs set com.sun:auto-snapshot:weekly=false DATASET`
        '';
      };

      flags = mkOption {
        default = "-k -p";
        example = "-k -p --utc";
        type = types.str;
        description = lib.mdDoc ''
          Flags to pass to the zfs-auto-snapshot command.

          Run `zfs-auto-snapshot` (without any arguments) to
          see available flags.

          If it's not too inconvenient for snapshots to have timestamps in UTC,
          it is suggested that you append `--utc` to the list
          of default options (see example).

          Otherwise, snapshot names can cause name conflicts or apparent time
          reversals due to daylight saving time, timezone or other date/time changes.
        '';
      };

      frequent = mkOption {
        default = 4;
        type = types.int;
        description = lib.mdDoc ''
          Number of frequent (15-minute) auto-snapshots that you wish to keep.
        '';
      };

      hourly = mkOption {
        default = 24;
        type = types.int;
        description = lib.mdDoc ''
          Number of hourly auto-snapshots that you wish to keep.
        '';
      };

      daily = mkOption {
        default = 7;
        type = types.int;
        description = lib.mdDoc ''
          Number of daily auto-snapshots that you wish to keep.
        '';
      };

      weekly = mkOption {
        default = 4;
        type = types.int;
        description = lib.mdDoc ''
          Number of weekly auto-snapshots that you wish to keep.
        '';
      };

      monthly = mkOption {
        default = 12;
        type = types.int;
        description = lib.mdDoc ''
          Number of monthly auto-snapshots that you wish to keep.
        '';
      };
    };

    services.zfs.trim = {
      enable = mkOption {
        description = lib.mdDoc "Whether to enable periodic TRIM on all ZFS pools.";
        default = true;
        example = false;
        type = types.bool;
      };

      interval = mkOption {
        default = "weekly";
        type = types.str;
        example = "daily";
        description = lib.mdDoc ''
          How often we run trim. For most desktop and server systems
          a sufficient trimming frequency is once a week.

          The format is described in
          {manpage}`systemd.time(7)`.
        '';
      };
    };

    services.zfs.autoScrub = {
      enable = mkEnableOption (lib.mdDoc "periodic scrubbing of ZFS pools");

      interval = mkOption {
        default = "Sun, 02:00";
        type = types.str;
        example = "daily";
        description = lib.mdDoc ''
          Systemd calendar expression when to scrub ZFS pools. See
          {manpage}`systemd.time(7)`.
        '';
      };

      pools = mkOption {
        default = [];
        type = types.listOf types.str;
        example = [ "tank" ];
        description = lib.mdDoc ''
          List of ZFS pools to periodically scrub. If empty, all pools
          will be scrubbed.
        '';
      };
    };

    services.zfs.expandOnBoot = mkOption {
      type = types.either (types.enum [ "disabled" "all" ]) (types.listOf types.str);
      default = "disabled";
      example = [ "tank" "dozer" ];
      description = lib.mdDoc ''
        After importing, expand each device in the specified pools.

        Set the value to the plain string "all" to expand all pools on boot:

            services.zfs.expandOnBoot = "all";

        or set the value to a list of pools to expand the disks of specific pools:

            services.zfs.expandOnBoot = [ "tank" "dozer" ];
      '';
    };

    services.zfs.zed = {
      enableMail = mkEnableOption (lib.mdDoc "ZED's ability to send emails") // {
        default = cfgZfs.package.enableMail;
        defaultText = literalExpression "config.${optZfs.package}.enableMail";
      };

      settings = mkOption {
        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
        example = literalExpression ''
          {
            ZED_DEBUG_LOG = "/tmp/zed.debug.log";

            ZED_EMAIL_ADDR = [ "root" ];
            ZED_EMAIL_PROG = "mail";
            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";

            ZED_NOTIFY_INTERVAL_SECS = 3600;
            ZED_NOTIFY_VERBOSE = false;

            ZED_USE_ENCLOSURE_LEDS = true;
            ZED_SCRUB_AFTER_RESILVER = false;
          }
        '';
        description = lib.mdDoc ''
          ZFS Event Daemon configuration (the content of /etc/zfs/zed.d/zed.rc).

          See {manpage}`zed(8)` for details on ZED, and the scripts in
          /etc/zfs/zed.d for the possible variables.
        '';
      };
    };
  };

  ###### implementation

  config = mkMerge [
    (mkIf cfgZfs.enabled {
      assertions = [
        {
          assertion = cfgZED.enableMail -> cfgZfs.package.enableMail;
          message = ''
            To allow ZED to send emails, ZFS needs to be configured to enable
            this. To do so, one must override the `zfs` package and set
            `enableMail` to true.
          '';
        }
        {
          assertion = config.networking.hostId != null;
          message = "ZFS requires networking.hostId to be set";
        }
        {
          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
        }
        {
          assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
          message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
        }
      ];

      boot = {
        kernelModules = [ "zfs" ];
        # https://github.com/openzfs/zfs/issues/260
        # https://github.com/openzfs/zfs/issues/12842
        # https://github.com/NixOS/nixpkgs/issues/106093
        kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];

        extraModulePackages = [
          (if config.boot.zfs.enableUnstable then
            config.boot.kernelPackages.zfsUnstable
           else
            config.boot.kernelPackages.zfs)
        ];
      };

      boot.initrd = mkIf inInitrd {
        kernelModules = [ "zfs" ] ++ optional (!cfgZfs.enableUnstable) "spl";
        extraUtilsCommands =
          ''
            copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
            copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
            copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
          '';
        extraUtilsCommandsTest = mkIf inInitrd
          ''
            $out/bin/zfs --help >/dev/null 2>&1
            $out/bin/zpool --help >/dev/null 2>&1
          '';
        postDeviceCommands = concatStringsSep "\n" ([''
            ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"
          ''] ++ [(importLib {
            # See comments at importLib definition.
            zpoolCmd = "zpool";
            awkCmd = "awk";
            inherit cfgZfs;
          })] ++ (map (pool: ''
            echo -n "importing root ZFS pool \"${pool}\"..."
            # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
            if ! poolImported "${pool}"; then
              for trial in `seq 1 60`; do
                poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
                sleep 1
                echo -n .
              done
              echo
              if [[ -n "$msg" ]]; then
                echo "$msg";
              fi
              poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
            fi
            ${if isBool cfgZfs.requestEncryptionCredentials
              then optionalString cfgZfs.requestEncryptionCredentials ''
                zfs load-key -a
              ''
              else concatMapStrings (fs: ''
                zfs load-key -- ${escapeShellArg fs}
              '') (filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)}
        '') rootPools));

        # Systemd in stage 1
        systemd = {
          packages = [cfgZfs.package];
          services = listToAttrs (map (pool: createImportService {
            inherit pool;
            systemd = config.boot.initrd.systemd.package;
            force = cfgZfs.forceImportRoot;
            prefix = "/sysroot";
          }) rootPools);
          extraBin = {
            # zpool and zfs are already in thanks to fsPackages
            awk = "${pkgs.gawk}/bin/awk";
          };
        };
      };

      systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source = pkgs.writeShellScript "zpool-sync-shutdown" ''
        exec ${cfgZfs.package}/bin/zpool sync
      '';
      systemd.shutdownRamfs.storePaths = ["${cfgZfs.package}/bin/zpool"];

      # TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567.
      # To avoid breaking people's bootloaders, and because probably not everybody reads
      # the release notes that thoroughly, also check inSystem here.
      boot.loader.grub = mkIf (inInitrd || inSystem) {
        zfsSupport = true;
      };

      services.zfs.zed.settings = {
        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault "${pkgs.mailutils}/bin/mail");
        PATH = lib.makeBinPath [
          cfgZfs.package
          pkgs.coreutils
          pkgs.curl
          pkgs.gawk
          pkgs.gnugrep
          pkgs.gnused
          pkgs.nettools
          pkgs.util-linux
        ];
      };

      environment.etc = genAttrs
        (map
          (file: "zfs/zed.d/${file}")
          [
            "all-syslog.sh"
            "pool_import-led.sh"
            "resilver_finish-start-scrub.sh"
            "statechange-led.sh"
            "vdev_attach-led.sh"
            "zed-functions.sh"
            "data-notify.sh"
            "resilver_finish-notify.sh"
            "scrub_finish-notify.sh"
            "statechange-notify.sh"
            "vdev_clear-led.sh"
          ]
        )
        (file: { source = "${cfgZfs.package}/etc/${file}"; })
      // {
        "zfs/zed.d/zed.rc".text = zedConf;
        "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
      };

      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
      environment.systemPackages = [ cfgZfs.package ]
        ++ optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags

      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
      systemd.packages = [ cfgZfs.package ];

      systemd.services = let
        createImportService' = pool: createImportService {
          inherit pool;
          systemd = config.systemd.package;
          force = cfgZfs.forceImportAll;
        };

        # This forces a sync of any ZFS pools prior to poweroff, even if they're set
        # to sync=disabled: setting a pool property has to go through a transaction
        # commit, which flushes outstanding writes to disk.
        createSyncService = pool:
          nameValuePair "zfs-sync-${pool}" {
            description = "Sync ZFS pool \"${pool}\"";
            wantedBy = [ "shutdown.target" ];
            unitConfig = {
              DefaultDependencies = false;
            };
            serviceConfig = {
              Type = "oneshot";
              RemainAfterExit = true;
            };
            script = ''
              ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
            '';
          };

        createZfsService = serv:
          nameValuePair serv {
            after = [ "systemd-modules-load.service" ];
            wantedBy = [ "zfs.target" ];
          };

      in listToAttrs (map createImportService' dataPools ++
                      map createSyncService allPools ++
                      map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);

      systemd.targets.zfs-import =
        let
          services = map (pool: "zfs-import-${pool}.service") dataPools;
        in
          {
            requires = services;
            after = services;
            wantedBy = [ "zfs.target" ];
          };

      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
    })

    (mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
      systemd.services."zpool-expand@" = {
        description = "Expand ZFS pools";
        after = [ "zfs.target" ];

        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
        };

        scriptArgs = "%i";
        path = [ cfgZfs.package ];

        script = ''
          pool=$1

          echo "Expanding all devices for $pool."

          ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
        '';
      };

      systemd.services."zpool-expand-pools" =
        let
          # Create a string, to be interpolated into a bash script,
          # which enumerates all of the pools to expand.
          # If `expandOnBoot` is set to "all", we want to dynamically
          # expand every imported pool. Otherwise we want to enumerate
          # just the specifically provided list of pools.
          poolListProvider = if cfgExpandOnBoot == "all"
            then "$(zpool list -H -o name)"
            else lib.escapeShellArgs cfgExpandOnBoot;
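          # e.g. expandOnBoot = [ "tank" "dozer" ] interpolates to the shell words
          # 'tank' 'dozer', while "all" becomes a command substitution evaluated at
          # service runtime.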
        in
        {
          description = "Expand specified ZFS pools";
          wantedBy = [ "default.target" ];
          after = [ "zfs.target" ];

          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
          };

          path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];

          script = ''
            for pool in ${poolListProvider}; do
              systemctl start --no-block "zpool-expand@$pool"
            done
          '';
        };
    })

    (mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
      systemd.services = let
        descr = name: if name == "frequent" then "15 mins"
                 else if name == "hourly" then "hour"
                 else if name == "daily" then "day"
                 else if name == "weekly" then "week"
                 else if name == "monthly" then "month"
                 else throw "unknown snapshot name";
        numSnapshots = name: builtins.getAttr name cfgSnapshots;
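        # e.g. with the default flags ("-k -p") and retention settings, the
        # "frequent" service below ends up running (store path elided):
        #   zfs-auto-snapshot -k -p frequent 4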
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            description = "ZFS auto-snapshotting every ${descr snapName}";
            after = [ "zfs-import.target" ];
            serviceConfig = {
              Type = "oneshot";
              ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
            };
            restartIfChanged = false;
          };
        }) snapshotNames);

      systemd.timers = let
        timer = name: if name == "frequent" then "*:0,15,30,45" else name;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            wantedBy = [ "timers.target" ];
            timerConfig = {
              OnCalendar = timer snapName;
              Persistent = "yes";
            };
          };
        }) snapshotNames);
    })

    (mkIf (cfgZfs.enabled && cfgScrub.enable) {
      systemd.services.zfs-scrub = {
        description = "ZFS pools scrubbing";
        after = [ "zfs-import.target" ];
        serviceConfig = {
          Type = "simple";
        };
        script = ''
          ${cfgZfs.package}/bin/zpool scrub -w ${
            if cfgScrub.pools != [] then
              (concatStringsSep " " cfgScrub.pools)
            else
              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
          }
        '';
      };

      systemd.timers.zfs-scrub = {
        wantedBy = [ "timers.target" ];
        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
        timerConfig = {
          OnCalendar = cfgScrub.interval;
          Persistent = "yes";
        };
      };
    })

    (mkIf (cfgZfs.enabled && cfgTrim.enable) {
      systemd.services.zpool-trim = {
        description = "ZFS pools trim";
        after = [ "zfs-import.target" ];
        path = [ cfgZfs.package ];
        startAt = cfgTrim.interval;
        # By default we ignore errors returned by the trim command, in case:
        # - HDDs are mixed with SSDs
        # - there is an SSD in a pool that is currently being trimmed
        # - there are only HDDs, which would otherwise leave the unit in a failed state
        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool; done || true'";
      };

      systemd.timers.zpool-trim.timerConfig.Persistent = "yes";
    })
  ];
}