{
  config,
  lib,
  options,
  pkgs,
  utils,
  ...
}:
#
# TODO: zfs tunables

let

  cfgZfs = config.boot.zfs;
  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
  cfgSnapshots = config.services.zfs.autoSnapshot;
  cfgSnapFlags = cfgSnapshots.flags;
  cfgScrub = config.services.zfs.autoScrub;
  cfgTrim = config.services.zfs.trim;
  cfgZED = config.services.zfs.zed;

  selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
  clevisDatasets = lib.attrNames (
    lib.filterAttrs (
      device: _:
      lib.any (
        e:
        e.fsType == "zfs"
        && (utils.fsNeededForBoot e)
        && (e.device == device || lib.hasPrefix "${device}/" e.device)
      ) config.system.build.fileSystems
    ) config.boot.initrd.clevis.devices
  );

  inInitrd = config.boot.initrd.supportedFilesystems.zfs or false;
  inSystem = config.boot.supportedFilesystems.zfs or false;

  autosnapPkg = pkgs.zfstools.override {
    zfs = cfgZfs.package;
  };

  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";

  datasetToPool = x: lib.elemAt (lib.splitString "/" x) 0;

  fsToPool = fs: datasetToPool fs.device;

  zfsFilesystems = lib.filter (x: x.fsType == "zfs") config.system.build.fileSystems;

  allPools = lib.unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);

  rootPools = lib.unique (map fsToPool (lib.filter utils.fsNeededForBoot zfsFilesystems));

  dataPools = lib.unique (lib.filter (pool: !(lib.elem pool rootPools)) allPools);
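  # Rough illustration of how the pool lists above are derived (the dataset and
  # mountpoint names here are made up, not taken from this module): a
  # `fileSystems."/home" = { device = "tank/home"; fsType = "zfs"; }` entry
  # yields `fsToPool == "tank"`, so "tank" appears in `allPools`; it lands in
  # `rootPools` only if `utils.fsNeededForBoot` considers the mount required
  # for boot, and in `dataPools` otherwise.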

  snapshotNames = [
    "frequent"
    "hourly"
    "daily"
    "weekly"
    "monthly"
  ];

  # When importing ZFS pools, there's one difficulty: These scripts may run
  # before the backing devices (physical HDDs, etc.) of the pool have been
  # scanned and initialized.
  #
  # An attempted import with all devices missing will just fail, and can be
  # retried, but an import where e.g. two out of three disks in a three-way
  # mirror are missing will succeed. This is a problem: When the missing disks
  # are later discovered, they won't be automatically set online, rendering the
  # pool redundancy-less (and far slower) until such time as the system reboots.
  #
  # The solution is below. poolReady checks the status of an un-imported
  # pool, to see if *every* device is available -- in which case the pool will be
  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
  #
  # The import scripts then loop over this, waiting until the pool is ready or a
  # sufficient amount of time has passed that we can assume it won't be. In the
  # latter case it makes one last attempt at importing, allowing the system to
  # (eventually) boot even with a degraded pool.
  importLib =
    {
      zpoolCmd,
      awkCmd,
      pool,
    }:
    let
      devNodes =
        if pool != null && cfgZfs.pools ? ${pool} then cfgZfs.pools.${pool}.devNodes else cfgZfs.devNodes;
    in
    ''
      # shellcheck disable=SC2013
      for o in $(cat /proc/cmdline); do
        case $o in
          zfs_force|zfs_force=1|zfs_force=y)
            ZFS_FORCE="-f"
            ;;
        esac
      done
      poolReady() {
        pool="$1"
        state="$("${zpoolCmd}" import -d "${devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
        if [[ "$state" = "ONLINE" ]]; then
          return 0
        else
          echo "Pool $pool in state $state, waiting"
          return 1
        fi
      }
      poolImported() {
        pool="$1"
        "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
      }
      poolImport() {
        pool="$1"
        # shellcheck disable=SC2086
        "${zpoolCmd}" import -d "${devNodes}" -N $ZFS_FORCE "$pool"
      }
    '';

  getPoolFilesystems =
    pool: lib.filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;

  getPoolMounts =
    prefix: pool:
    let
      poolFSes = getPoolFilesystems pool;

      # Remove the "/" suffix because even though most mountpoints
      # won't have it, the "/" mountpoint will, and we can't have the
      # trailing slash in "/sysroot/" in stage 1.
      mountPoint = fs: utils.escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));

      hasUsr = lib.any (fs: fs.mountPoint == "/usr") poolFSes;
    in
    map (x: "${mountPoint x}.mount") poolFSes ++ lib.optional hasUsr "sysusr-usr.mount";

  getKeyLocations =
    pool:
    if lib.isBool cfgZfs.requestEncryptionCredentials then
      {
        hasKeys = cfgZfs.requestEncryptionCredentials;
        command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus -t volume,filesystem ${pool}";
      }
    else
      let
        keys = lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
      in
      {
        hasKeys = keys != [ ];
        command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus -t volume,filesystem ${toString keys}";
      };
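  # The commands above print one tab-separated name/keylocation/keystatus line
  # per dataset. Illustrative output only (the dataset names and key file path
  # are made up):
  #
  #   tank/secret    prompt                      unavailable
  #   tank/other     file:///etc/zfs/other.key   available
  #
  # The import service below reads these lines as `read -r ds kl ks` and only
  # acts on datasets whose keystatus is "unavailable".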

  createImportService =
    {
      pool,
      systemd,
      force,
      prefix ? "",
    }:
    lib.nameValuePair "zfs-import-${pool}" {
      description = "Import ZFS pool \"${pool}\"";
      # We wait for systemd-udev-settle to ensure devices are available,
      # but don't *require* it, because mounts shouldn't be killed if it's stopped.
      # In the future, hopefully someone will complete this:
      # https://github.com/zfsonlinux/zfs/pull/4943
      wants = [
        "systemd-udev-settle.service"
      ]
      ++ lib.optional (config.boot.initrd.clevis.useTang) "network-online.target";
      after = [
        "systemd-udev-settle.service"
        "systemd-modules-load.service"
        "systemd-ask-password-console.service"
      ]
      ++ lib.optional (config.boot.initrd.clevis.useTang) "network-online.target";
      requiredBy =
        let
          poolFilesystems = getPoolFilesystems pool;
          noauto = poolFilesystems != [ ] && lib.all (fs: lib.elem "noauto" fs.options) poolFilesystems;
        in
        getPoolMounts prefix pool ++ lib.optional (!noauto) "zfs-import.target";
      before = getPoolMounts prefix pool ++ [
        "shutdown.target"
        "zfs-import.target"
      ];
      conflicts = [ "shutdown.target" ];
      unitConfig = {
        DefaultDependencies = "no";
      };
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
      environment.ZFS_FORCE = lib.optionalString force "-f";
      script =
        let
          keyLocations = getKeyLocations pool;
        in
        (importLib {
          # See comments at importLib definition.
          zpoolCmd = "${cfgZfs.package}/sbin/zpool";
          awkCmd = "${pkgs.gawk}/bin/awk";
          inherit pool;
        })
        + ''
          if ! poolImported "${pool}"; then
            echo -n "importing ZFS pool \"${pool}\"..."
            # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
            for _ in $(seq 1 60); do
              poolReady "${pool}" && poolImport "${pool}" && break
              sleep 1
            done
            poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
          fi
          if poolImported "${pool}"; then
            ${lib.optionalString config.boot.initrd.clevis.enable (
              lib.concatMapStringsSep "\n" (
                elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true "
              ) (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets)
            )}


            ${lib.optionalString keyLocations.hasKeys ''
              ${keyLocations.command} | while IFS=$'\t' read -r ds kl ks; do
                {
                  if [[ "$ks" != unavailable ]]; then
                    continue
                  fi
                  case "$kl" in
                    none )
                      ;;
                    prompt )
                      tries=3
                      success=false
                      while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
                        ${systemd}/bin/systemd-ask-password ${lib.optionalString cfgZfs.useKeyringForCredentials ("--keyname=zfs-$ds")} --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
                          && success=true \
                          || tries=$((tries - 1))
                      done
                      [[ $success = true ]]
                      ;;
                    * )
                      ${cfgZfs.package}/sbin/zfs load-key "$ds"
                      ;;
                  esac
                } < /dev/null # To protect while read ds kl in case anything reads stdin
              done
            ''}
            echo "Successfully imported ${pool}"
          else
            exit 1
          fi
        '';
    };

  zedConf = lib.generators.toKeyValue {
    mkKeyValue = lib.generators.mkKeyValueDefault {
      mkValueString =
        v:
        if lib.isInt v then
          toString v
        else if lib.isString v then
          "\"${v}\""
        else if true == v then
          "1"
        else if false == v then
          "0"
        else if lib.isList v then
          "\"" + (lib.concatStringsSep " " v) + "\""
        else
          lib.err "this value is" (toString v);
    } "=";
  } cfgZED.settings;
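  # Rough illustration of the generator above (not evaluated anywhere in this
  # module): a settings value of
  #   { ZED_EMAIL_ADDR = [ "root" ]; ZED_NOTIFY_INTERVAL_SECS = 3600; ZED_NOTIFY_VERBOSE = false; }
  # would render into zed.rc as
  #   ZED_EMAIL_ADDR="root"
  #   ZED_NOTIFY_INTERVAL_SECS=3600
  #   ZED_NOTIFY_VERBOSE=0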
in

{

  imports = [
    (lib.mkRemovedOptionModule [
      "boot"
      "zfs"
      "enableLegacyCrypto"
    ] "The corresponding package was removed from nixpkgs.")
    (lib.mkRemovedOptionModule [
      "boot"
      "zfs"
      "enableUnstable"
    ] "Instead set `boot.zfs.package = pkgs.zfs_unstable;`")
  ];

  ###### interface

  options = {
    boot.zfs = {
      package = lib.mkOption {
        type = lib.types.package;
        default = pkgs.zfs;
        defaultText = lib.literalExpression "pkgs.zfs";
        description = "Configured ZFS userland tools package.";
      };

      modulePackage = lib.mkOption {
        internal = true; # It is supposed to be selected automatically, but can be overridden by expert users.
        default = selectModulePackage cfgZfs.package;
        type = lib.types.package;
        description = "Configured ZFS kernel module package.";
      };

      enabled = lib.mkOption {
        readOnly = true;
        type = lib.types.bool;
        default = inInitrd || inSystem;
        defaultText = lib.literalMD "`true` if ZFS filesystem support is enabled";
        description = "True if ZFS filesystem support is enabled.";
      };

      allowHibernation = lib.mkOption {
        type = lib.types.bool;
        default = false;
        description = ''
          Allow hibernation support. This may be an unsafe option depending on your
          setup; make sure NOT to use swap on ZFS.
        '';
      };

      extraPools = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        default = [ ];
        example = [
          "tank"
          "data"
        ];
        description = ''
          Names or GUIDs of extra ZFS pools that you wish to import during boot.

          Usually this is not necessary. Instead, you should set the mountpoint property
          of ZFS filesystems to `legacy` and add the ZFS filesystems to
          NixOS's {option}`fileSystems` option, which makes NixOS automatically
          import the associated pool.

          However, in some cases (e.g. if you have many filesystems) it may be preferable
          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
          will not be managing those filesystems, you will need to specify the ZFS pool here
          so that NixOS automatically imports it on every boot.
        '';
      };
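      # A sketch of the two approaches described above (the pool and dataset
      # names are made up for the example):
      #
      #   # Managed by NixOS: legacy mountpoint plus a fileSystems entry; the
      #   # pool is imported automatically, no extraPools needed.
      #   fileSystems."/data" = {
      #     device = "tank/data";
      #     fsType = "zfs";
      #   };
      #
      #   # Managed purely with ZFS tooling: only the pool import is handled here.
      #   boot.zfs.extraPools = [ "tank" ];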

      devNodes = lib.mkOption {
        type = lib.types.path;
        default = "/dev/disk/by-id";
        description = ''
          Name of the directory from which to import ZFS devices. This is passed to
          `zpool import` as the value of the `-d` option.

          For guidance on choosing this value, see
          [the ZFS documentation](https://openzfs.github.io/openzfs-docs/Project%20and%20Community/FAQ.html#selecting-dev-names-when-creating-a-pool-linux).
        '';
      };

      forceImportRoot = lib.mkOption {
        type = lib.types.bool;
        default = true;
        description = ''
          Forcibly import the ZFS root pool(s) during early boot.

          This is enabled by default for backwards compatibility purposes, but it is highly
          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
          to protect your ZFS pools.

          If you set this option to `false` and NixOS subsequently fails to
          boot because it cannot import the root pool, you should boot with the
          `zfs_force=1` option as a kernel parameter (e.g. by manually
          editing the kernel params in grub during boot). You should only need to do this
          once.
        '';
      };

      forceImportAll = lib.mkOption {
        type = lib.types.bool;
        default = false;
        description = ''
          Forcibly import all ZFS pool(s).

          If you set this option to `false` and NixOS subsequently fails to
          import your non-root ZFS pool(s), you should manually import each pool with
          "zpool import -f \<pool-name\>", and then reboot. You should only need to do
          this once.
        '';
      };

      requestEncryptionCredentials = lib.mkOption {
        type = lib.types.either lib.types.bool (lib.types.listOf lib.types.str);
        default = true;
        example = [
          "tank"
          "data"
        ];
        description = ''
          If true, encryption keys or passwords for all encrypted datasets are
          requested on import. To decrypt only selected datasets, supply a list of
          dataset names instead. For root pools the encryption key can be supplied
          either via an interactive prompt (keylocation=prompt) or from a file
          (keylocation=file://).
        '';
      };
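      # For example (hypothetical dataset names), to be prompted only for these
      # two datasets' keys at import time:
      #
      #   boot.zfs.requestEncryptionCredentials = [
      #     "tank/secret"
      #     "dozer/backups"
      #   ];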

      useKeyringForCredentials = lib.mkEnableOption "using the kernel keyring for encryption credentials, with keyname=zfs-<poolname>";

      passwordTimeout = lib.mkOption {
        type = lib.types.int;
        default = 0;
        description = ''
          Timeout in seconds to wait for password entry when decrypting at boot.

          Defaults to 0, which waits forever.
        '';
      };

      pools = lib.mkOption {
        type = lib.types.attrsOf (
          lib.types.submodule {
            options = {
              devNodes = lib.mkOption {
                type = lib.types.path;
                default = cfgZfs.devNodes;
                defaultText = "config.boot.zfs.devNodes";
                description = options.boot.zfs.devNodes.description;
              };
            };
          }
        );
        default = { };
        description = ''
          Configuration for individual pools to override global defaults.
        '';
      };
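      # A per-pool override sketch (the pool name "tank" is only an example):
      #
      #   boot.zfs.pools.tank.devNodes = "/dev/disk/by-partuuid";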

      removeLinuxDRM = lib.mkOption {
        type = lib.types.bool;
        default = false;
        description = ''
          Patch the kernel to change symbols needed by ZFS from
          EXPORT_SYMBOL_GPL to EXPORT_SYMBOL.

          Currently has no effect, but may again in future if a kernel
          update breaks ZFS due to symbols being newly changed to GPL.
        '';
      };
    };

    services.zfs.autoSnapshot = {
      enable = lib.mkOption {
        default = false;
        type = lib.types.bool;
        description = ''
          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
          Note that you must set the `com.sun:auto-snapshot`
          property to `true` on all datasets which you wish
          to auto-snapshot.

          You can opt a child dataset in or out of auto-snapshotting for a given
          interval by setting its flag, e.g.:
          `zfs set com.sun:auto-snapshot:weekly=false DATASET`
        '';
      };

      flags = lib.mkOption {
        default = "-k -p";
        example = "-k -p --utc";
        type = lib.types.str;
        description = ''
          Flags to pass to the zfs-auto-snapshot command.

          Run `zfs-auto-snapshot` (without any arguments) to
          see available flags.

          If it's not too inconvenient for snapshots to have timestamps in UTC,
          it is suggested that you append `--utc` to the list
          of default options (see example).

          Otherwise, snapshot names can cause name conflicts or apparent time
          reversals due to daylight saving time, timezone or other date/time changes.
        '';
      };

      frequent = lib.mkOption {
        default = 4;
        type = lib.types.int;
        description = ''
          Number of frequent (15-minute) auto-snapshots that you wish to keep.
        '';
      };

      hourly = lib.mkOption {
        default = 24;
        type = lib.types.int;
        description = ''
          Number of hourly auto-snapshots that you wish to keep.
        '';
      };

      daily = lib.mkOption {
        default = 7;
        type = lib.types.int;
        description = ''
          Number of daily auto-snapshots that you wish to keep.
        '';
      };

      weekly = lib.mkOption {
        default = 4;
        type = lib.types.int;
        description = ''
          Number of weekly auto-snapshots that you wish to keep.
        '';
      };

      monthly = lib.mkOption {
        default = 12;
        type = lib.types.int;
        description = ''
          Number of monthly auto-snapshots that you wish to keep.
        '';
      };
    };
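    # A minimal usage sketch for the options above (the retention value is
    # illustrative, not a default):
    #
    #   services.zfs.autoSnapshot = {
    #     enable = true;
    #     flags = "-k -p --utc";
    #     weekly = 8;
    #   };
    #
    # Remember to set `com.sun:auto-snapshot=true` on the datasets you want
    # snapshotted.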

    services.zfs.trim = {
      enable = lib.mkOption {
        description = "Whether to enable periodic TRIM on all ZFS pools.";
        default = true;
        example = false;
        type = lib.types.bool;
      };

      interval = lib.mkOption {
        default = "weekly";
        type = lib.types.str;
        example = "daily";
        description = ''
          How often trim is run. For most desktop and server systems,
          trimming once a week is sufficient.

          The format is described in
          {manpage}`systemd.time(7)`.
        '';
      };

      randomizedDelaySec = lib.mkOption {
        default = "6h";
        type = lib.types.str;
        example = "12h";
        description = ''
          Add a randomized delay before each ZFS trim.
          The delay will be chosen between zero and this value.
          This value must be a time span in the format specified by
          {manpage}`systemd.time(7)`.
        '';
      };
    };

    services.zfs.autoScrub = {
      enable = lib.mkEnableOption "periodic scrubbing of ZFS pools";

      interval = lib.mkOption {
        default = "monthly";
        type = lib.types.str;
        example = "quarterly";
        description = ''
          Systemd calendar expression for when to scrub ZFS pools. See
          {manpage}`systemd.time(7)`.
        '';
      };

      randomizedDelaySec = lib.mkOption {
        default = "6h";
        type = lib.types.str;
        example = "12h";
        description = ''
          Add a randomized delay before each ZFS autoscrub.
          The delay will be chosen between zero and this value.
          This value must be a time span in the format specified by
          {manpage}`systemd.time(7)`.
        '';
      };

      pools = lib.mkOption {
        default = [ ];
        type = lib.types.listOf lib.types.str;
        example = [ "tank" ];
        description = ''
          List of ZFS pools to periodically scrub. If empty, all pools
          will be scrubbed.
        '';
      };
    };

    services.zfs.expandOnBoot = lib.mkOption {
      type = lib.types.either (lib.types.enum [
        "disabled"
        "all"
      ]) (lib.types.listOf lib.types.str);
      default = "disabled";
      example = [
        "tank"
        "dozer"
      ];
      description = ''
        After importing, expand each device in the specified pools.

        Set the value to the plain string "all" to expand all pools on boot:

            services.zfs.expandOnBoot = "all";

        or set the value to a list of pools to expand the disks of specific pools:

            services.zfs.expandOnBoot = [ "tank" "dozer" ];
      '';
    };

    services.zfs.zed = {
      enableMail = lib.mkOption {
        type = lib.types.bool;
        default = config.services.mail.sendmailSetuidWrapper != null;
        defaultText = lib.literalExpression ''
          config.services.mail.sendmailSetuidWrapper != null
        '';
        description = ''
          Whether to enable ZED's ability to send emails.
        '';
      };

      settings = lib.mkOption {
        type =
          let
            t = lib.types;
          in
          t.attrsOf (
            t.oneOf [
              t.str
              t.int
              t.bool
              (t.listOf t.str)
            ]
          );
        example = lib.literalExpression ''
          {
            ZED_DEBUG_LOG = "/tmp/zed.debug.log";

            ZED_EMAIL_ADDR = [ "root" ];
            ZED_EMAIL_PROG = "mail";
            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";

            ZED_NOTIFY_INTERVAL_SECS = 3600;
            ZED_NOTIFY_VERBOSE = false;

            ZED_USE_ENCLOSURE_LEDS = true;
            ZED_SCRUB_AFTER_RESILVER = false;
          }
        '';
        description = ''
          ZFS Event Daemon /etc/zfs/zed.d/zed.rc content.

          See {manpage}`zed(8)` for details on ZED, and the scripts in
          /etc/zfs/zed.d for the possible variables.
        '';
      };
    };
  };

  ###### implementation

  config = lib.mkMerge [
    (lib.mkIf cfgZfs.enabled {
      assertions = [
        {
          assertion = cfgZfs.modulePackage.version == cfgZfs.package.version;
          message = "The kernel module and the userspace tooling versions do not match; this is an unsupported use case.";
        }
        {
          assertion = config.networking.hostId != null;
          message = "ZFS requires networking.hostId to be set";
        }
        {
          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
        }
        {
          assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
          message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
        }
        {
          assertion = !(lib.elem "" allPools);
          message = ''
            Automatic pool detection found an empty pool name, which can't be used.
            Hint: for `fileSystems` entries with `fsType = zfs`, the `device` attribute
            should be a zfs dataset name, like `device = "pool/data/set"`.
            This error can be triggered by using an absolute path, such as `"/dev/disk/..."`.
          '';
        }
      ];

      boot = {
        kernelModules = [ "zfs" ];
        # https://github.com/openzfs/zfs/issues/260
        # https://github.com/openzfs/zfs/issues/12842
        # https://github.com/NixOS/nixpkgs/issues/106093
        kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];

        extraModulePackages = [
          cfgZfs.modulePackage
        ];
      };

      boot.initrd = lib.mkIf inInitrd {
        kernelModules = [ "zfs" ];
        extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
          copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
          copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
          copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
          copy_bin_and_libs ${cfgZfs.package}/lib/udev/vdev_id
          copy_bin_and_libs ${cfgZfs.package}/lib/udev/zvol_id
        '';
        extraUtilsCommandsTest = lib.mkIf (!config.boot.initrd.systemd.enable) ''
          $out/bin/zfs --help >/dev/null 2>&1
          $out/bin/zpool --help >/dev/null 2>&1
        '';
        postResumeCommands = lib.mkIf (!config.boot.initrd.systemd.enable) (
          lib.concatStringsSep "\n" (
            [
              ''
                ZFS_FORCE="${lib.optionalString cfgZfs.forceImportRoot "-f"}"
              ''
            ]
            ++ [
              (importLib {
                # See comments at importLib definition.
                zpoolCmd = "zpool";
                awkCmd = "awk";
                pool = null;
              })
            ]
            ++ (map (pool: ''
              echo -n "importing root ZFS pool \"${pool}\"..."
              # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
              if ! poolImported "${pool}"; then
                for _ in $(seq 1 60); do
                  poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
                  sleep 1
                  echo -n .
                done
                echo
                if [[ -n "$msg" ]]; then
                  echo "$msg";
                fi
                poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
              fi

              ${lib.optionalString config.boot.initrd.clevis.enable (
                lib.concatMapStringsSep "\n" (
                  elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}"
                ) (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets)
              )}

              ${
                if lib.isBool cfgZfs.requestEncryptionCredentials then
                  lib.optionalString cfgZfs.requestEncryptionCredentials ''
                    zfs load-key -a
                  ''
                else
                  lib.concatMapStrings (fs: ''
                    zfs load-key -- ${lib.escapeShellArg fs}
                  '') (lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)
              }
            '') rootPools)
          )
        );

        # Systemd in stage 1
        systemd = lib.mkIf config.boot.initrd.systemd.enable {
          packages = [ cfgZfs.package ];
          services = lib.listToAttrs (
            map (
              pool:
              createImportService {
                inherit pool;
                systemd = config.boot.initrd.systemd.package;
                force = cfgZfs.forceImportRoot;
                prefix = "/sysroot";
              }
            ) rootPools
          );
          targets.zfs-import.wantedBy = [ "zfs.target" ];
          targets.zfs.wantedBy = [ "initrd.target" ];
          extraBin = {
            zpool = "${cfgZfs.package}/sbin/zpool";
            zfs = "${cfgZfs.package}/sbin/zfs";
            awk = "${pkgs.gawk}/bin/awk";
          };
          storePaths = [
            "${cfgZfs.package}/lib/udev/vdev_id"
            "${cfgZfs.package}/lib/udev/zvol_id"
          ];
        };
        services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, in stage 1
      };

      systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source =
        pkgs.writeShellScript "zpool-sync-shutdown" ''
          exec ${cfgZfs.package}/bin/zpool sync
        '';
      systemd.shutdownRamfs.storePaths = [ "${cfgZfs.package}/bin/zpool" ];
      # TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567.
      # To avoid breaking people's bootloaders, and because probably not everybody reads
      # the release notes that thoroughly, also check inSystem.
      boot.loader.grub = lib.mkIf (inInitrd || inSystem) {
        zfsSupport = true;
        zfsPackage = cfgZfs.package;
      };

      services.zfs.zed.settings = {
        ZED_EMAIL_PROG = lib.mkIf cfgZED.enableMail (
          lib.mkDefault (
            config.security.wrapperDir + "/" + config.services.mail.sendmailSetuidWrapper.program
          )
        );
        # subject in header for sendmail
        ZED_EMAIL_OPTS = lib.mkIf cfgZED.enableMail (lib.mkDefault "@ADDRESS@");

        PATH = lib.makeBinPath [
          cfgZfs.package
          pkgs.coreutils
          pkgs.curl
          pkgs.gawk
          pkgs.gnugrep
          pkgs.gnused
          pkgs.hostname-debian
          pkgs.util-linux
        ];
      };

      # ZFS already has its own scheduler. Without this, my (@Artturin) computer froze
      # for a second whenever I ran a nix build.
      services.udev.extraRules = ''
        ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
      '';

      environment.etc =
        lib.genAttrs
          (map (file: "zfs/zed.d/${file}") [
            "all-syslog.sh"
            "pool_import-led.sh"
            "resilver_finish-start-scrub.sh"
            "statechange-led.sh"
            "vdev_attach-led.sh"
            "zed-functions.sh"
            "data-notify.sh"
            "resilver_finish-notify.sh"
            "scrub_finish-notify.sh"
            "statechange-notify.sh"
            "vdev_clear-led.sh"
          ])
          (file: {
            source = "${cfgZfs.package}/etc/${file}";
          })
        // {
          "zfs/zed.d/zed.rc".text = zedConf;
          "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
        };

      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
      environment.systemPackages = [ cfgZfs.package ] ++ lib.optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags

      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
      systemd.packages = [ cfgZfs.package ];

      systemd.services =
        let
          createImportService' =
            pool:
            createImportService {
              inherit pool;
              systemd = config.systemd.package;
              force = cfgZfs.forceImportAll;
            };

          # This forces a sync of any ZFS pools prior to poweroff, even if they're set
          # to sync=disabled.
          createSyncService =
            pool:
            lib.nameValuePair "zfs-sync-${pool}" {
              description = "Sync ZFS pool \"${pool}\"";
              wantedBy = [ "shutdown.target" ];
              before = [ "final.target" ];
              unitConfig = {
                DefaultDependencies = false;
              };
              serviceConfig = {
                Type = "oneshot";
                RemainAfterExit = true;
              };
              script = ''
                ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
              '';
            };

          createZfsService =
            serv:
            lib.nameValuePair serv {
              after = [ "systemd-modules-load.service" ];
              wantedBy = [ "zfs.target" ];
            };

        in
        lib.listToAttrs (
          map createImportService' dataPools
          ++ map createSyncService allPools
          ++ map createZfsService [
            "zfs-mount"
            "zfs-share"
            "zfs-zed"
          ]
        );

      systemd.targets.zfs-import.wantedBy = [ "zfs.target" ];

      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
    })

    (lib.mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
      systemd.services."zpool-expand@" = {
        description = "Expand ZFS pools";
        after = [ "zfs.target" ];

        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
        };

        scriptArgs = "%i";
        path = [ cfgZfs.package ];

        script = ''
          pool=$1

          echo "Expanding all devices for $pool."

          ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
        '';
      };

      systemd.services."zpool-expand-pools" =
        let
          # Create a string, to be interpolated in a bash script
          # which enumerates all of the pools to expand.
          # If `expandOnBoot` is set to "all", we want to dynamically
          # expand every pool. Otherwise we want to enumerate
          # just the specifically provided list of pools.
          poolListProvider =
            if cfgExpandOnBoot == "all" then
              "$(zpool list -H -o name)"
            else
              lib.escapeShellArgs cfgExpandOnBoot;
        in
        {
          description = "Expand specified ZFS pools";
          wantedBy = [ "default.target" ];
          after = [ "zfs.target" ];

          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
          };

          path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];

          script = ''
            for pool in ${poolListProvider}; do
              systemctl start --no-block "zpool-expand@$pool"
            done
          '';
        };
    })

    (lib.mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
      systemd.services =
        let
          descr =
            name:
            if name == "frequent" then
              "15 mins"
            else if name == "hourly" then
              "hour"
            else if name == "daily" then
              "day"
            else if name == "weekly" then
              "week"
            else if name == "monthly" then
              "month"
            else
              throw "unknown snapshot name";
          numSnapshots = name: builtins.getAttr name cfgSnapshots;
        in
        builtins.listToAttrs (
          map (snapName: {
            name = "zfs-snapshot-${snapName}";
            value = {
              description = "ZFS auto-snapshotting every ${descr snapName}";
              after = [ "zfs-import.target" ];
              serviceConfig = {
                Type = "oneshot";
                ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
              };
              restartIfChanged = false;
            };
          }) snapshotNames
        );

      systemd.timers =
        let
          timer = name: if name == "frequent" then "*:0,15,30,45" else name;
        in
        builtins.listToAttrs (
          map (snapName: {
            name = "zfs-snapshot-${snapName}";
            value = {
              wantedBy = [ "timers.target" ];
              timerConfig = {
                OnCalendar = timer snapName;
                Persistent = lib.mkDefault "yes";
              };
            };
          }) snapshotNames
        );
    })

    (lib.mkIf (cfgZfs.enabled && cfgScrub.enable) {
      systemd.services.zfs-scrub = {
        description = "ZFS pools scrubbing";
        after = [ "zfs-import.target" ];
        serviceConfig = {
          Type = "simple";
          IOSchedulingClass = "idle";
        };
        script = ''
          # shellcheck disable=SC2046
          ${cfgZfs.package}/bin/zpool scrub -w ${
            if cfgScrub.pools != [ ] then
              (lib.concatStringsSep " " cfgScrub.pools)
            else
              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
          }
        '';
      };

      systemd.timers.zfs-scrub = {
        wantedBy = [ "timers.target" ];
        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
        timerConfig = {
          OnCalendar = cfgScrub.interval;
          Persistent = lib.mkDefault "yes";
          RandomizedDelaySec = cfgScrub.randomizedDelaySec;
        };
      };
    })

    (lib.mkIf (cfgZfs.enabled && cfgTrim.enable) {
      systemd.services.zpool-trim = {
        description = "ZFS pools trim";
        after = [ "zfs-import.target" ];
        path = [ cfgZfs.package ];
        startAt = cfgTrim.interval;
        # By default we ignore errors returned by the trim command, in case:
        # - HDDs are mixed with SSDs
        # - an SSD in a pool is already being trimmed
        # - there are only HDDs, where a failure would put the system in a degraded state
        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool; done || true' ";
      };

      systemd.timers.zpool-trim.timerConfig = {
        Persistent = lib.mkDefault "yes";
        RandomizedDelaySec = cfgTrim.randomizedDelaySec;
      };
    })
  ];
}