{
  config,
  lib,
  options,
  pkgs,
  utils,
  ...
}:
#
# TODO: zfs tunables

let

  cfgZfs = config.boot.zfs;
  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
  cfgSnapshots = config.services.zfs.autoSnapshot;
  cfgSnapFlags = cfgSnapshots.flags;
  cfgScrub = config.services.zfs.autoScrub;
  cfgTrim = config.services.zfs.trim;
  cfgZED = config.services.zfs.zed;

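  # Pick the ZFS kernel module build that matches the configured userland
  # package, via the package's kernelModuleAttribute in boot.kernelPackages.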
  selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
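  # Clevis-managed datasets (from boot.initrd.clevis.devices) that back ZFS
  # filesystems needed for boot; the import services pipe `clevis decrypt`
  # into `zfs load-key` for each of them.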
  clevisDatasets = lib.attrNames (
    lib.filterAttrs (
      device: _:
      lib.any (
        e:
        e.fsType == "zfs"
        && (utils.fsNeededForBoot e)
        && (e.device == device || lib.hasPrefix "${device}/" e.device)
      ) config.system.build.fileSystems
    ) config.boot.initrd.clevis.devices
  );

  inInitrd = config.boot.initrd.supportedFilesystems.zfs or false;
  inSystem = config.boot.supportedFilesystems.zfs or false;

  autosnapPkg = pkgs.zfstools.override {
    zfs = cfgZfs.package;
  };

  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";

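  # A ZFS fileSystems entry uses the dataset name as its `device` value; the
  # pool is the first path component, e.g. datasetToPool "tank/data/home"
  # evaluates to "tank".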
  datasetToPool = x: lib.elemAt (lib.splitString "/" x) 0;

  fsToPool = fs: datasetToPool fs.device;

  zfsFilesystems = lib.filter (x: x.fsType == "zfs") config.system.build.fileSystems;

  allPools = lib.unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);

  rootPools = lib.unique (map fsToPool (lib.filter utils.fsNeededForBoot zfsFilesystems));

  dataPools = lib.unique (lib.filter (pool: !(lib.elem pool rootPools)) allPools);

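  # Snapshot intervals understood by zfs-auto-snapshot; a service/timer pair is
  # generated for each of these below.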
  snapshotNames = [
    "frequent"
    "hourly"
    "daily"
    "weekly"
    "monthly"
  ];

  # When importing ZFS pools, there's one difficulty: These scripts may run
  # before the backing devices (physical HDDs, etc.) of the pool have been
  # scanned and initialized.
  #
  # An attempted import with all devices missing will just fail, and can be
  # retried, but an import where e.g. two out of three disks in a three-way
  # mirror are missing, will succeed. This is a problem: When the missing disks
  # are later discovered, they won't be automatically set online, rendering the
  # pool redundancy-less (and far slower) until such time as the system reboots.
  #
  # The solution is the below. poolReady checks the status of an un-imported
  # pool, to see if *every* device is available -- in which case the pool will be
  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
  #
  # The import scripts then loop over this, waiting until the pool is ready or a
  # sufficient amount of time has passed that we can assume it won't be. In the
  # latter case it makes one last attempt at importing, allowing the system to
  # (eventually) boot even with a degraded pool.
  importLib =
    {
      zpoolCmd,
      awkCmd,
      pool,
    }:
    let
      devNodes =
        if pool != null && cfgZfs.pools ? ${pool} then cfgZfs.pools.${pool}.devNodes else cfgZfs.devNodes;
    in
    ''
      # shellcheck disable=SC2013
      for o in $(cat /proc/cmdline); do
        case $o in
          zfs_force|zfs_force=1|zfs_force=y)
            ZFS_FORCE="-f"
            ;;
        esac
      done
      poolReady() {
        pool="$1"
        state="$("${zpoolCmd}" import -d "${devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
        if [[ "$state" = "ONLINE" ]]; then
          return 0
        else
          echo "Pool $pool in state $state, waiting"
          return 1
        fi
      }
      poolImported() {
        pool="$1"
        "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
      }
      poolImport() {
        pool="$1"
        # shellcheck disable=SC2086
        "${zpoolCmd}" import -d "${devNodes}" -N $ZFS_FORCE "$pool"
      }
    '';

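  # Translate a pool name into the stage-1/stage-2 mount units that depend on
  # it, so the per-pool import service can be ordered before them.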
  getPoolFilesystems =
    pool: lib.filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;

  getPoolMounts =
    prefix: pool:
    let
      poolFSes = getPoolFilesystems pool;

      # Remove the "/" suffix because even though most mountpoints
      # won't have it, the "/" mountpoint will, and we can't have the
      # trailing slash in "/sysroot/" in stage 1.
      mountPoint = fs: utils.escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));

      hasUsr = lib.any (fs: fs.mountPoint == "/usr") poolFSes;
    in
    map (x: "${mountPoint x}.mount") poolFSes ++ lib.optional hasUsr "sysusr-usr.mount";

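  # Build the `zfs list` invocation used to find datasets whose encryption keys
  # still need to be loaded, honoring requestEncryptionCredentials being either
  # a boolean or an explicit list of datasets.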
  getKeyLocations =
    pool:
    if lib.isBool cfgZfs.requestEncryptionCredentials then
      {
        hasKeys = cfgZfs.requestEncryptionCredentials;
        command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus -t volume,filesystem ${pool}";
      }
    else
      let
        keys = lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
      in
      {
        hasKeys = keys != [ ];
        command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus -t volume,filesystem ${toString keys}";
      };

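  # Generate a "zfs-import-<pool>" oneshot unit that waits for the pool's
  # devices, imports the pool (optionally with -f), decrypts clevis-managed
  # datasets and loads encryption keys before the pool's mount units run.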
  createImportService =
    {
      pool,
      systemd,
      force,
      prefix ? "",
    }:
    lib.nameValuePair "zfs-import-${pool}" {
      description = "Import ZFS pool \"${pool}\"";
      # We wait for systemd-udev-settle to ensure devices are available,
      # but don't *require* it, because mounts shouldn't be killed if it's stopped.
      # In the future, hopefully someone will complete this:
      # https://github.com/zfsonlinux/zfs/pull/4943
      wants = [
        "systemd-udev-settle.service"
      ] ++ lib.optional (config.boot.initrd.clevis.useTang) "network-online.target";
      after = [
        "systemd-udev-settle.service"
        "systemd-modules-load.service"
        "systemd-ask-password-console.service"
      ] ++ lib.optional (config.boot.initrd.clevis.useTang) "network-online.target";
      requiredBy =
        let
          poolFilesystems = getPoolFilesystems pool;
          noauto = poolFilesystems != [ ] && lib.all (fs: lib.elem "noauto" fs.options) poolFilesystems;
        in
        getPoolMounts prefix pool ++ lib.optional (!noauto) "zfs-import.target";
      before = getPoolMounts prefix pool ++ [
        "shutdown.target"
        "zfs-import.target"
      ];
      conflicts = [ "shutdown.target" ];
      unitConfig = {
        DefaultDependencies = "no";
      };
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
      environment.ZFS_FORCE = lib.optionalString force "-f";
      script =
        let
          keyLocations = getKeyLocations pool;
        in
        (importLib {
          # See comments at importLib definition.
          zpoolCmd = "${cfgZfs.package}/sbin/zpool";
          awkCmd = "${pkgs.gawk}/bin/awk";
          inherit pool;
        })
        + ''
          if ! poolImported "${pool}"; then
            echo -n "importing ZFS pool \"${pool}\"..."
            # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
            for _ in $(seq 1 60); do
              poolReady "${pool}" && poolImport "${pool}" && break
              sleep 1
            done
            poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
          fi
          if poolImported "${pool}"; then
            ${lib.optionalString config.boot.initrd.clevis.enable (
              lib.concatMapStringsSep "\n" (
                elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true "
              ) (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets)
            )}


            ${lib.optionalString keyLocations.hasKeys ''
              ${keyLocations.command} | while IFS=$'\t' read -r ds kl ks; do
                {
                if [[ "$ks" != unavailable ]]; then
                  continue
                fi
                case "$kl" in
                  none )
                    ;;
                  prompt )
                    tries=3
                    success=false
                    while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
                      ${systemd}/bin/systemd-ask-password ${lib.optionalString cfgZfs.useKeyringForCredentials ("--keyname=zfs-$ds")} --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
                        && success=true \
                        || tries=$((tries - 1))
                    done
                    [[ $success = true ]]
                    ;;
                  * )
                    ${cfgZfs.package}/sbin/zfs load-key "$ds"
                    ;;
                esac
                } < /dev/null # To protect while read ds kl in case anything reads stdin
              done
            ''}
            echo "Successfully imported ${pool}"
          else
            exit 1
          fi
        '';
    };

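  # Render cfgZED.settings as zed.rc "KEY=value" lines: integers verbatim,
  # strings and lists quoted, booleans mapped to 1/0. For example,
  # ZED_NOTIFY_VERBOSE = false becomes ZED_NOTIFY_VERBOSE=0.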
  zedConf = lib.generators.toKeyValue {
    mkKeyValue = lib.generators.mkKeyValueDefault {
      mkValueString =
        v:
        if lib.isInt v then
          toString v
        else if lib.isString v then
          "\"${v}\""
        else if true == v then
          "1"
        else if false == v then
          "0"
        else if lib.isList v then
          "\"" + (lib.concatStringsSep " " v) + "\""
        else
          lib.err "this value is" (toString v);
    } "=";
  } cfgZED.settings;
in

{

  imports = [
    (lib.mkRemovedOptionModule [
      "boot"
      "zfs"
      "enableLegacyCrypto"
    ] "The corresponding package was removed from nixpkgs.")
    (lib.mkRemovedOptionModule [
      "boot"
      "zfs"
      "enableUnstable"
    ] "Instead set `boot.zfs.package = pkgs.zfs_unstable;`")
  ];

  ###### interface

  options = {
    boot.zfs = {
      package = lib.mkOption {
        type = lib.types.package;
        default = pkgs.zfs;
        defaultText = lib.literalExpression "pkgs.zfs";
        description = "Configured ZFS userland tools package.";
      };

      modulePackage = lib.mkOption {
        internal = true; # It is supposed to be selected automatically, but can be overridden by expert users.
        default = selectModulePackage cfgZfs.package;
        type = lib.types.package;
        description = "Configured ZFS kernel module package.";
      };

      enabled = lib.mkOption {
        readOnly = true;
        type = lib.types.bool;
        default = inInitrd || inSystem;
        defaultText = lib.literalMD "`true` if ZFS filesystem support is enabled";
        description = "True if ZFS filesystem support is enabled";
      };

      allowHibernation = lib.mkOption {
        type = lib.types.bool;
        default = false;
        description = ''
          Allow hibernation support. This may be an unsafe option depending on your
          setup; make sure NOT to use swap on ZFS.
        '';
      };

      extraPools = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        default = [ ];
        example = [
          "tank"
          "data"
        ];
        description = ''
          Name or GUID of extra ZFS pools that you wish to import during boot.

          Usually this is not necessary. Instead, you should set the mountpoint property
          of ZFS filesystems to `legacy` and add the ZFS filesystems to
          NixOS's {option}`fileSystems` option, which makes NixOS automatically
          import the associated pool.

          However, in some cases (e.g. if you have many filesystems) it may be preferable
          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
          will not be managing those filesystems, you will need to specify the ZFS pool here
          so that NixOS automatically imports it on every boot.
        '';
      };

      devNodes = lib.mkOption {
        type = lib.types.path;
        default = "/dev/disk/by-id";
        description = ''
          Name of the directory from which to import ZFS devices; this is passed
          to `zpool import` as the value of the `-d` option.

          For guidance on choosing this value, see
          [the ZFS documentation](https://openzfs.github.io/openzfs-docs/Project%20and%20Community/FAQ.html#selecting-dev-names-when-creating-a-pool-linux).
        '';
      };

      forceImportRoot = lib.mkOption {
        type = lib.types.bool;
        default = true;
        description = ''
          Forcibly import the ZFS root pool(s) during early boot.

          This is enabled by default for backwards compatibility purposes, but it is highly
          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
          to protect your ZFS pools.

          If you set this option to `false` and NixOS subsequently fails to
          boot because it cannot import the root pool, you should boot with the
          `zfs_force=1` option as a kernel parameter (e.g. by manually
          editing the kernel params in grub during boot). You should only need to do this
          once.
        '';
      };

      forceImportAll = lib.mkOption {
        type = lib.types.bool;
        default = false;
        description = ''
          Forcibly import all ZFS pool(s).

          If you set this option to `false` and NixOS subsequently fails to
          import your non-root ZFS pool(s), you should manually import each pool with
          "zpool import -f \<pool-name\>", and then reboot. You should only need to do
          this once.
        '';
      };

      requestEncryptionCredentials = lib.mkOption {
        type = lib.types.either lib.types.bool (lib.types.listOf lib.types.str);
        default = true;
        example = [
          "tank"
          "data"
        ];
        description = ''
          If true, the encryption keys or passwords for all encrypted datasets
          are requested on import. To decrypt only selected datasets, supply a
          list of dataset names instead. For root pools the encryption key can be
          supplied both via an interactive prompt (keylocation=prompt) and from a file (keylocation=file://).
        '';
      };

      useKeyringForCredentials = lib.mkEnableOption "use of the kernel keyring for encryption credentials (keyname=zfs-<poolname>)";

      passwordTimeout = lib.mkOption {
        type = lib.types.int;
        default = 0;
        description = ''
          Timeout in seconds to wait for password entry when decrypting at boot.

          Defaults to 0, which waits forever.
        '';
      };

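      # Per-pool overrides of the global defaults, e.g. (hypothetical pool name):
      #   boot.zfs.pools.tank.devNodes = "/dev/disk/by-path";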
      pools = lib.mkOption {
        type = lib.types.attrsOf (
          lib.types.submodule {
            options = {
              devNodes = lib.mkOption {
                type = lib.types.path;
                default = cfgZfs.devNodes;
                defaultText = "config.boot.zfs.devNodes";
                description = options.boot.zfs.devNodes.description;
              };
            };
          }
        );
        default = { };
        description = ''
          Configuration for individual pools to override global defaults.
        '';
      };

      removeLinuxDRM = lib.mkOption {
        type = lib.types.bool;
        default = false;
        description = ''
          Patch the kernel to change symbols needed by ZFS from
          EXPORT_SYMBOL_GPL to EXPORT_SYMBOL.

          Currently has no effect, but may again in future if a kernel
          update breaks ZFS due to symbols being newly changed to GPL.
        '';
      };
    };

    services.zfs.autoSnapshot = {
      enable = lib.mkOption {
        default = false;
        type = lib.types.bool;
        description = ''
          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
          Note that you must set the `com.sun:auto-snapshot`
          property to `true` on all datasets which you wish
          to auto-snapshot.

          You can override a child dataset to use, or not use auto-snapshotting
          by setting its flag with the given interval:
          `zfs set com.sun:auto-snapshot:weekly=false DATASET`
        '';
      };

      flags = lib.mkOption {
        default = "-k -p";
        example = "-k -p --utc";
        type = lib.types.str;
        description = ''
          Flags to pass to the zfs-auto-snapshot command.

          Run `zfs-auto-snapshot` (without any arguments) to
          see available flags.

          If it's not too inconvenient for snapshots to have timestamps in UTC,
          it is suggested that you append `--utc` to the list
          of default options (see example).

          Otherwise, snapshot names can cause name conflicts or apparent time
          reversals due to daylight savings, timezone or other date/time changes.
        '';
      };

      frequent = lib.mkOption {
        default = 4;
        type = lib.types.int;
        description = ''
          Number of frequent (15-minute) auto-snapshots that you wish to keep.
        '';
      };

      hourly = lib.mkOption {
        default = 24;
        type = lib.types.int;
        description = ''
          Number of hourly auto-snapshots that you wish to keep.
        '';
      };

      daily = lib.mkOption {
        default = 7;
        type = lib.types.int;
        description = ''
          Number of daily auto-snapshots that you wish to keep.
        '';
      };

      weekly = lib.mkOption {
        default = 4;
        type = lib.types.int;
        description = ''
          Number of weekly auto-snapshots that you wish to keep.
        '';
      };

      monthly = lib.mkOption {
        default = 12;
        type = lib.types.int;
        description = ''
          Number of monthly auto-snapshots that you wish to keep.
        '';
      };
    };

    services.zfs.trim = {
      enable = lib.mkOption {
        description = "Whether to enable periodic TRIM on all ZFS pools.";
        default = true;
        example = false;
        type = lib.types.bool;
      };

      interval = lib.mkOption {
        default = "weekly";
        type = lib.types.str;
        example = "daily";
        description = ''
          How often we run trim. For most desktop and server systems
          a sufficient trimming frequency is once a week.

          The format is described in
          {manpage}`systemd.time(7)`.
        '';
      };

      randomizedDelaySec = lib.mkOption {
        default = "6h";
        type = lib.types.str;
        example = "12h";
        description = ''
          Add a randomized delay before each ZFS trim.
          The delay will be chosen between zero and this value.
          This value must be a time span in the format specified by
          {manpage}`systemd.time(7)`
        '';
      };
    };

    services.zfs.autoScrub = {
      enable = lib.mkEnableOption "periodic scrubbing of ZFS pools";

      interval = lib.mkOption {
        default = "monthly";
        type = lib.types.str;
        example = "quarterly";
        description = ''
          Systemd calendar expression when to scrub ZFS pools. See
          {manpage}`systemd.time(7)`.
        '';
      };

      randomizedDelaySec = lib.mkOption {
        default = "6h";
        type = lib.types.str;
        example = "12h";
        description = ''
          Add a randomized delay before each ZFS autoscrub.
          The delay will be chosen between zero and this value.
          This value must be a time span in the format specified by
          {manpage}`systemd.time(7)`
        '';
      };

      pools = lib.mkOption {
        default = [ ];
        type = lib.types.listOf lib.types.str;
        example = [ "tank" ];
        description = ''
          List of ZFS pools to periodically scrub. If empty, all pools
          will be scrubbed.
        '';
      };
    };

    services.zfs.expandOnBoot = lib.mkOption {
      type = lib.types.either (lib.types.enum [
        "disabled"
        "all"
      ]) (lib.types.listOf lib.types.str);
      default = "disabled";
      example = [
        "tank"
        "dozer"
      ];
      description = ''
        After importing, expand each device in the specified pools.

        Set the value to the plain string "all" to expand all pools on boot:

            services.zfs.expandOnBoot = "all";

        or set the value to a list of pools to expand the disks of specific pools:

            services.zfs.expandOnBoot = [ "tank" "dozer" ];
      '';
    };

    services.zfs.zed = {
      enableMail = lib.mkOption {
        type = lib.types.bool;
        default = config.services.mail.sendmailSetuidWrapper != null;
        defaultText = lib.literalExpression ''
          config.services.mail.sendmailSetuidWrapper != null
        '';
        description = ''
          Whether to enable ZED's ability to send emails.
        '';
      };

      settings = lib.mkOption {
        type =
          let
            t = lib.types;
          in
          t.attrsOf (
            t.oneOf [
              t.str
              t.int
              t.bool
              (t.listOf t.str)
            ]
          );
        example = lib.literalExpression ''
          {
            ZED_DEBUG_LOG = "/tmp/zed.debug.log";

            ZED_EMAIL_ADDR = [ "root" ];
            ZED_EMAIL_PROG = "mail";
            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";

            ZED_NOTIFY_INTERVAL_SECS = 3600;
            ZED_NOTIFY_VERBOSE = false;

            ZED_USE_ENCLOSURE_LEDS = true;
            ZED_SCRUB_AFTER_RESILVER = false;
          }
        '';
        description = ''
          ZFS Event Daemon /etc/zfs/zed.d/zed.rc content

          See
          {manpage}`zed(8)`
          for details on ZED and the scripts in /etc/zfs/zed.d to find the possible variables
        '';
      };
    };
  };

  ###### implementation

  config = lib.mkMerge [
    (lib.mkIf cfgZfs.enabled {
      assertions = [
        {
          assertion = cfgZfs.modulePackage.version == cfgZfs.package.version;
          message = "The kernel module and the userspace tooling versions are not matching, this is an unsupported usecase.";
        }
        {
          assertion = config.networking.hostId != null;
          message = "ZFS requires networking.hostId to be set";
        }
        {
          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
        }
        {
          assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
          message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
        }
        {
          assertion = !(lib.elem "" allPools);
          message = ''
            Automatic pool detection found an empty pool name, which can't be used.
            Hint: for `fileSystems` entries with `fsType = zfs`, the `device` attribute
            should be a zfs dataset name, like `device = "pool/data/set"`.
            This error can be triggered by using an absolute path, such as `"/dev/disk/..."`.
          '';
        }
      ];

      boot = {
        kernelModules = [ "zfs" ];
        # https://github.com/openzfs/zfs/issues/260
        # https://github.com/openzfs/zfs/issues/12842
        # https://github.com/NixOS/nixpkgs/issues/106093
        kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];

        extraModulePackages = [
          cfgZfs.modulePackage
        ];
      };

      boot.initrd = lib.mkIf inInitrd {
        kernelModules = [ "zfs" ];
        extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
          copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
          copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
          copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
          copy_bin_and_libs ${cfgZfs.package}/lib/udev/vdev_id
          copy_bin_and_libs ${cfgZfs.package}/lib/udev/zvol_id
        '';
        extraUtilsCommandsTest = lib.mkIf (!config.boot.initrd.systemd.enable) ''
          $out/bin/zfs --help >/dev/null 2>&1
          $out/bin/zpool --help >/dev/null 2>&1
        '';
        postResumeCommands = lib.mkIf (!config.boot.initrd.systemd.enable) (
          lib.concatStringsSep "\n" (
            [
              ''
                ZFS_FORCE="${lib.optionalString cfgZfs.forceImportRoot "-f"}"
              ''
            ]
            ++ [
              (importLib {
                # See comments at importLib definition.
                zpoolCmd = "zpool";
                awkCmd = "awk";
                pool = null;
              })
            ]
            ++ (map (pool: ''
              echo -n "importing root ZFS pool \"${pool}\"..."
              # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
              if ! poolImported "${pool}"; then
                for _ in $(seq 1 60); do
                  poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
                  sleep 1
                  echo -n .
                done
                echo
                if [[ -n "$msg" ]]; then
                  echo "$msg";
                fi
                poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
              fi

              ${lib.optionalString config.boot.initrd.clevis.enable (
                lib.concatMapStringsSep "\n" (
                  elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}"
                ) (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets)
              )}

              ${
                if lib.isBool cfgZfs.requestEncryptionCredentials then
                  lib.optionalString cfgZfs.requestEncryptionCredentials ''
                    zfs load-key -a
                  ''
                else
                  lib.concatMapStrings (fs: ''
                    zfs load-key -- ${lib.escapeShellArg fs}
                  '') (lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)
              }
            '') rootPools)
          )
        );

        # Systemd in stage 1
        systemd = lib.mkIf config.boot.initrd.systemd.enable {
          packages = [ cfgZfs.package ];
          services = lib.listToAttrs (
            map (
              pool:
              createImportService {
                inherit pool;
                systemd = config.boot.initrd.systemd.package;
                force = cfgZfs.forceImportRoot;
                prefix = "/sysroot";
              }
            ) rootPools
          );
          targets.zfs-import.wantedBy = [ "zfs.target" ];
          targets.zfs.wantedBy = [ "initrd.target" ];
          extraBin = {
            zpool = "${cfgZfs.package}/sbin/zpool";
            zfs = "${cfgZfs.package}/sbin/zfs";
            awk = "${pkgs.gawk}/bin/awk";
          };
          storePaths = [
            "${cfgZfs.package}/lib/udev/vdev_id"
            "${cfgZfs.package}/lib/udev/zvol_id"
          ];
        };
        services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, in stage 1
      };

      systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source =
        pkgs.writeShellScript "zpool-sync-shutdown" ''
          exec ${cfgZfs.package}/bin/zpool sync
        '';
      systemd.shutdownRamfs.storePaths = [ "${cfgZfs.package}/bin/zpool" ];

      # TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567. To avoid breaking people's bootloaders, and because probably not everybody reads release notes that thoroughly, also check inSystem here.
      boot.loader.grub = lib.mkIf (inInitrd || inSystem) {
        zfsSupport = true;
        zfsPackage = cfgZfs.package;
      };

      services.zfs.zed.settings = {
        ZED_EMAIL_PROG = lib.mkIf cfgZED.enableMail (
          lib.mkDefault (
            config.security.wrapperDir + "/" + config.services.mail.sendmailSetuidWrapper.program
          )
        );
        # subject in header for sendmail
        ZED_EMAIL_OPTS = lib.mkIf cfgZED.enableMail (lib.mkDefault "@ADDRESS@");

        PATH = lib.makeBinPath [
          cfgZfs.package
          pkgs.coreutils
          pkgs.curl
          pkgs.gawk
          pkgs.gnugrep
          pkgs.gnused
          pkgs.nettools
          pkgs.util-linux
        ];
      };

      # ZFS already has its own scheduler. Without this, my (@Artturin) computer froze for a second whenever I ran a nix build.
      services.udev.extraRules = ''
        ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
      '';

      environment.etc =
        lib.genAttrs
          (map (file: "zfs/zed.d/${file}") [
            "all-syslog.sh"
            "pool_import-led.sh"
            "resilver_finish-start-scrub.sh"
            "statechange-led.sh"
            "vdev_attach-led.sh"
            "zed-functions.sh"
            "data-notify.sh"
            "resilver_finish-notify.sh"
            "scrub_finish-notify.sh"
            "statechange-notify.sh"
            "vdev_clear-led.sh"
          ])
          (file: {
            source = "${cfgZfs.package}/etc/${file}";
          })
        // {
          "zfs/zed.d/zed.rc".text = zedConf;
          "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
        };

      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
      environment.systemPackages = [ cfgZfs.package ] ++ lib.optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags

      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
      systemd.packages = [ cfgZfs.package ];

      systemd.services =
        let
          createImportService' =
            pool:
            createImportService {
              inherit pool;
              systemd = config.systemd.package;
              force = cfgZfs.forceImportAll;
            };

          # This forces a sync of any ZFS pools prior to poweroff, even if they're set
          # to sync=disabled.
          createSyncService =
            pool:
            lib.nameValuePair "zfs-sync-${pool}" {
              description = "Sync ZFS pool \"${pool}\"";
              wantedBy = [ "shutdown.target" ];
              before = [ "final.target" ];
              unitConfig = {
                DefaultDependencies = false;
              };
              serviceConfig = {
                Type = "oneshot";
                RemainAfterExit = true;
              };
              script = ''
                ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
              '';
            };

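          # Order the stock OpenZFS units (shipped via systemd.packages above)
          # after module loading and pull them in via zfs.target.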
          createZfsService =
            serv:
            lib.nameValuePair serv {
              after = [ "systemd-modules-load.service" ];
              wantedBy = [ "zfs.target" ];
            };

        in
        lib.listToAttrs (
          map createImportService' dataPools
          ++ map createSyncService allPools
          ++ map createZfsService [
            "zfs-mount"
            "zfs-share"
            "zfs-zed"
          ]
        );

      systemd.targets.zfs-import.wantedBy = [ "zfs.target" ];

      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
    })

    (lib.mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
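      # "zpool-expand@<pool>" grows every device in the given pool;
      # zpool-expand-pools starts one instance per configured pool at boot.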
      systemd.services."zpool-expand@" = {
        description = "Expand ZFS pools";
        after = [ "zfs.target" ];

        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
        };

        scriptArgs = "%i";
        path = [ cfgZfs.package ];

        script = ''
          pool=$1

          echo "Expanding all devices for $pool."

          ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
        '';
      };

      systemd.services."zpool-expand-pools" =
        let
          # Create a string, to be interpolated in a bash script
          # which enumerates all of the pools to expand.
          # If the option is set to "all", we want to dynamically
          # expand every pool. Otherwise we want to enumerate
          # just the specifically provided list of pools.
          poolListProvider =
            if cfgExpandOnBoot == "all" then
              "$(zpool list -H -o name)"
            else
              lib.escapeShellArgs cfgExpandOnBoot;
        in
        {
          description = "Expand specified ZFS pools";
          wantedBy = [ "default.target" ];
          after = [ "zfs.target" ];

          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
          };

          path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];

          script = ''
            for pool in ${poolListProvider}; do
              systemctl start --no-block "zpool-expand@$pool"
            done
          '';
        };
    })

    (lib.mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
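      # One zfs-snapshot-<interval> service/timer pair per interval in
      # snapshotNames; e.g. the "frequent" timer fires at *:0,15,30,45 and keeps
      # cfgSnapshots.frequent snapshots.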
      systemd.services =
        let
          descr =
            name:
            if name == "frequent" then
              "15 mins"
            else if name == "hourly" then
              "hour"
            else if name == "daily" then
              "day"
            else if name == "weekly" then
              "week"
            else if name == "monthly" then
              "month"
            else
              throw "unknown snapshot name";
          numSnapshots = name: builtins.getAttr name cfgSnapshots;
        in
        builtins.listToAttrs (
          map (snapName: {
            name = "zfs-snapshot-${snapName}";
            value = {
              description = "ZFS auto-snapshotting every ${descr snapName}";
              after = [ "zfs-import.target" ];
              serviceConfig = {
                Type = "oneshot";
                ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
              };
              restartIfChanged = false;
            };
          }) snapshotNames
        );

      systemd.timers =
        let
          timer = name: if name == "frequent" then "*:0,15,30,45" else name;
        in
        builtins.listToAttrs (
          map (snapName: {
            name = "zfs-snapshot-${snapName}";
            value = {
              wantedBy = [ "timers.target" ];
              timerConfig = {
                OnCalendar = timer snapName;
                Persistent = lib.mkDefault "yes";
              };
            };
          }) snapshotNames
        );
    })

    (lib.mkIf (cfgZfs.enabled && cfgScrub.enable) {
      systemd.services.zfs-scrub = {
        description = "ZFS pools scrubbing";
        after = [ "zfs-import.target" ];
        serviceConfig = {
          Type = "simple";
          IOSchedulingClass = "idle";
        };
        script = ''
          # shellcheck disable=SC2046
          ${cfgZfs.package}/bin/zpool scrub -w ${
            if cfgScrub.pools != [ ] then
              (lib.concatStringsSep " " cfgScrub.pools)
            else
              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
          }
        '';
      };

      systemd.timers.zfs-scrub = {
        wantedBy = [ "timers.target" ];
        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
        timerConfig = {
          OnCalendar = cfgScrub.interval;
          Persistent = lib.mkDefault "yes";
          RandomizedDelaySec = cfgScrub.randomizedDelaySec;
        };
      };
    })

    (lib.mkIf (cfgZfs.enabled && cfgTrim.enable) {
      systemd.services.zpool-trim = {
        description = "ZFS pools trim";
        after = [ "zfs-import.target" ];
        path = [ cfgZfs.package ];
        startAt = cfgTrim.interval;
        # By default we ignore errors returned by the trim command, in case:
        # - HDDs are mixed with SSDs
        # - there is an SSD in a pool that is currently being trimmed
        # - there are only HDDs, and we would otherwise put the system in a degraded state
        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool; done || true' ";
      };

      systemd.timers.zpool-trim.timerConfig = {
        Persistent = lib.mkDefault "yes";
        RandomizedDelaySec = cfgTrim.randomizedDelaySec;
      };
    })
  ];
}