{ config, lib, options, pkgs, utils, ... }:
#
# TODO: zfs tunables

with utils;
with lib;

let

  cfgZfs = config.boot.zfs;
  optZfs = options.boot.zfs;
  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
  cfgSnapshots = config.services.zfs.autoSnapshot;
  cfgSnapFlags = cfgSnapshots.flags;
  cfgScrub = config.services.zfs.autoScrub;
  cfgTrim = config.services.zfs.trim;
  cfgZED = config.services.zfs.zed;

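  # Pick the kernel module build that matches the configured userland package
  # from the active kernelPackages set.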
  selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
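  # Boot-critical ZFS datasets whose backing devices have a Clevis policy configured;
  # the import code below pipes `clevis decrypt` into `zfs load-key` for these.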
  clevisDatasets = map (e: e.device) (filter (e: e.device != null && (hasAttr e.device config.boot.initrd.clevis.devices) && e.fsType == "zfs" && (fsNeededForBoot e)) config.system.build.fileSystems);


  inInitrd = config.boot.initrd.supportedFilesystems.zfs or false;
  inSystem = config.boot.supportedFilesystems.zfs or false;

  autosnapPkg = pkgs.zfstools.override {
    zfs = cfgZfs.package;
  };

  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";

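  # First path component of a dataset name, e.g. "rpool/safe/root" -> "rpool".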
  datasetToPool = x: elemAt (splitString "/" x) 0;

  fsToPool = fs: datasetToPool fs.device;

  zfsFilesystems = filter (x: x.fsType == "zfs") config.system.build.fileSystems;

  allPools = unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);

  rootPools = unique (map fsToPool (filter fsNeededForBoot zfsFilesystems));

  dataPools = unique (filter (pool: !(elem pool rootPools)) allPools);

  snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];

  # When importing ZFS pools, there's one difficulty: These scripts may run
  # before the backing devices (physical HDDs, etc.) of the pool have been
  # scanned and initialized.
  #
  # An attempted import with all devices missing will just fail and can be
  # retried, but an import where e.g. two out of three disks in a three-way
  # mirror are missing will succeed. This is a problem: When the missing disks
  # are later discovered, they won't be automatically set online, leaving the
  # pool without redundancy (and far slower) until the system reboots.
  #
  # The solution is below. poolReady checks the status of an un-imported
  # pool to see if *every* device is available -- in which case the pool will be
  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
  #
  # The import scripts then loop over this, waiting until the pool is ready or a
  # sufficient amount of time has passed that we can assume it won't be. In the
  # latter case they make one last attempt at importing, allowing the system to
  # (eventually) boot even with a degraded pool.
  importLib = {zpoolCmd, awkCmd, cfgZfs}: ''
    for o in $(cat /proc/cmdline); do
      case $o in
        zfs_force|zfs_force=1|zfs_force=y)
          ZFS_FORCE="-f"
          ;;
      esac
    done
    poolReady() {
      pool="$1"
      state="$("${zpoolCmd}" import -d "${cfgZfs.devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
      if [[ "$state" = "ONLINE" ]]; then
        return 0
      else
        echo "Pool $pool in state $state, waiting"
        return 1
      fi
    }
    poolImported() {
      pool="$1"
      "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
    }
    poolImport() {
      pool="$1"
      "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
    }
  '';

  getPoolFilesystems = pool:
    filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;

  getPoolMounts = prefix: pool:
    let
      poolFSes = getPoolFilesystems pool;

      # Remove the "/" suffix because even though most mountpoints
      # won't have it, the "/" mountpoint will, and we can't have the
      # trailing slash in "/sysroot/" in stage 1.
      mountPoint = fs: escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));

      hasUsr = lib.any (fs: fs.mountPoint == "/usr") poolFSes;
    in
      map (x: "${mountPoint x}.mount") poolFSes
      ++ lib.optional hasUsr "sysusr-usr.mount";

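  # Whether any encryption keys need to be loaded for `pool`, plus the `zfs list`
  # invocation used to enumerate the datasets' keylocation/keystatus. The shape
  # depends on whether requestEncryptionCredentials is a bool or a list of dataset names.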
  getKeyLocations = pool: if isBool cfgZfs.requestEncryptionCredentials then {
    hasKeys = cfgZfs.requestEncryptionCredentials;
    command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus -t volume,filesystem ${pool}";
  } else let
    keys = filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
  in {
    hasKeys = keys != [];
    command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus -t volume,filesystem ${toString keys}";
  };

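  # Builds the "zfs-import-<pool>" oneshot unit. Used both from stage 1
  # (initrd systemd, prefix = "/sysroot", force = forceImportRoot) and from
  # stage 2 (force = forceImportAll); `force` adds -f to the zpool import.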
  createImportService = { pool, systemd, force, prefix ? "" }:
    nameValuePair "zfs-import-${pool}" {
      description = "Import ZFS pool \"${pool}\"";
      # We wait for systemd-udev-settle to ensure devices are available,
      # but don't *require* it, because mounts shouldn't be killed if it's stopped.
      # In the future, hopefully someone will complete this:
      # https://github.com/zfsonlinux/zfs/pull/4943
      wants = [ "systemd-udev-settle.service" ] ++ optional (config.boot.initrd.clevis.useTang) "network-online.target";
      after = [
        "systemd-udev-settle.service"
        "systemd-modules-load.service"
        "systemd-ask-password-console.service"
      ] ++ optional (config.boot.initrd.clevis.useTang) "network-online.target";
      requiredBy = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
      before = getPoolMounts prefix pool ++ [ "shutdown.target" "zfs-import.target" ];
      conflicts = [ "shutdown.target" ];
      unitConfig = {
        DefaultDependencies = "no";
      };
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
      environment.ZFS_FORCE = optionalString force "-f";
      script = let
        keyLocations = getKeyLocations pool;
      in (importLib {
        # See comments at importLib definition.
        zpoolCmd = "${cfgZfs.package}/sbin/zpool";
        awkCmd = "${pkgs.gawk}/bin/awk";
        inherit cfgZfs;
      }) + ''
        if ! poolImported "${pool}"; then
          echo -n "importing ZFS pool \"${pool}\"..."
          # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
          for trial in `seq 1 60`; do
            poolReady "${pool}" && poolImport "${pool}" && break
            sleep 1
          done
          poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
        fi
        if poolImported "${pool}"; then
          ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true ") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}


          ${optionalString keyLocations.hasKeys ''
            ${keyLocations.command} | while IFS=$'\t' read ds kl ks; do
              {
              if [[ "$ks" != unavailable ]]; then
                continue
              fi
              case "$kl" in
                none )
                  ;;
                prompt )
                  tries=3
                  success=false
                  while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
                    ${systemd}/bin/systemd-ask-password --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
                      && success=true \
                      || tries=$((tries - 1))
                  done
                  [[ $success = true ]]
                  ;;
                * )
                  ${cfgZfs.package}/sbin/zfs load-key "$ds"
                  ;;
              esac
              } < /dev/null # protect the 'while read ds kl ks' loop in case anything in this block reads stdin
            done
          ''}
          echo "Successfully imported ${pool}"
        else
          exit 1
        fi
      '';
    };

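  # Render cfgZED.settings as the KEY="VALUE" lines of /etc/zfs/zed.d/zed.rc,
  # e.g. ZED_EMAIL_ADDR = [ "root" ] becomes ZED_EMAIL_ADDR="root".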
  zedConf = generators.toKeyValue {
    mkKeyValue = generators.mkKeyValueDefault {
      mkValueString = v:
        if isInt v then toString v
        else if isString v then "\"${v}\""
        else if true == v then "1"
        else if false == v then "0"
        else if isList v then "\"" + (concatStringsSep " " v) + "\""
        else err "this value is" (toString v);
    } "=";
  } cfgZED.settings;
in

{

  imports = [
    (mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
    (mkRemovedOptionModule [ "boot" "zfs" "enableUnstable" ] "Instead set `boot.zfs.package = pkgs.zfs_unstable;`")
  ];

  ###### interface

  options = {
    boot.zfs = {
      package = mkOption {
        type = types.package;
        default = pkgs.zfs;
        defaultText = literalExpression "pkgs.zfs";
        description = "Configured ZFS userland tools package. Use `pkgs.zfs_unstable` if you want to track the latest staging ZFS branch.";
      };

      modulePackage = mkOption {
        internal = true; # It is supposed to be selected automatically, but can be overridden by expert users.
        default = selectModulePackage cfgZfs.package;
        type = types.package;
        description = "Configured ZFS kernel module package.";
      };

      enabled = mkOption {
        readOnly = true;
        type = types.bool;
        default = inInitrd || inSystem;
        defaultText = literalMD "`true` if ZFS filesystem support is enabled";
        description = "True if ZFS filesystem support is enabled";
      };

      allowHibernation = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Allow hibernation support. This may be an unsafe option depending on your
          setup; make sure NOT to use swap on ZFS.
        '';
      };

      extraPools = mkOption {
        type = types.listOf types.str;
        default = [];
        example = [ "tank" "data" ];
        description = ''
          Names or GUIDs of extra ZFS pools that you wish to import during boot.

          Usually this is not necessary. Instead, you should set the mountpoint property
          of ZFS filesystems to `legacy` and add the ZFS filesystems to
          NixOS's {option}`fileSystems` option, which makes NixOS automatically
          import the associated pool.

          However, in some cases (e.g. if you have many filesystems) it may be preferable
          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
          will not be managing those filesystems, you will need to specify the ZFS pool here
          so that NixOS automatically imports it on every boot.
        '';
      };

      devNodes = mkOption {
        type = types.path;
        default = "/dev/disk/by-id";
        description = ''
          Name of directory from which to import ZFS devices.

          This should be a path under /dev containing stable names for all devices needed, as
          import may fail if device nodes are renamed concurrently with a device failing.
        '';
      };

      forceImportRoot = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Forcibly import the ZFS root pool(s) during early boot.

          This is enabled by default for backwards compatibility purposes, but it is highly
          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
          to protect your ZFS pools.

          If you set this option to `false` and NixOS subsequently fails to
          boot because it cannot import the root pool, you should boot with the
          `zfs_force=1` option as a kernel parameter (e.g. by manually
          editing the kernel params in grub during boot). You should only need to do this
          once.
        '';
      };

      forceImportAll = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Forcibly import all ZFS pool(s).

          If you set this option to `false` and NixOS subsequently fails to
          import your non-root ZFS pool(s), you should manually import each pool with
          "zpool import -f \<pool-name\>", and then reboot. You should only need to do
          this once.
        '';
      };

      requestEncryptionCredentials = mkOption {
        type = types.either types.bool (types.listOf types.str);
        default = true;
        example = [ "tank" "data" ];
        description = ''
          If true, encryption keys or passwords for all encrypted datasets
          are requested during pool import. To only decrypt selected datasets,
          supply a list of dataset names instead. For root pools the encryption key
          can be supplied either via an interactive prompt (keylocation=prompt)
          or from a file (keylocation=file://).
        '';
      };

      passwordTimeout = mkOption {
        type = types.int;
        default = 0;
        description = ''
          Timeout in seconds to wait for password entry for decrypt at boot.

          Defaults to 0, which waits forever.
        '';
      };

      removeLinuxDRM = lib.mkOption {
        type = types.bool;
        default = false;
        description = ''
          Patch the kernel to change symbols needed by ZFS from
          EXPORT_SYMBOL_GPL to EXPORT_SYMBOL.

          Currently has no effect, but may again in future if a kernel
          update breaks ZFS due to symbols being newly changed to GPL.
        '';
      };
    };

    services.zfs.autoSnapshot = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = ''
          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
          Note that you must set the `com.sun:auto-snapshot`
          property to `true` on all datasets which you wish
          to auto-snapshot.

          You can override whether a child dataset is auto-snapshotted for a
          given interval by setting the corresponding property, e.g.:
          `zfs set com.sun:auto-snapshot:weekly=false DATASET`
        '';
      };

      flags = mkOption {
        default = "-k -p";
        example = "-k -p --utc";
        type = types.str;
        description = ''
          Flags to pass to the zfs-auto-snapshot command.

          Run `zfs-auto-snapshot` (without any arguments) to
          see available flags.

          If it's not too inconvenient for snapshots to have timestamps in UTC,
          it is suggested that you append `--utc` to the list
          of default options (see example).

          Otherwise, snapshot names can cause name conflicts or apparent time
          reversals due to daylight saving time, timezone or other date/time changes.
        '';
      };

      frequent = mkOption {
        default = 4;
        type = types.int;
        description = ''
          Number of frequent (15-minute) auto-snapshots that you wish to keep.
        '';
      };

      hourly = mkOption {
        default = 24;
        type = types.int;
        description = ''
          Number of hourly auto-snapshots that you wish to keep.
        '';
      };

      daily = mkOption {
        default = 7;
        type = types.int;
        description = ''
          Number of daily auto-snapshots that you wish to keep.
        '';
      };

      weekly = mkOption {
        default = 4;
        type = types.int;
        description = ''
          Number of weekly auto-snapshots that you wish to keep.
        '';
      };

      monthly = mkOption {
        default = 12;
        type = types.int;
        description = ''
          Number of monthly auto-snapshots that you wish to keep.
        '';
      };
    };

    services.zfs.trim = {
      enable = mkOption {
        description = "Whether to enable periodic TRIM on all ZFS pools.";
        default = true;
        example = false;
        type = types.bool;
      };

      interval = mkOption {
        default = "weekly";
        type = types.str;
        example = "daily";
        description = ''
          How often we run trim. For most desktop and server systems
          a sufficient trimming frequency is once a week.

          The format is described in
          {manpage}`systemd.time(7)`.
        '';
      };

      randomizedDelaySec = mkOption {
        default = "6h";
        type = types.str;
        example = "12h";
        description = ''
          Add a randomized delay before each ZFS trim.
          The delay will be chosen between zero and this value.
          This value must be a time span in the format specified by
          {manpage}`systemd.time(7)`.
        '';
      };
    };

    services.zfs.autoScrub = {
      enable = mkEnableOption "periodic scrubbing of ZFS pools";

      interval = mkOption {
        default = "monthly";
        type = types.str;
        example = "quarterly";
        description = ''
          Systemd calendar expression for when to scrub ZFS pools. See
          {manpage}`systemd.time(7)`.
        '';
      };

      randomizedDelaySec = mkOption {
        default = "6h";
        type = types.str;
        example = "12h";
        description = ''
          Add a randomized delay before each ZFS autoscrub.
          The delay will be chosen between zero and this value.
          This value must be a time span in the format specified by
          {manpage}`systemd.time(7)`.
        '';
      };

      pools = mkOption {
        default = [];
        type = types.listOf types.str;
        example = [ "tank" ];
        description = ''
          List of ZFS pools to periodically scrub. If empty, all pools
          will be scrubbed.
        '';
      };
    };

    services.zfs.expandOnBoot = mkOption {
      type = types.either (types.enum [ "disabled" "all" ]) (types.listOf types.str);
      default = "disabled";
      example = [ "tank" "dozer" ];
      description = ''
        After importing, expand each device in the specified pools.

        Set the value to the plain string "all" to expand all pools on boot:

            services.zfs.expandOnBoot = "all";

        or set the value to a list of pools to expand the disks of specific pools:

            services.zfs.expandOnBoot = [ "tank" "dozer" ];
      '';
    };

    services.zfs.zed = {
      enableMail = mkOption {
        type = types.bool;
        default = config.services.mail.sendmailSetuidWrapper != null;
        defaultText = literalExpression ''
          config.services.mail.sendmailSetuidWrapper != null
        '';
        description = ''
          Whether to enable ZED's ability to send emails.
        '';
      };

      settings = mkOption {
        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
        example = literalExpression ''
          {
            ZED_DEBUG_LOG = "/tmp/zed.debug.log";

            ZED_EMAIL_ADDR = [ "root" ];
            ZED_EMAIL_PROG = "mail";
            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";

            ZED_NOTIFY_INTERVAL_SECS = 3600;
            ZED_NOTIFY_VERBOSE = false;

            ZED_USE_ENCLOSURE_LEDS = true;
            ZED_SCRUB_AFTER_RESILVER = false;
          }
        '';
        description = ''
          Contents of the ZFS Event Daemon configuration file,
          /etc/zfs/zed.d/zed.rc.

          See {manpage}`zed(8)` and the scripts in /etc/zfs/zed.d for details
          on ZED and the possible variables.
        '';
      };
    };
  };

  ###### implementation

  config = mkMerge [
    (mkIf cfgZfs.enabled {
      assertions = [
        {
          assertion = cfgZfs.modulePackage.version == cfgZfs.package.version;
          message = "The kernel module and the userspace tooling versions do not match; this is an unsupported use case.";
        }
        {
          assertion = config.networking.hostId != null;
          message = "ZFS requires networking.hostId to be set";
        }
        {
          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
        }
        {
          assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
          message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
        }
        {
          assertion = !(elem "" allPools);
          message = ''
            Automatic pool detection found an empty pool name, which can't be used.
            Hint: for `fileSystems` entries with `fsType = zfs`, the `device` attribute
            should be a zfs dataset name, like `device = "pool/data/set"`.
            This error can be triggered by using an absolute path, such as `"/dev/disk/..."`.
          '';
        }
      ];

      boot = {
        kernelModules = [ "zfs" ];
        # https://github.com/openzfs/zfs/issues/260
        # https://github.com/openzfs/zfs/issues/12842
        # https://github.com/NixOS/nixpkgs/issues/106093
        kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];

        extraModulePackages = [
          cfgZfs.modulePackage
        ];
      };

      boot.initrd = mkIf inInitrd {
        # spl has been removed in ≥ 2.2.0.
        kernelModules = [ "zfs" ] ++ lib.optional (lib.versionOlder cfgZfs.package.version "2.2.0") "spl";
        extraUtilsCommands =
          mkIf (!config.boot.initrd.systemd.enable) ''
            copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
            copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
            copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
            copy_bin_and_libs ${cfgZfs.package}/lib/udev/vdev_id
            copy_bin_and_libs ${cfgZfs.package}/lib/udev/zvol_id
          '';
        extraUtilsCommandsTest =
          mkIf (!config.boot.initrd.systemd.enable) ''
            $out/bin/zfs --help >/dev/null 2>&1
            $out/bin/zpool --help >/dev/null 2>&1
          '';
        postDeviceCommands = mkIf (!config.boot.initrd.systemd.enable) (concatStringsSep "\n" ([''
            ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"
          ''] ++ [(importLib {
            # See comments at importLib definition.
            zpoolCmd = "zpool";
            awkCmd = "awk";
            inherit cfgZfs;
          })] ++ (map (pool: ''
            echo -n "importing root ZFS pool \"${pool}\"..."
            # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
            if ! poolImported "${pool}"; then
              for trial in `seq 1 60`; do
                poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
                sleep 1
                echo -n .
              done
              echo
              if [[ -n "$msg" ]]; then
                echo "$msg";
              fi
              poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
            fi

            ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}

            ${if isBool cfgZfs.requestEncryptionCredentials
              then optionalString cfgZfs.requestEncryptionCredentials ''
                zfs load-key -a
              ''
              else concatMapStrings (fs: ''
                zfs load-key -- ${escapeShellArg fs}
              '') (filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)}
          '') rootPools)));

        # Systemd in stage 1
        systemd = mkIf config.boot.initrd.systemd.enable {
          packages = [ cfgZfs.package ];
          services = listToAttrs (map (pool: createImportService {
            inherit pool;
            systemd = config.boot.initrd.systemd.package;
            force = cfgZfs.forceImportRoot;
            prefix = "/sysroot";
          }) rootPools);
          targets.zfs-import.wantedBy = [ "zfs.target" ];
          targets.zfs.wantedBy = [ "initrd.target" ];
          extraBin = {
            zpool = "${cfgZfs.package}/sbin/zpool";
            zfs = "${cfgZfs.package}/sbin/zfs";
            awk = "${pkgs.gawk}/bin/awk";
          };
          storePaths = [
            "${cfgZfs.package}/lib/udev/vdev_id"
            "${cfgZfs.package}/lib/udev/zvol_id"
          ];
        };
        services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, in stage 1
      };

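      # Run `zpool sync` from the shutdown ramfs so all pools are flushed to disk
      # right before the final poweroff/reboot.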
      systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source = pkgs.writeShellScript "zpool-sync-shutdown" ''
        exec ${cfgZfs.package}/bin/zpool sync
      '';
      systemd.shutdownRamfs.storePaths = [ "${cfgZfs.package}/bin/zpool" ];

      # TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567.
      # To avoid breaking people's bootloaders, and because probably not everybody reads the
      # release notes that thoroughly, also check inSystem here.
      boot.loader.grub = mkIf (inInitrd || inSystem) {
        zfsSupport = true;
        zfsPackage = cfgZfs.package;
      };

      services.zfs.zed.settings = {
        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault (
          config.security.wrapperDir + "/" +
          config.services.mail.sendmailSetuidWrapper.program
        ));
        # subject in header for sendmail
        ZED_EMAIL_OPTS = mkIf cfgZED.enableMail (mkDefault "@ADDRESS@");

        PATH = lib.makeBinPath [
          cfgZfs.package
          pkgs.coreutils
          pkgs.curl
          pkgs.gawk
          pkgs.gnugrep
          pkgs.gnused
          pkgs.nettools
          pkgs.util-linux
        ];
      };

      # ZFS already has its own I/O scheduler. Without this, my (@Artturin) computer froze for a second when I ran a nix build.
      services.udev.extraRules = ''
        ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
      '';

      environment.etc = genAttrs
        (map
          (file: "zfs/zed.d/${file}")
          [
            "all-syslog.sh"
            "pool_import-led.sh"
            "resilver_finish-start-scrub.sh"
            "statechange-led.sh"
            "vdev_attach-led.sh"
            "zed-functions.sh"
            "data-notify.sh"
            "resilver_finish-notify.sh"
            "scrub_finish-notify.sh"
            "statechange-notify.sh"
            "vdev_clear-led.sh"
          ]
        )
        (file: { source = "${cfgZfs.package}/etc/${file}"; })
      // {
        "zfs/zed.d/zed.rc".text = zedConf;
        "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
      };

      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
      environment.systemPackages = [ cfgZfs.package ]
        ++ optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags

      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
      systemd.packages = [ cfgZfs.package ];

      systemd.services = let
        createImportService' = pool: createImportService {
          inherit pool;
          systemd = config.systemd.package;
          force = cfgZfs.forceImportAll;
        };

        # This forces a sync of any ZFS pools prior to poweroff, even if they're set
        # to sync=disabled.
        createSyncService = pool:
          nameValuePair "zfs-sync-${pool}" {
            description = "Sync ZFS pool \"${pool}\"";
            wantedBy = [ "shutdown.target" ];
            unitConfig = {
              DefaultDependencies = false;
            };
            serviceConfig = {
              Type = "oneshot";
              RemainAfterExit = true;
            };
            script = ''
              ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
            '';
          };

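        # Hook the packaged zfs-mount/zfs-share/zfs-zed units into zfs.target and
        # order them after kernel module loading.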
        createZfsService = serv:
          nameValuePair serv {
            after = [ "systemd-modules-load.service" ];
            wantedBy = [ "zfs.target" ];
          };

      in listToAttrs (map createImportService' dataPools ++
                      map createSyncService allPools ++
                      map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);

      systemd.targets.zfs-import.wantedBy = [ "zfs.target" ];

      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
    })

    (mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
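      # Templated unit: "zpool-expand@<pool>" grows every device in <pool>;
      # instances are started by zpool-expand-pools below.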
      systemd.services."zpool-expand@" = {
        description = "Expand ZFS pools";
        after = [ "zfs.target" ];

        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
        };

        scriptArgs = "%i";
        path = [ cfgZfs.package ];

        script = ''
          pool=$1

          echo "Expanding all devices for $pool."

          ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
        '';
      };

      systemd.services."zpool-expand-pools" =
        let
          # Create a string, to be interpolated into a bash script, that
          # enumerates all of the pools to expand. If `expandOnBoot` is set to
          # the string "all", we dynamically expand every imported pool;
          # otherwise we enumerate just the specifically provided list of pools.
          poolListProvider = if cfgExpandOnBoot == "all"
            then "$(zpool list -H -o name)"
            else lib.escapeShellArgs cfgExpandOnBoot;
        in
        {
          description = "Expand specified ZFS pools";
          wantedBy = [ "default.target" ];
          after = [ "zfs.target" ];

          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
          };

          path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];

          script = ''
            for pool in ${poolListProvider}; do
              systemctl start --no-block "zpool-expand@$pool"
            done
          '';
        };
    })

    (mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
      systemd.services = let
        descr = name: if name == "frequent" then "15 mins"
                      else if name == "hourly" then "hour"
                      else if name == "daily" then "day"
                      else if name == "weekly" then "week"
                      else if name == "monthly" then "month"
                      else throw "unknown snapshot name";
        numSnapshots = name: builtins.getAttr name cfgSnapshots;
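        # One "zfs-snapshot-<interval>" oneshot service per interval; the matching
        # timers are defined below.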
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            description = "ZFS auto-snapshotting every ${descr snapName}";
            after = [ "zfs-import.target" ];
            serviceConfig = {
              Type = "oneshot";
              ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
            };
            restartIfChanged = false;
          };
        }) snapshotNames);

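      # Timer schedule: "frequent" fires every 15 minutes; the other interval
      # names are valid systemd OnCalendar values as-is.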
      systemd.timers = let
        timer = name: if name == "frequent" then "*:0,15,30,45" else name;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            wantedBy = [ "timers.target" ];
            timerConfig = {
              OnCalendar = timer snapName;
              Persistent = "yes";
            };
          };
        }) snapshotNames);
    })

    (mkIf (cfgZfs.enabled && cfgScrub.enable) {
      systemd.services.zfs-scrub = {
        description = "ZFS pools scrubbing";
        after = [ "zfs-import.target" ];
        serviceConfig = {
          Type = "simple";
        };
        script = ''
          ${cfgZfs.package}/bin/zpool scrub -w ${
            if cfgScrub.pools != [] then
              (concatStringsSep " " cfgScrub.pools)
            else
              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
          }
        '';
      };

      systemd.timers.zfs-scrub = {
        wantedBy = [ "timers.target" ];
        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
        timerConfig = {
          OnCalendar = cfgScrub.interval;
          Persistent = "yes";
          RandomizedDelaySec = cfgScrub.randomizedDelaySec;
        };
      };
    })

    (mkIf (cfgZfs.enabled && cfgTrim.enable) {
      systemd.services.zpool-trim = {
        description = "ZFS pools trim";
        after = [ "zfs-import.target" ];
        path = [ cfgZfs.package ];
        startAt = cfgTrim.interval;
        # By default we ignore errors returned by the trim command, in case:
        # - HDDs are mixed with SSDs
        # - there is an SSD in a pool that is currently being trimmed
        # - there are only HDDs and the failure would leave the system in a degraded state
        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool; done || true'";
      };

      systemd.timers.zpool-trim.timerConfig = {
        Persistent = "yes";
        RandomizedDelaySec = cfgTrim.randomizedDelaySec;
      };
    })
  ];
}