{ config, lib, pkgs, utils, ... }:
#
# TODO: zfs tunables

with utils;
with lib;

let

  cfgZfs = config.boot.zfs;
  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
  cfgSnapshots = config.services.zfs.autoSnapshot;
  cfgSnapFlags = cfgSnapshots.flags;
  cfgScrub = config.services.zfs.autoScrub;
  cfgTrim = config.services.zfs.trim;
  cfgZED = config.services.zfs.zed;

  inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
  inSystem = any (fs: fs == "zfs") config.boot.supportedFilesystems;

  autosnapPkg = pkgs.zfstools.override {
    zfs = cfgZfs.package;
  };

  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";

  datasetToPool = x: elemAt (splitString "/" x) 0;

  fsToPool = fs: datasetToPool fs.device;
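
  # For illustration (hypothetical dataset names): datasetToPool extracts the
  # pool component of a dataset path, e.g.
  #   datasetToPool "tank/home/alice" == "tank"
  # and fsToPool applies this to a fileSystems entry's `device` attribute.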

  zfsFilesystems = filter (x: x.fsType == "zfs") config.system.build.fileSystems;

  allPools = unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);

  rootPools = unique (map fsToPool (filter fsNeededForBoot zfsFilesystems));

  dataPools = unique (filter (pool: !(elem pool rootPools)) allPools);

  snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];

  # When importing ZFS pools, there's one difficulty: these scripts may run
  # before the backing devices (physical HDDs, etc.) of the pool have been
  # scanned and initialized.
  #
  # An attempted import with all devices missing will just fail and can be
  # retried, but an import where e.g. two out of three disks in a three-way
  # mirror are missing will succeed. This is a problem: when the missing disks
  # are later discovered, they won't be automatically set online, leaving the
  # pool without redundancy (and far slower) until the system reboots.
  #
  # The solution is below. poolReady checks the status of an un-imported
  # pool to see if *every* device is available -- in which case the pool will be
  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
  #
  # The import scripts then loop over this, waiting until the pool is ready or
  # enough time has passed that we can assume it won't be. In the latter case
  # they make one last attempt at importing, allowing the system to
  # (eventually) boot even with a degraded pool.
  importLib = {zpoolCmd, awkCmd, cfgZfs}: ''
    poolReady() {
      pool="$1"
      state="$("${zpoolCmd}" import 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
      if [[ "$state" = "ONLINE" ]]; then
        return 0
      else
        echo "Pool $pool in state $state, waiting"
        return 1
      fi
    }
    poolImported() {
      pool="$1"
      "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
    }
    poolImport() {
      pool="$1"
      "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
    }
  '';
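
  # For reference, poolReady's awk program scans `zpool import` output, which
  # (abridged, illustrative) looks roughly like:
  #
  #    pool: tank
  #      id: <numeric identifier>
  #   state: ONLINE
  #  action: The pool can be imported using its name or numeric identifier.
  #
  # It prints the `state:` value of the stanza whose `pool:` line matches, or
  # MISSING if the pool was not listed at all.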

  zedConf = generators.toKeyValue {
    mkKeyValue = generators.mkKeyValueDefault {
      mkValueString = v:
        if isInt v then toString v
        else if isString v then "\"${v}\""
        else if true == v then "1"
        else if false == v then "0"
        else if isList v then "\"" + (concatStringsSep " " v) + "\""
        else err "this value is" (toString v);
    } "=";
  } cfgZED.settings;
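
  # A minimal sketch of the rendering: assuming settings such as
  #   { ZED_EMAIL_ADDR = [ "root" ]; ZED_NOTIFY_INTERVAL_SECS = 3600; ZED_NOTIFY_VERBOSE = false; }
  # zedConf evaluates to the zed.rc text
  #   ZED_EMAIL_ADDR="root"
  #   ZED_NOTIFY_INTERVAL_SECS=3600
  #   ZED_NOTIFY_VERBOSE=0
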
in

{

  imports = [
    (mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
  ];

  ###### interface

  options = {
    boot.zfs = {
      package = mkOption {
        readOnly = true;
        type = types.package;
        default = if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs;
        defaultText = literalExpression "if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs";
        description = "Configured ZFS userland tools package.";
      };

      enabled = mkOption {
        readOnly = true;
        type = types.bool;
        default = inInitrd || inSystem;
        description = "True if ZFS filesystem support is enabled.";
      };

      enableUnstable = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Use the unstable zfs package. This can be useful if the latest
          kernel is not yet supported by a published release of ZFS. Enabling
          this option will install a development version of ZFS on Linux. The
          version will have already passed an extensive test suite, but it is
          more likely to hit an undiscovered bug than a released version of
          ZFS on Linux.
        '';
      };

      extraPools = mkOption {
        type = types.listOf types.str;
        default = [];
        example = [ "tank" "data" ];
        description = ''
          Names or GUIDs of extra ZFS pools that you wish to import during boot.

          Usually this is not necessary. Instead, you should set the mountpoint property
          of ZFS filesystems to <literal>legacy</literal> and add the ZFS filesystems to
          NixOS's <option>fileSystems</option> option, which makes NixOS automatically
          import the associated pool.

          However, in some cases (e.g. if you have many filesystems) it may be preferable
          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
          will not be managing those filesystems, you will need to specify the ZFS pool here
          so that NixOS automatically imports it on every boot.
        '';
      };
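
      # For illustration (hypothetical pool/dataset names): the alternative
      # described above -- managing a dataset via `fileSystems` with a legacy
      # mountpoint instead of listing its pool in extraPools -- looks like:
      #   fileSystems."/data" = {
      #     device = "tank/data";  # after `zfs set mountpoint=legacy tank/data`
      #     fsType = "zfs";
      #   };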

      devNodes = mkOption {
        type = types.path;
        default = "/dev/disk/by-id";
        description = ''
          Name of the directory from which to import ZFS devices.

          This should be a path under /dev containing stable names for all devices needed, as
          import may fail if device nodes are renamed concurrently with a device failing.
        '';
      };

      forceImportRoot = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Forcibly import the ZFS root pool(s) during early boot.

          This is enabled by default for backwards compatibility purposes, but it is highly
          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
          to protect your ZFS pools.

          If you set this option to <literal>false</literal> and NixOS subsequently fails to
          boot because it cannot import the root pool, you should boot with the
          <literal>zfs_force=1</literal> option as a kernel parameter (e.g. by manually
          editing the kernel params in grub during boot). You should only need to do this
          once.
        '';
      };

      forceImportAll = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Forcibly import all ZFS pool(s).

          If you set this option to <literal>false</literal> and NixOS subsequently fails to
          import your non-root ZFS pool(s), you should manually import each pool with
          "zpool import -f &lt;pool-name&gt;", and then reboot. You should only need to do
          this once.
        '';
      };

      requestEncryptionCredentials = mkOption {
        type = types.either types.bool (types.listOf types.str);
        default = true;
        example = [ "tank" "data" ];
        description = ''
          If true, on import, encryption keys or passwords for all encrypted datasets
          are requested. To only decrypt selected datasets, supply a list of dataset
          names instead. For root pools the encryption key can be supplied via both
          an interactive prompt (keylocation=prompt) and from a file (keylocation=file://).
        '';
      };
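
      # Illustrative usage (hypothetical dataset names), decrypting only
      # selected datasets at boot:
      #   boot.zfs.requestEncryptionCredentials = [ "tank/secret" "tank/home" ];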
    };

    services.zfs.autoSnapshot = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = ''
          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
          Note that you must set the <literal>com.sun:auto-snapshot</literal>
          property to <literal>true</literal> on all datasets which you wish
          to auto-snapshot.

          You can override whether a child dataset is auto-snapshotted for a
          given interval by setting the corresponding property on it:
          <literal>zfs set com.sun:auto-snapshot:weekly=false DATASET</literal>
        '';
      };

      flags = mkOption {
        default = "-k -p";
        example = "-k -p --utc";
        type = types.str;
        description = ''
          Flags to pass to the zfs-auto-snapshot command.

          Run <literal>zfs-auto-snapshot</literal> (without any arguments) to
          see available flags.

          If it's not too inconvenient for snapshots to have timestamps in UTC,
          it is suggested that you append <literal>--utc</literal> to the list
          of default options (see the example).

          Otherwise, snapshot names can cause name conflicts or apparent time
          reversals due to daylight saving time, timezone or other date/time changes.
        '';
      };

      frequent = mkOption {
        default = 4;
        type = types.int;
        description = ''
          Number of frequent (15-minute) auto-snapshots that you wish to keep.
        '';
      };

      hourly = mkOption {
        default = 24;
        type = types.int;
        description = ''
          Number of hourly auto-snapshots that you wish to keep.
        '';
      };

      daily = mkOption {
        default = 7;
        type = types.int;
        description = ''
          Number of daily auto-snapshots that you wish to keep.
        '';
      };

      weekly = mkOption {
        default = 4;
        type = types.int;
        description = ''
          Number of weekly auto-snapshots that you wish to keep.
        '';
      };

      monthly = mkOption {
        default = 12;
        type = types.int;
        description = ''
          Number of monthly auto-snapshots that you wish to keep.
        '';
      };
    };
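
    # Illustrative configuration (hypothetical retention counts), keeping eight
    # frequent and one monthly snapshot alongside the other defaults:
    #   services.zfs.autoSnapshot = {
    #     enable = true;
    #     frequent = 8;
    #     monthly = 1;
    #   };
    # Remember to also run e.g. `zfs set com.sun:auto-snapshot=true tank` on
    # the datasets (or pools) you want snapshotted.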

    services.zfs.trim = {
      enable = mkOption {
        description = "Whether to enable periodic TRIM on all ZFS pools.";
        default = true;
        example = false;
        type = types.bool;
      };

      interval = mkOption {
        default = "weekly";
        type = types.str;
        example = "daily";
        description = ''
          How often we run trim. For most desktop and server systems
          a sufficient trimming frequency is once a week.

          The format is described in
          <citerefentry><refentrytitle>systemd.time</refentrytitle>
          <manvolnum>7</manvolnum></citerefentry>.
        '';
      };
    };

    services.zfs.autoScrub = {
      enable = mkEnableOption "periodic scrubbing of ZFS pools";

      interval = mkOption {
        default = "Sun, 02:00";
        type = types.str;
        example = "daily";
        description = ''
          Systemd calendar expression for when to scrub ZFS pools. See
          <citerefentry><refentrytitle>systemd.time</refentrytitle>
          <manvolnum>7</manvolnum></citerefentry>.
        '';
      };

      pools = mkOption {
        default = [];
        type = types.listOf types.str;
        example = [ "tank" ];
        description = ''
          List of ZFS pools to periodically scrub. If empty, all pools
          will be scrubbed.
        '';
      };
    };

    services.zfs.expandOnBoot = mkOption {
      type = types.either (types.enum [ "disabled" "all" ]) (types.listOf types.str);
      default = "disabled";
      example = [ "tank" "dozer" ];
      description = ''
        After importing, expand each device in the specified pools.

        Set the value to the plain string "all" to expand all pools on boot:

        services.zfs.expandOnBoot = "all";

        or set the value to a list of pools to expand the disks of specific pools:

        services.zfs.expandOnBoot = [ "tank" "dozer" ];
      '';
    };

    services.zfs.zed = {
      enableMail = mkEnableOption "ZED's ability to send emails" // {
        default = cfgZfs.package.enableMail;
      };

      settings = mkOption {
        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
        example = literalExpression ''
          {
            ZED_DEBUG_LOG = "/tmp/zed.debug.log";

            ZED_EMAIL_ADDR = [ "root" ];
            ZED_EMAIL_PROG = "mail";
            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";

            ZED_NOTIFY_INTERVAL_SECS = 3600;
            ZED_NOTIFY_VERBOSE = false;

            ZED_USE_ENCLOSURE_LEDS = true;
            ZED_SCRUB_AFTER_RESILVER = false;
          }
        '';
        description = ''
          ZFS Event Daemon /etc/zfs/zed.d/zed.rc content.

          See
          <citerefentry><refentrytitle>zed</refentrytitle><manvolnum>8</manvolnum></citerefentry>
          for details on ZED, and the scripts in /etc/zfs/zed.d for the possible variables.
        '';
      };
    };
  };

  ###### implementation

  config = mkMerge [
    (mkIf cfgZfs.enabled {
      assertions = [
        {
          assertion = cfgZED.enableMail -> cfgZfs.package.enableMail;
          message = ''
            To allow ZED to send emails, ZFS needs to be configured to enable
            this. To do so, one must override the `zfs` package and set
            `enableMail` to true.
          '';
        }
        {
          assertion = config.networking.hostId != null;
          message = "ZFS requires networking.hostId to be set";
        }
        {
          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
        }
      ];

      boot = {
        kernelModules = [ "zfs" ];

        extraModulePackages = [
          (if config.boot.zfs.enableUnstable then
            config.boot.kernelPackages.zfsUnstable
           else
            config.boot.kernelPackages.zfs)
        ];
      };

      boot.initrd = mkIf inInitrd {
        kernelModules = [ "zfs" ] ++ optional (!cfgZfs.enableUnstable) "spl";
        extraUtilsCommands =
          ''
            copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
            copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
            copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
          '';
        extraUtilsCommandsTest = mkIf inInitrd
          ''
            $out/bin/zfs --help >/dev/null 2>&1
            $out/bin/zpool --help >/dev/null 2>&1
          '';
        postDeviceCommands = concatStringsSep "\n" ([''
            ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"

            for o in $(cat /proc/cmdline); do
              case $o in
                zfs_force|zfs_force=1)
                  ZFS_FORCE="-f"
                  ;;
              esac
            done
          ''] ++ [(importLib {
            # See comments at importLib definition.
            zpoolCmd = "zpool";
            awkCmd = "awk";
            inherit cfgZfs;
          })] ++ (map (pool: ''
            echo -n "importing root ZFS pool \"${pool}\"..."
            # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
            if ! poolImported "${pool}"; then
              for trial in `seq 1 60`; do
                poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
                sleep 1
                echo -n .
              done
              echo
              if [[ -n "$msg" ]]; then
                echo "$msg";
              fi
              poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
            fi
            ${if isBool cfgZfs.requestEncryptionCredentials
              then optionalString cfgZfs.requestEncryptionCredentials ''
                zfs load-key -a
              ''
              else concatMapStrings (fs: ''
                zfs load-key ${fs}
              '') cfgZfs.requestEncryptionCredentials}
          '') rootPools));
      };

      # TODO FIXME: see https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567.
      # To avoid breaking people's bootloaders (and since probably not everybody
      # reads the release notes that thoroughly), also check inSystem here.
      boot.loader.grub = mkIf (inInitrd || inSystem) {
        zfsSupport = true;
      };

      services.zfs.zed.settings = {
        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault "${pkgs.mailutils}/bin/mail");
        PATH = lib.makeBinPath [
          cfgZfs.package
          pkgs.coreutils
          pkgs.curl
          pkgs.gawk
          pkgs.gnugrep
          pkgs.gnused
          pkgs.nettools
          pkgs.util-linux
        ];
      };

      environment.etc = genAttrs
        (map
          (file: "zfs/zed.d/${file}")
          [
            "all-syslog.sh"
            "pool_import-led.sh"
            "resilver_finish-start-scrub.sh"
            "statechange-led.sh"
            "vdev_attach-led.sh"
            "zed-functions.sh"
            "data-notify.sh"
            "resilver_finish-notify.sh"
            "scrub_finish-notify.sh"
            "statechange-notify.sh"
            "vdev_clear-led.sh"
          ]
        )
        (file: { source = "${cfgZfs.package}/etc/${file}"; })
      // {
        "zfs/zed.d/zed.rc".text = zedConf;
        "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
      };
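
      # For illustration, one generated entry above is equivalent to:
      #   environment.etc."zfs/zed.d/all-syslog.sh".source =
      #     "${cfgZfs.package}/etc/zfs/zed.d/all-syslog.sh";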

      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
      environment.systemPackages = [ cfgZfs.package ]
        ++ optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags

      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
      systemd.packages = [ cfgZfs.package ];

      systemd.services = let
        getPoolFilesystems = pool:
          filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;

        getPoolMounts = pool:
          let
            mountPoint = fs: escapeSystemdPath fs.mountPoint;
          in
            map (x: "${mountPoint x}.mount") (getPoolFilesystems pool);

        createImportService = pool:
          nameValuePair "zfs-import-${pool}" {
            description = "Import ZFS pool \"${pool}\"";
            # we need systemd-udev-settle until https://github.com/zfsonlinux/zfs/pull/4943 is merged
            requires = [ "systemd-udev-settle.service" ];
            after = [
              "systemd-udev-settle.service"
              "systemd-modules-load.service"
              "systemd-ask-password-console.service"
            ];
            wantedBy = (getPoolMounts pool) ++ [ "local-fs.target" ];
            before = (getPoolMounts pool) ++ [ "local-fs.target" ];
            unitConfig = {
              DefaultDependencies = "no";
            };
            serviceConfig = {
              Type = "oneshot";
              RemainAfterExit = true;
            };
            environment.ZFS_FORCE = optionalString cfgZfs.forceImportAll "-f";
            script = (importLib {
              # See comments at importLib definition.
              zpoolCmd = "${cfgZfs.package}/sbin/zpool";
              awkCmd = "${pkgs.gawk}/bin/awk";
              inherit cfgZfs;
            }) + ''
              poolImported "${pool}" && exit
              echo -n "importing ZFS pool \"${pool}\"..."
              # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
              for trial in `seq 1 60`; do
                poolReady "${pool}" && poolImport "${pool}" && break
                sleep 1
              done
              poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
              if poolImported "${pool}"; then
                ${optionalString (if isBool cfgZfs.requestEncryptionCredentials
                                  then cfgZfs.requestEncryptionCredentials
                                  else cfgZfs.requestEncryptionCredentials != []) ''
                  ${cfgZfs.package}/sbin/zfs list -rHo name,keylocation ${pool} | while IFS=$'\t' read ds kl; do
                    {
                      ${optionalString (!isBool cfgZfs.requestEncryptionCredentials) ''
                        if ! echo '${concatStringsSep "\n" cfgZfs.requestEncryptionCredentials}' | grep -qFx "$ds"; then
                          continue
                        fi
                      ''}
                      case "$kl" in
                        none )
                          ;;
                        prompt )
                          ${config.systemd.package}/bin/systemd-ask-password "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds"
                          ;;
                        * )
                          ${cfgZfs.package}/sbin/zfs load-key "$ds"
                          ;;
                      esac
                    } < /dev/null # Protect the outer `while read ds kl` loop in case anything inside reads stdin
                  done
                ''}
                echo "Successfully imported ${pool}"
              else
                exit 1
              fi
            '';
          };

        # This forces a sync of any ZFS pools prior to poweroff, even if they're set
        # to sync=disabled.
        createSyncService = pool:
          nameValuePair "zfs-sync-${pool}" {
            description = "Sync ZFS pool \"${pool}\"";
            wantedBy = [ "shutdown.target" ];
            unitConfig = {
              DefaultDependencies = false;
            };
            serviceConfig = {
              Type = "oneshot";
              RemainAfterExit = true;
            };
            script = ''
              ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
            '';
          };

        createZfsService = serv:
          nameValuePair serv {
            after = [ "systemd-modules-load.service" ];
            wantedBy = [ "zfs.target" ];
          };

      in listToAttrs (map createImportService dataPools ++
                      map createSyncService allPools ++
                      map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);
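
      # For illustration: with dataPools = [ "tank" ] this defines
      # zfs-import-tank.service, a zfs-sync-<pool>.service per pool, and
      # ordering/wantedBy tweaks for zfs-mount, zfs-share and zfs-zed.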

      systemd.targets.zfs-import =
        let
          services = map (pool: "zfs-import-${pool}.service") dataPools;
        in
          {
            requires = services;
            after = services;
            wantedBy = [ "zfs.target" ];
          };

      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
    })

    (mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
      systemd.services."zpool-expand@" = {
        description = "Expand ZFS pools";
        after = [ "zfs.target" ];

        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
        };

        scriptArgs = "%i";
        path = [ pkgs.gawk cfgZfs.package ];

        # ZFS has no way of enumerating just devices in a pool in a way
        # that 'zpool online -e' supports. Thus, we've implemented a
        # bit of a strange approach of highlighting just devices.
        # See: https://github.com/openzfs/zfs/issues/12505
        script = let
          # This UUID has been chosen at random and is to provide a
          # collision-proof, predictable token to search for
          magicIdentifier = "NIXOS-ZFS-ZPOOL-DEVICE-IDENTIFIER-37108bec-aff6-4b58-9e5e-53c7c9766f05";
          zpoolScripts = pkgs.writeShellScriptBin "device-highlighter" ''
            echo "${magicIdentifier}"
          '';
        in ''
          pool=$1

          echo "Expanding all devices for $pool."

          # Put our device-highlighter script on the PATH
          export ZPOOL_SCRIPTS_PATH=${zpoolScripts}/bin

          # Enable running our precisely specified zpool script as root
          export ZPOOL_SCRIPTS_AS_ROOT=1

          devices() (
            zpool status -c device-highlighter "$pool" \
             | awk '($2 == "ONLINE" && $6 == "${magicIdentifier}") { print $1; }'
          )
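
          # Illustrative `zpool status -c device-highlighter` device line
          # (fields abridged; $1 = name, $2 = state, $6 = the script's output):
          #   sda  ONLINE  0  0  0  ${magicIdentifier}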

          for device in $(devices); do
            echo "Attempting to expand $device of $pool..."
            if ! zpool online -e "$pool" "$device"; then
              echo "Failed to expand '$device' of '$pool'."
            fi
          done
        '';
      };

      systemd.services."zpool-expand-pools" =
        let
          # Create a string, to be interpolated in a bash script
          # which enumerates all of the pools to expand.
          # If expandOnBoot is the string `all`, we want to dynamically
          # expand every pool. Otherwise we want to enumerate
          # just the specifically provided list of pools.
          poolListProvider = if cfgExpandOnBoot == "all"
            then "$(zpool list -H | awk '{print $1}')"
            else lib.escapeShellArgs cfgExpandOnBoot;
        in
        {
          description = "Expand specified ZFS pools";
          wantedBy = [ "default.target" ];
          after = [ "zfs.target" ];

          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
          };

          path = [ pkgs.gawk cfgZfs.package ];

          script = ''
            for pool in ${poolListProvider}; do
              systemctl start --no-block "zpool-expand@$pool"
            done
          '';
        };
    })

    (mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
      systemd.services = let
        descr = name: if name == "frequent" then "15 mins"
                 else if name == "hourly" then "hour"
                 else if name == "daily" then "day"
                 else if name == "weekly" then "week"
                 else if name == "monthly" then "month"
                 else throw "unknown snapshot name";
        numSnapshots = name: builtins.getAttr name cfgSnapshots;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            description = "ZFS auto-snapshotting every ${descr snapName}";
            after = [ "zfs-import.target" ];
            serviceConfig = {
              Type = "oneshot";
              ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
            };
            restartIfChanged = false;
          };
        }) snapshotNames);

      systemd.timers = let
        timer = name: if name == "frequent" then "*:0,15,30,45" else name;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            wantedBy = [ "timers.target" ];
            timerConfig = {
              OnCalendar = timer snapName;
              Persistent = "yes";
            };
          };
        }) snapshotNames);
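
      # For illustration: this yields units zfs-snapshot-frequent.{service,timer}
      # through zfs-snapshot-monthly.{service,timer}; the frequent timer fires at
      # *:0,15,30,45, the others on systemd's hourly/daily/weekly/monthly calendars.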
    })

    (mkIf (cfgZfs.enabled && cfgScrub.enable) {
      systemd.services.zfs-scrub = {
        description = "ZFS pools scrubbing";
        after = [ "zfs-import.target" ];
        serviceConfig = {
          Type = "oneshot";
        };
        script = ''
          ${cfgZfs.package}/bin/zpool scrub ${
            if cfgScrub.pools != [] then
              (concatStringsSep " " cfgScrub.pools)
            else
              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
          }
        '';
      };

      systemd.timers.zfs-scrub = {
        wantedBy = [ "timers.target" ];
        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
        timerConfig = {
          OnCalendar = cfgScrub.interval;
          Persistent = "yes";
        };
      };
    })

    (mkIf (cfgZfs.enabled && cfgTrim.enable) {
      systemd.services.zpool-trim = {
        description = "ZFS pools trim";
        after = [ "zfs-import.target" ];
        path = [ cfgZfs.package ];
        startAt = cfgTrim.interval;
        # By default we ignore errors returned by the trim command, in case:
        # - HDDs are mixed with SSDs
        # - there is an SSD in a pool that is currently being trimmed
        # - there are only HDDs and we would put the system in a degraded state
        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool; done || true'";
      };

      systemd.timers.zpool-trim.timerConfig.Persistent = "yes";
    })
  ];
}