{ system ? builtins.currentSystem,
  config ? {},
  pkgs ? import ../.. { inherit system config; }
}:

with import ../lib/testing-python.nix { inherit system pkgs; };

let

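  # Shared factory for the stable/unstable variants below: builds one VM test
  # per ZFS/kernel combination and appends any extraTest script verbatim.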
  makeZfsTest = name:
    { kernelPackage ? if enableUnstable
                      then pkgs.zfsUnstable.latestCompatibleLinuxPackages
                      else pkgs.linuxPackages
    , enableUnstable ? false
    , extraTest ? ""
    }:
    makeTest {
      name = "zfs-" + name;
      meta = with pkgs.lib.maintainers; {
        maintainers = [ adisbladis ];
      };

      nodes.machine = { pkgs, lib, ... }:
        let
          usersharePath = "/var/lib/samba/usershares";
        in {
          virtualisation.emptyDiskImages = [ 4096 ];
          networking.hostId = "deadbeef";
          boot.kernelPackages = kernelPackage;
          boot.supportedFilesystems = [ "zfs" ];
          boot.zfs.enableUnstable = enableUnstable;

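          # Samba with usershares enabled, so that `zfs set sharesmb=on`
          # (which registers shares via Samba's usershare mechanism on Linux)
          # has somewhere to publish the share.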
          services.samba = {
            enable = true;
            extraConfig = ''
              registry shares = yes
              usershare path = ${usersharePath}
              usershare allow guests = yes
              usershare max shares = 100
              usershare owner only = no
            '';
          };
          systemd.services.samba-smbd.serviceConfig.ExecStartPre =
            "${pkgs.coreutils}/bin/mkdir -m +t -p ${usersharePath}";

          environment.systemPackages = [ pkgs.parted ];

          # Set up the regular fileSystems machinery to ensure forceImportAll
          # can be tested via the regular service units.
          virtualisation.fileSystems = {
            "/forcepool" = {
              device = "forcepool";
              fsType = "zfs";
              options = [ "noauto" ];
            };
          };

          # forcepool doesn't exist at first boot, and we need to test the
          # import manually after tweaking the hostId.
          systemd.services.zfs-import-forcepool.wantedBy = lib.mkVMOverride [];
          systemd.targets.zfs.wantedBy = lib.mkVMOverride [];
          boot.zfs.forceImportAll = true;
          # /dev/disk/by-id doesn't get populated in the NixOS test framework
          boot.zfs.devNodes = "/dev/disk/by-uuid";
        };

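      # Walk the basic pool lifecycle: partition the empty disk image, create
      # a pool and datasets, share one dataset over SMB, then tear it all
      # down again.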
      testScript = ''
        machine.succeed(
            "modprobe zfs",
            "zpool status",
            "ls /dev",
            "mkdir /tmp/mnt",
            "udevadm settle",
            "parted --script /dev/vdb mklabel msdos",
            "parted --script /dev/vdb -- mkpart primary 1024M -1s",
            "udevadm settle",
            "zpool create rpool /dev/vdb1",
            "zfs create -o mountpoint=legacy rpool/root",
            # shared datasets cannot have a legacy mountpoint
            "zfs create rpool/shared_smb",
            "mount -t zfs rpool/root /tmp/mnt",
            "udevadm settle",
            # wait for samba services
            "systemctl is-system-running --wait",
            "zfs set sharesmb=on rpool/shared_smb",
            "zfs share rpool/shared_smb",
            "smbclient -gNL localhost | grep rpool_shared_smb",
            "umount /tmp/mnt",
            "zpool destroy rpool",
            "udevadm settle",
        )

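        # Recreate the pool with native encryption enabled; the passphrase is
        # fed to `zpool create` on stdin via keyformat=passphrase.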
        machine.succeed(
            'echo password | zpool create -o altroot="/tmp/mnt" '
            + "-O encryption=aes-256-gcm -O keyformat=passphrase rpool /dev/vdb1",
            "zfs create -o mountpoint=legacy rpool/root",
            "mount -t zfs rpool/root /tmp/mnt",
            "udevadm settle",
            "umount /tmp/mnt",
            "zpool destroy rpool",
            "udevadm settle",
        )

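        # The pool is created under a regenerated hostid (deadcafe); after
        # reboot the machine is back on its configured hostid (deadbeef), so a
        # plain `zpool import` refuses the pool and only the force-import
        # service unit can bring it back.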
        with subtest("boot.zfs.forceImportAll works"):
            machine.succeed(
                "rm /etc/hostid",
                "zgenhostid deadcafe",
                "zpool create forcepool /dev/vdb1 -O mountpoint=legacy",
            )
            machine.shutdown()
            machine.start()
            machine.succeed("udevadm settle")
            machine.fail("zpool import forcepool")
            machine.succeed(
                "systemctl start zfs-import-forcepool.service",
                "mount -t zfs forcepool /tmp/mnt",
            )
      '' + extraTest;

    };


in {

  stable = makeZfsTest "stable" { };

  unstable = makeZfsTest "unstable" {
    enableUnstable = true;
  };

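  # Reuse the ZFS-on-root scenario from the installer test suite.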
  installer = (import ./installer.nix { }).zfsroot;

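  # Check that services.zfs.expandOnBoot (enabled via the "resize"
  # specialisation) grows a pool online: the first mirror starts on small
  # 70M partitions and the test asserts the pool gains over ~20 GB.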
  expand-partitions = makeTest {
    name = "multi-disk-zfs";
    nodes = {
      machine = { pkgs, ... }: {
        environment.systemPackages = [ pkgs.parted ];
        boot.supportedFilesystems = [ "zfs" ];
        networking.hostId = "00000000";

        virtualisation = {
          emptyDiskImages = [ 20480 20480 20480 20480 20480 20480 ];
        };

        specialisation.resize.configuration = {
          services.zfs.expandOnBoot = [ "tank" ];
        };
      };
    };

    testScript = { nodes, ... }:
      ''
        start_all()
        machine.wait_for_unit("default.target")
        print(machine.succeed('mount'))

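        # Put the first mirror on small 70M partitions so most of /dev/vdb
        # and /dev/vdc stays free; the expansion step should later grow the
        # pool into that space. The other four disks are used whole.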
        print(machine.succeed('parted --script /dev/vdb -- mklabel gpt'))
        print(machine.succeed('parted --script /dev/vdb -- mkpart primary 1M 70M'))

        print(machine.succeed('parted --script /dev/vdc -- mklabel gpt'))
        print(machine.succeed('parted --script /dev/vdc -- mkpart primary 1M 70M'))

        print(machine.succeed('zpool create tank mirror /dev/vdb1 /dev/vdc1 mirror /dev/vdd /dev/vde mirror /dev/vdf /dev/vdg'))
        print(machine.succeed('zpool list -v'))
        print(machine.succeed('mount'))
        start_size = int(machine.succeed('df -k --output=size /tank | tail -n1').strip())

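        # Switch into the resize specialisation, which enables
        # services.zfs.expandOnBoot for tank, and wait for the expansion
        # units to run.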
        print(machine.succeed("/run/current-system/specialisation/resize/bin/switch-to-configuration test >&2"))
        machine.wait_for_unit("zpool-expand-pools.service")
        machine.wait_for_unit("zpool-expand@tank.service")

        print(machine.succeed('zpool list -v'))
        new_size = int(machine.succeed('df -k --output=size /tank | tail -n1').strip())

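        # df -k reports KiB, so a delta above 20,000,000 (~19 GiB) means the
        # first mirror expanded from its 70M partitions into (nearly) the
        # full 20 GiB disks.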
        if (new_size - start_size) > 20000000:
            print("Disk grew appropriately.")
        else:
            print(f"Disk went from {start_size} to {new_size}, which doesn't seem right.")
            exit(1)
      '';
  };
}