···
  1 - import ./make-test-python.nix (
  5 - # the single node ipv6 address
  6 - ip = "2001:db8:ffff::";
  7 - # the global ceph cluster id
  8 - cluster = "54465b37-b9d8-4539-a1f9-dd33c75ee45a";
 11 - "0" = "1c1b7ea9-06bf-4d30-9a01-37ac3a0254aa";
 12 - "1" = "bd5a6f49-69d5-428c-ac25-a99f0c44375c";
 13 - "2" = "c90de6c7-86c6-41da-9694-e794096dfc5c";
  4 + # the single node ipv6 address
  5 + ip = "2001:db8:ffff::";
  6 + # the global ceph cluster id
  7 + cluster = "54465b37-b9d8-4539-a1f9-dd33c75ee45a";
 10 + "0" = "1c1b7ea9-06bf-4d30-9a01-37ac3a0254aa";
 11 + "1" = "bd5a6f49-69d5-428c-ac25-a99f0c44375c";
 12 + "2" = "c90de6c7-86c6-41da-9694-e794096dfc5c";
 16 + name = "basic-single-node-ceph-cluster-bluestore-dmcrypt";
 17 + meta.maintainers = with lib.maintainers; [
 18 - name = "basic-single-node-ceph-cluster-bluestore-dmcrypt";
 20 - maintainers = with lib.maintainers; [
 30 + # disks for bluestore
 31 + virtualisation.emptyDiskImages = [
 28 - { pkgs, config, ... }:
 30 - # disks for bluestore
 31 - virtualisation.emptyDiskImages = [
 37 - # networking setup (no external connectivity required, only local IPv6)
 38 - networking.useDHCP = false;
 37 + # networking setup (no external connectivity required, only local IPv6)
 38 + networking.useDHCP = false;
 41 + wait-online.extraArgs = [
 41 - wait-online.extraArgs = [
 50 - addresses = [ { Address = "${ip}/128"; } ];
 50 + addresses = [ { Address = "${ip}/128"; } ];
 55 - # do not start the ceph target by default so we can format the disks first
 56 - systemd.targets.ceph.wantedBy = lib.mkForce [ ];
 55 + # do not start the ceph target by default so we can format the disks first
 56 + systemd.targets.ceph.wantedBy = lib.mkForce [ ];
 58 - # add the packages to systemPackages so the testscript doesn't run into any unexpected issues
 59 - # this shouldn't be required on production systems which have their required packages in the unit paths only
 60 - # but it helps in case one needs to actually run the tooling anyway
 61 - environment.systemPackages = with pkgs; [
 58 + # add the packages to systemPackages so the testscript doesn't run into any unexpected issues
 59 + # this shouldn't be required on production systems which have their required packages in the unit paths only
 60 + # but it helps in case one needs to actually run the tooling anyway
 61 + environment.systemPackages = with pkgs; [
 69 - client.enable = true;
 74 - ms_bind_ipv4 = "false";
 75 - ms_bind_ipv6 = "true";
 77 - ms_cluster_mode = "secure";
 78 - ms_service_mode = "secure";
 79 - ms_client_mode = "secure";
 80 - ms_mon_cluster_mode = "secure";
 81 - ms_mon_service_mode = "secure";
 82 - ms_mon_client_mode = "secure";
 83 - # less default modules, cuts down on memory and startup time in the tests
 84 - mgr_initial_modules = "";
 85 - # distribute by OSD, not by host, as per https://docs.ceph.com/en/reef/cephadm/install/#single-host
 86 - osd_crush_chooseleaf_type = "0";
 88 - client.extraConfig."mon.0" = {
 90 - mon_addr = "v2:[${ip}]:3300";
 91 - public_addr = "v2:[${ip}]:3300";
 95 - clusterNetwork = "${ip}/64";
 96 - publicNetwork = "${ip}/64";
 97 - monInitialMembers = "0";
 69 + client.enable = true;
 74 + ms_bind_ipv4 = "false";
 75 + ms_bind_ipv6 = "true";
 77 + ms_cluster_mode = "secure";
 78 + ms_service_mode = "secure";
 79 + ms_client_mode = "secure";
 80 + ms_mon_cluster_mode = "secure";
 81 + ms_mon_service_mode = "secure";
 82 + ms_mon_client_mode = "secure";
 83 + # less default modules, cuts down on memory and startup time in the tests
 84 + mgr_initial_modules = "";
 85 + # distribute by OSD, not by host, as per https://docs.ceph.com/en/reef/cephadm/install/#single-host
 86 + osd_crush_chooseleaf_type = "0";
 88 + client.extraConfig."mon.0" = {
 90 + mon_addr = "v2:[${ip}]:3300";
 91 + public_addr = "v2:[${ip}]:3300";
 95 + clusterNetwork = "${ip}/64";
 96 + publicNetwork = "${ip}/64";
 97 + monInitialMembers = "0";
107 - daemons = builtins.attrNames osd-fsid-map;
107 + daemons = builtins.attrNames osd-fsid-map;
112 - daemons = [ "ceph" ];
112 + daemons = [ "ceph" ];
118 - osd-name = id: "ceph-osd-${id}";
119 - osd-pre-start = id: [
120 - "!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm activate --bluestore ${id} ${osd-fsid-map.${id}} --no-systemd"
121 - "${config.services.ceph.osd.package.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${id} --cluster ${config.services.ceph.global.clusterName}"
123 - osd-post-stop = id: [
124 - "!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm deactivate ${id}"
118 + osd-name = id: "ceph-osd-${id}";
119 + osd-pre-start = id: [
120 + "!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm activate --bluestore ${id} ${osd-fsid-map.${id}} --no-systemd"
121 + "${config.services.ceph.osd.package.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${id} --cluster ${config.services.ceph.global.clusterName}"
123 + osd-post-stop = id: [
124 + "!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm deactivate ${id}"
127 + name = osd-name id;
129 + serviceConfig.ExecStartPre = lib.mkForce (osd-pre-start id);
130 + serviceConfig.ExecStopPost = osd-post-stop id;
131 + unitConfig.ConditionPathExists = lib.mkForce [ ];
132 + unitConfig.StartLimitBurst = lib.mkForce 4;
133 + path = with pkgs; [
127 - name = osd-name id;
129 - serviceConfig.ExecStartPre = lib.mkForce (osd-pre-start id);
130 - serviceConfig.ExecStopPost = osd-post-stop id;
131 - unitConfig.ConditionPathExists = lib.mkForce [ ];
132 - unitConfig.StartLimitBurst = lib.mkForce 4;
133 - path = with pkgs; [
141 - lib.pipe config.services.ceph.osd.daemons [
142 - (builtins.map map-osd)
143 - builtins.listToAttrs
141 + lib.pipe config.services.ceph.osd.daemons [
142 + (builtins.map map-osd)
143 + builtins.listToAttrs
153 - ceph.wait_for_unit("default.target")
150 + ceph.wait_for_unit("default.target")
155 - # Bootstrap ceph-mon daemon
157 - "mkdir -p /var/lib/ceph/bootstrap-osd",
158 - "ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
159 - "ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
160 - "ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'",
161 - "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
162 - "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring",
163 - "monmaptool --create --fsid ${cluster} --addv 0 'v2:[${ip}]:3300/0' --clobber /tmp/ceph.initial-monmap",
164 - "mkdir -p /var/lib/ceph/mon/ceph-0",
165 - "ceph-mon --mkfs -i 0 --monmap /tmp/ceph.initial-monmap --keyring /tmp/ceph.mon.keyring",
166 - "chown ceph:ceph -R /tmp/ceph.mon.keyring /var/lib/ceph",
167 - "systemctl start ceph-mon-0.service",
152 + # Bootstrap ceph-mon daemon
154 + "mkdir -p /var/lib/ceph/bootstrap-osd",
155 + "ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
156 + "ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
157 + "ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'",
158 + "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
159 + "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring",
160 + "monmaptool --create --fsid ${cluster} --addv 0 'v2:[${ip}]:3300/0' --clobber /tmp/ceph.initial-monmap",
161 + "mkdir -p /var/lib/ceph/mon/ceph-0",
162 + "ceph-mon --mkfs -i 0 --monmap /tmp/ceph.initial-monmap --keyring /tmp/ceph.mon.keyring",
163 + "chown ceph:ceph -R /tmp/ceph.mon.keyring /var/lib/ceph",
164 + "systemctl start ceph-mon-0.service",
170 - ceph.wait_for_unit("ceph-mon-0.service")
171 - # should the mon not start or bind for some reason this gives us a better error message than the config commands running into a timeout
172 - ceph.wait_for_open_port(3300, "${ip}")
174 - # required for HEALTH_OK
175 - "ceph config set mon auth_allow_insecure_global_id_reclaim false",
177 - "ceph config set global ms_bind_ipv4 false",
178 - "ceph config set global ms_bind_ipv6 true",
179 - # the new (secure) protocol
180 - "ceph config set global ms_bind_msgr1 false",
181 - "ceph config set global ms_bind_msgr2 true",
182 - # just a small little thing
183 - "ceph config set mon mon_compact_on_start true",
167 + ceph.wait_for_unit("ceph-mon-0.service")
168 + # should the mon not start or bind for some reason this gives us a better error message than the config commands running into a timeout
169 + ceph.wait_for_open_port(3300, "${ip}")
171 + # required for HEALTH_OK
172 + "ceph config set mon auth_allow_insecure_global_id_reclaim false",
174 + "ceph config set global ms_bind_ipv4 false",
175 + "ceph config set global ms_bind_ipv6 true",
176 + # the new (secure) protocol
177 + "ceph config set global ms_bind_msgr1 false",
178 + "ceph config set global ms_bind_msgr2 true",
179 + # just a small little thing
180 + "ceph config set mon mon_compact_on_start true",
186 - # Can't check ceph status until a mon is up
187 - ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
183 + # Can't check ceph status until a mon is up
184 + ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
189 - # Bootstrap OSDs (do this before starting the mgr because cryptsetup and the mgr both eat a lot of memory)
191 - # this will automatically do what's required for LVM, cryptsetup, and stores all the data in Ceph's internal databases
192 - "ceph-volume lvm prepare --bluestore --data /dev/vdb --dmcrypt --no-systemd --osd-id 0 --osd-fsid ${osd-fsid-map."0"}",
193 - "ceph-volume lvm prepare --bluestore --data /dev/vdc --dmcrypt --no-systemd --osd-id 1 --osd-fsid ${osd-fsid-map."1"}",
194 - "ceph-volume lvm prepare --bluestore --data /dev/vdd --dmcrypt --no-systemd --osd-id 2 --osd-fsid ${osd-fsid-map."2"}",
195 - "sudo ceph-volume lvm deactivate 0",
196 - "sudo ceph-volume lvm deactivate 1",
197 - "sudo ceph-volume lvm deactivate 2",
198 - "chown -R ceph:ceph /var/lib/ceph",
186 + # Bootstrap OSDs (do this before starting the mgr because cryptsetup and the mgr both eat a lot of memory)
188 + # this will automatically do what's required for LVM, cryptsetup, and stores all the data in Ceph's internal databases
189 + "ceph-volume lvm prepare --bluestore --data /dev/vdb --dmcrypt --no-systemd --osd-id 0 --osd-fsid ${osd-fsid-map."0"}",
190 + "ceph-volume lvm prepare --bluestore --data /dev/vdc --dmcrypt --no-systemd --osd-id 1 --osd-fsid ${osd-fsid-map."1"}",
191 + "ceph-volume lvm prepare --bluestore --data /dev/vdd --dmcrypt --no-systemd --osd-id 2 --osd-fsid ${osd-fsid-map."2"}",
192 + "sudo ceph-volume lvm deactivate 0",
193 + "sudo ceph-volume lvm deactivate 1",
194 + "sudo ceph-volume lvm deactivate 2",
195 + "chown -R ceph:ceph /var/lib/ceph",
201 - # Start OSDs (again, argon2id eats memory, so this happens before starting the mgr)
203 - "systemctl start ceph-osd-0.service",
204 - "systemctl start ceph-osd-1.service",
205 - "systemctl start ceph-osd-2.service",
207 - ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
208 - ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
198 + # Start OSDs (again, argon2id eats memory, so this happens before starting the mgr)
200 + "systemctl start ceph-osd-0.service",
201 + "systemctl start ceph-osd-1.service",
202 + "systemctl start ceph-osd-2.service",
204 + ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
205 + ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
210 - # Start the ceph-mgr daemon, after copying in the keyring
212 - "mkdir -p /var/lib/ceph/mgr/ceph-ceph/",
213 - "ceph auth get-or-create -o /var/lib/ceph/mgr/ceph-ceph/keyring mgr.ceph mon 'allow profile mgr' osd 'allow *' mds 'allow *'",
214 - "chown -R ceph:ceph /var/lib/ceph/mgr/ceph-ceph/",
215 - "systemctl start ceph-mgr-ceph.service",
217 - ceph.wait_for_unit("ceph-mgr-ceph")
218 - ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
219 - ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
220 - ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
221 - ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
207 + # Start the ceph-mgr daemon, after copying in the keyring
209 + "mkdir -p /var/lib/ceph/mgr/ceph-ceph/",
210 + "ceph auth get-or-create -o /var/lib/ceph/mgr/ceph-ceph/keyring mgr.ceph mon 'allow profile mgr' osd 'allow *' mds 'allow *'",
211 + "chown -R ceph:ceph /var/lib/ceph/mgr/ceph-ceph/",
212 + "systemctl start ceph-mgr-ceph.service",
214 + ceph.wait_for_unit("ceph-mgr-ceph")
215 + ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
216 + ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
217 + ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
218 + ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
223 - # test the actual storage
225 - "ceph osd pool create single-node-test 32 32",
226 - "ceph osd pool ls | grep 'single-node-test'",
220 + # test the actual storage
222 + "ceph osd pool create single-node-test 32 32",
223 + "ceph osd pool ls | grep 'single-node-test'",
228 - # We need to enable an application on the pool, otherwise it will
229 - # stay unhealthy in state POOL_APP_NOT_ENABLED.
230 - # Creating a CephFS would do this automatically, but we haven't done that here.
231 - # See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
232 - # We use the custom application name "nixos-test" for this.
233 - "ceph osd pool application enable single-node-test nixos-test",
225 + # We need to enable an application on the pool, otherwise it will
226 + # stay unhealthy in state POOL_APP_NOT_ENABLED.
227 + # Creating a CephFS would do this automatically, but we haven't done that here.
228 + # See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
229 + # We use the custom application name "nixos-test" for this.
230 + "ceph osd pool application enable single-node-test nixos-test",
235 - "ceph osd pool rename single-node-test single-node-other-test",
236 - "ceph osd pool ls | grep 'single-node-other-test'",
238 - ceph.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
239 - ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
240 - ceph.wait_until_succeeds("ceph -s | grep '33 active+clean'")
242 - # the old pool should be gone
243 - "ceph osd pool ls | grep 'multi-node-test'",
244 - # deleting the pool should fail without setting mon_allow_pool_delete
245 - "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
232 + "ceph osd pool rename single-node-test single-node-other-test",
233 + "ceph osd pool ls | grep 'single-node-other-test'",
235 + ceph.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
236 + ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
237 + ceph.wait_until_succeeds("ceph -s | grep '33 active+clean'")
239 + # the old pool should be gone
240 + "ceph osd pool ls | grep 'multi-node-test'",
241 + # deleting the pool should fail without setting mon_allow_pool_delete
242 + "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
248 - # rebooting gets rid of any potential tmpfs mounts or device-mapper devices
251 - ceph.wait_for_unit("default.target")
245 + # rebooting gets rid of any potential tmpfs mounts or device-mapper devices
248 + ceph.wait_for_unit("default.target")
253 - # Start it up (again OSDs first due to memory constraints of cryptsetup and mgr)
254 - ceph.systemctl("start ceph-mon-0.service")
255 - ceph.wait_for_unit("ceph-mon-0")
256 - ceph.systemctl("start ceph-osd-0.service")
257 - ceph.wait_for_unit("ceph-osd-0")
258 - ceph.systemctl("start ceph-osd-1.service")
259 - ceph.wait_for_unit("ceph-osd-1")
260 - ceph.systemctl("start ceph-osd-2.service")
261 - ceph.wait_for_unit("ceph-osd-2")
262 - ceph.systemctl("start ceph-mgr-ceph.service")
263 - ceph.wait_for_unit("ceph-mgr-ceph")
250 + # Start it up (again OSDs first due to memory constraints of cryptsetup and mgr)
251 + ceph.systemctl("start ceph-mon-0.service")
252 + ceph.wait_for_unit("ceph-mon-0")
253 + ceph.systemctl("start ceph-osd-0.service")
254 + ceph.wait_for_unit("ceph-osd-0")
255 + ceph.systemctl("start ceph-osd-1.service")
256 + ceph.wait_for_unit("ceph-osd-1")
257 + ceph.systemctl("start ceph-osd-2.service")
258 + ceph.wait_for_unit("ceph-osd-2")
259 + ceph.systemctl("start ceph-mgr-ceph.service")
260 + ceph.wait_for_unit("ceph-mgr-ceph")
265 - # Ensure the cluster comes back up again
266 - ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
267 - ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
268 - ceph.wait_until_succeeds("ceph osd stat | grep -E '3 osds: 3 up[^,]*, 3 in'")
269 - ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
270 - ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
262 + # Ensure the cluster comes back up again
263 + ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
264 + ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
265 + ceph.wait_until_succeeds("ceph osd stat | grep -E '3 osds: 3 up[^,]*, 3 in'")
266 + ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
267 + ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
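
Taken together, the hunks read like a conversion of this test from the import ./make-test-python.nix wrapper to a standalone NixOS test module: the wrapper line is dropped, name moves to the top of the attribute set, the nested maintainers list becomes meta.maintainers, and the remaining body only loses one level of indentation, which is why so many lines appear above as remove/add pairs with identical text ('-' lines are numbered against the old file, '+' lines against the new one, and unchanged lines are elided). A minimal sketch of that before/after shape, with the module arguments and the maintainer entries left as placeholders because the hunks do not show them:

    # before: wrapped in make-test-python.nix (layout inferred from the removed lines;
    # the argument set and maintainer entries are placeholders, not shown in the diff)
    import ./make-test-python.nix (
      { pkgs, lib, ... }:
      {
        name = "basic-single-node-ceph-cluster-bluestore-dmcrypt";
        meta = {
          maintainers = with lib.maintainers; [ /* ... */ ];
        };
        # nodes.ceph, testScript, ...
      }
    )

    # after: a plain test module with the same content, one indentation level shallower
    { pkgs, lib, ... }:
    {
      name = "basic-single-node-ceph-cluster-bluestore-dmcrypt";
      meta.maintainers = with lib.maintainers; [ /* ... */ ];
      # nodes.ceph, testScript, ...
    }

In the second form the file is presumably evaluated directly by the NixOS test runner (runTest) instead of calling make-test-python.nix itself; the behaviour of the test, including the dmcrypt OSD bootstrap and the reboot check, is unchanged by the hunks above.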