nixosTests: handleTest -> runTest, batch 2 (#414711)

Aleksana 019723d6 d36d50ad
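
This batch continues the migration of NixOS test entry points from the legacy `handleTest` helper to the module-based `runTest`/`runTestOn` runners. A minimal sketch of the two shapes involved, using entries that appear verbatim in the hunks below:

```nix
# Before: handleTest calls the test file as a function, passing an argument set.
blocky = handleTest ./blocky.nix { };
firewall = handleTest ./firewall.nix { nftables = false; };

# After: runTest treats the test file as a NixOS test module; per-test
# arguments are threaded through the module system via _module.args.
blocky = runTest ./blocky.nix;
firewall = runTest {
  imports = [ ./firewall.nix ];
  _module.args.nftables = false;
};
```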

+60 -39
nixos/tests/all-tests.nix
···
inherit runTest;
package = pkgs.bird3;
};
-
birdwatcher = handleTest ./birdwatcher.nix { };
+
birdwatcher = runTest ./birdwatcher.nix;
bitbox-bridge = runTest ./bitbox-bridge.nix;
bitcoind = runTest ./bitcoind.nix;
bittorrent = runTest ./bittorrent.nix;
blockbook-frontend = runTest ./blockbook-frontend.nix;
-
blocky = handleTest ./blocky.nix { };
+
blocky = runTest ./blocky.nix;
bookstack = runTest ./bookstack.nix;
boot = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./boot.nix { };
bootspec = handleTestOn [ "x86_64-linux" ] ./bootspec.nix { };
···
borgbackup = runTest ./borgbackup.nix;
borgmatic = runTest ./borgmatic.nix;
botamusique = runTest ./botamusique.nix;
-
bpf = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./bpf.nix { };
+
bpf = runTestOn [ "x86_64-linux" "aarch64-linux" ] ./bpf.nix;
bpftune = runTest ./bpftune.nix;
breitbandmessung = runTest ./breitbandmessung.nix;
broadcast-box = runTest ./broadcast-box.nix;
···
buildkite-agents = runTest ./buildkite-agents.nix;
c2fmzq = runTest ./c2fmzq.nix;
caddy = runTest ./caddy.nix;
-
cadvisor = handleTestOn [ "x86_64-linux" ] ./cadvisor.nix { };
+
cadvisor = runTestOn [ "x86_64-linux" ] ./cadvisor.nix;
cage = runTest ./cage.nix;
cagebreak = runTest ./cagebreak.nix;
calibre-web = runTest ./calibre-web.nix;
calibre-server = import ./calibre-server.nix { inherit pkgs runTest; };
canaille = runTest ./canaille.nix;
castopod = runTest ./castopod.nix;
-
cassandra_4 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_4; };
+
cassandra = runTest {
+
imports = [ ./cassandra.nix ];
+
_module.args.getPackage = pkgs: pkgs.cassandra;
+
};
centrifugo = runTest ./centrifugo.nix;
-
ceph-multi-node = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-multi-node.nix { };
-
ceph-single-node = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node.nix { };
-
ceph-single-node-bluestore = handleTestOn [
+
ceph-multi-node = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-multi-node.nix;
+
ceph-single-node = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node.nix;
+
ceph-single-node-bluestore = runTestOn [
"aarch64-linux"
"x86_64-linux"
-
] ./ceph-single-node-bluestore.nix { };
-
ceph-single-node-bluestore-dmcrypt = handleTestOn [
+
] ./ceph-single-node-bluestore.nix;
+
ceph-single-node-bluestore-dmcrypt = runTestOn [
"aarch64-linux"
"x86_64-linux"
-
] ./ceph-single-node-bluestore-dmcrypt.nix { };
-
certmgr = handleTest ./certmgr.nix { };
-
cfssl = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./cfssl.nix { };
+
] ./ceph-single-node-bluestore-dmcrypt.nix;
+
certmgr = import ./certmgr.nix { inherit pkgs runTest; };
+
cfssl = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./cfssl.nix;
cgit = runTest ./cgit.nix;
charliecloud = runTest ./charliecloud.nix;
chromadb = runTest ./chromadb.nix;
chromium = (handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./chromium.nix { }).stable or { };
-
chrony = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./chrony.nix { };
-
chrony-ptp = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./chrony-ptp.nix { };
+
chrony = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./chrony.nix;
+
chrony-ptp = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./chrony-ptp.nix;
cinnamon = runTest ./cinnamon.nix;
cinnamon-wayland = runTest ./cinnamon-wayland.nix;
cjdns = runTest ./cjdns.nix;
clatd = runTest ./clatd.nix;
clickhouse = import ./clickhouse { inherit runTest; };
-
cloud-init = handleTest ./cloud-init.nix { };
-
cloud-init-hostname = handleTest ./cloud-init-hostname.nix { };
+
cloud-init = runTest ./cloud-init.nix;
+
cloud-init-hostname = runTest ./cloud-init-hostname.nix;
cloudlog = runTest ./cloudlog.nix;
-
cntr = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./cntr.nix { };
+
cntr = import ./cntr.nix {
+
inherit (pkgs) lib;
+
runTest = runTestOn [
+
"aarch64-linux"
+
"x86_64-linux"
+
];
+
};
cockpit = runTest ./cockpit.nix;
-
cockroachdb = handleTestOn [ "x86_64-linux" ] ./cockroachdb.nix { };
+
cockroachdb = runTestOn [ "x86_64-linux" ] ./cockroachdb.nix;
code-server = runTest ./code-server.nix;
coder = runTest ./coder.nix;
collectd = runTest ./collectd.nix;
···
containers-tmpfs = runTest ./containers-tmpfs.nix;
containers-unified-hierarchy = runTest ./containers-unified-hierarchy.nix;
convos = runTest ./convos.nix;
-
corerad = handleTest ./corerad.nix { };
+
corerad = runTest ./corerad.nix;
corteza = runTest ./corteza.nix;
cosmic = runTest {
imports = [ ./cosmic.nix ];
···
coturn = runTest ./coturn.nix;
couchdb = runTest ./couchdb.nix;
crabfit = runTest ./crabfit.nix;
-
cri-o = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./cri-o.nix { };
+
cri-o = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./cri-o.nix;
cryptpad = runTest ./cryptpad.nix;
cups-pdf = runTest ./cups-pdf.nix;
curl-impersonate = runTest ./curl-impersonate.nix;
-
custom-ca = handleTest ./custom-ca.nix { };
+
custom-ca = import ./custom-ca.nix { inherit pkgs runTest; };
croc = runTest ./croc.nix;
cross-seed = runTest ./cross-seed.nix;
cyrus-imap = runTest ./cyrus-imap.nix;
···
dependency-track = runTest ./dependency-track.nix;
devpi-server = runTest ./devpi-server.nix;
dex-oidc = runTest ./dex-oidc.nix;
-
dhparams = handleTest ./dhparams.nix { };
+
dhparams = runTest ./dhparams.nix;
disable-installer-tools = runTest ./disable-installer-tools.nix;
discourse = runTest ./discourse.nix;
-
dnscrypt-proxy2 = handleTestOn [ "x86_64-linux" ] ./dnscrypt-proxy2.nix { };
+
dnscrypt-proxy2 = runTestOn [ "x86_64-linux" ] ./dnscrypt-proxy2.nix;
dnsdist = import ./dnsdist.nix { inherit pkgs runTest; };
doas = runTest ./doas.nix;
docker = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./docker.nix;
docker-rootless = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./docker-rootless.nix;
docker-registry = runTest ./docker-registry.nix;
-
docker-tools = handleTestOn [ "x86_64-linux" ] ./docker-tools.nix { };
+
docker-tools = runTestOn [ "x86_64-linux" ] ./docker-tools.nix;
docker-tools-nix-shell = runTest ./docker-tools-nix-shell.nix;
docker-tools-cross = runTestOn [ "x86_64-linux" "aarch64-linux" ] ./docker-tools-cross.nix;
docker-tools-overlay = runTestOn [ "x86_64-linux" ] ./docker-tools-overlay.nix;
···
dokuwiki = runTest ./dokuwiki.nix;
dolibarr = runTest ./dolibarr.nix;
domination = runTest ./domination.nix;
-
dovecot = handleTest ./dovecot.nix { };
+
dovecot = runTest ./dovecot.nix;
drawterm = discoverTests (import ./drawterm.nix);
draupnir = runTest ./matrix/draupnir.nix;
drbd = runTest ./drbd.nix;
···
drbd-driver = runTest ./drbd-driver.nix;
dublin-traceroute = runTest ./dublin-traceroute.nix;
dwl = runTestOn [ "x86_64-linux" "aarch64-linux" ] ./dwl.nix;
-
earlyoom = handleTestOn [ "x86_64-linux" ] ./earlyoom.nix { };
-
early-mount-options = handleTest ./early-mount-options.nix { };
+
earlyoom = runTestOn [ "x86_64-linux" ] ./earlyoom.nix;
+
early-mount-options = runTest ./early-mount-options.nix;
ec2-config = (handleTestOn [ "x86_64-linux" ] ./ec2.nix { }).boot-ec2-config or { };
ec2-nixops = (handleTestOn [ "x86_64-linux" ] ./ec2.nix { }).boot-ec2-nixops or { };
echoip = runTest ./echoip.nix;
···
activation-etc-overlay-mutable = runTest ./activation/etc-overlay-mutable.nix;
activation-etc-overlay-immutable = runTest ./activation/etc-overlay-immutable.nix;
activation-perlless = runTest ./activation/perlless.nix;
-
etcd = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./etcd/etcd.nix { };
-
etcd-cluster = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./etcd/etcd-cluster.nix { };
+
etcd = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./etcd/etcd.nix;
+
etcd-cluster = runTestOn [ "aarch64-linux" "x86_64-linux" ] ./etcd/etcd-cluster.nix;
etebase-server = runTest ./etebase-server.nix;
etesync-dav = runTest ./etesync-dav.nix;
evcc = runTest ./evcc.nix;
···
fakeroute = runTest ./fakeroute.nix;
fancontrol = runTest ./fancontrol.nix;
fanout = runTest ./fanout.nix;
-
fcitx5 = handleTest ./fcitx5 { };
+
fcitx5 = runTest ./fcitx5;
fedimintd = runTest ./fedimintd.nix;
fenics = runTest ./fenics.nix;
ferm = runTest ./ferm.nix;
-
ferretdb = handleTest ./ferretdb.nix { };
+
ferretdb = import ./ferretdb.nix { inherit pkgs runTest; };
fider = runTest ./fider.nix;
filesender = runTest ./filesender.nix;
filebrowser = runTest ./filebrowser.nix;
···
};
firefoxpwa = runTest ./firefoxpwa.nix;
firejail = runTest ./firejail.nix;
-
firewall = handleTest ./firewall.nix { nftables = false; };
-
firewall-nftables = handleTest ./firewall.nix { nftables = true; };
+
firewall = runTest {
+
imports = [ ./firewall.nix ];
+
_module.args.nftables = false;
+
};
+
firewall-nftables = runTest {
+
imports = [ ./firewall.nix ];
+
_module.args.nftables = true;
+
};
fish = runTest ./fish.nix;
firezone = runTest ./firezone/firezone.nix;
-
flannel = handleTestOn [ "x86_64-linux" ] ./flannel.nix { };
+
flannel = runTestOn [ "x86_64-linux" ] ./flannel.nix;
flaresolverr = runTest ./flaresolverr.nix;
flood = runTest ./flood.nix;
floorp = runTest {
···
freenet = runTest ./freenet.nix;
freeswitch = runTest ./freeswitch.nix;
freetube = discoverTests (import ./freetube.nix);
-
freshrss = handleTest ./freshrss { };
+
freshrss = import ./freshrss { inherit runTest; };
frigate = runTest ./frigate.nix;
froide-govplan = runTest ./web-apps/froide-govplan.nix;
frp = runTest ./frp.nix;
frr = runTest ./frr.nix;
-
fsck = handleTest ./fsck.nix { };
-
fsck-systemd-stage-1 = handleTest ./fsck.nix { systemdStage1 = true; };
+
fsck = runTest {
+
imports = [ ./fsck.nix ];
+
_module.args.systemdStage1 = false;
+
};
+
fsck-systemd-stage-1 = runTest {
+
imports = [ ./fsck.nix ];
+
_module.args.systemdStage1 = true;
+
};
ft2-clone = runTest ./ft2-clone.nix;
legit = runTest ./legit.nix;
mimir = runTest ./mimir.nix;
···
};
gatus = runTest ./gatus.nix;
getaddrinfo = runTest ./getaddrinfo.nix;
-
gemstash = handleTest ./gemstash.nix { };
+
gemstash = import ./gemstash.nix { inherit pkgs runTest; };
geoclue2 = runTest ./geoclue2.nix;
geoserver = runTest ./geoserver.nix;
gerrit = runTest ./gerrit.nix;
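
Entries such as `certmgr`, `custom-ca`, `ferretdb`, and `gemstash` keep a function-style test file because one file yields several test variants; the file now receives `runTest` directly instead of being driven by `handleTest`. A hedged sketch of what such a file might look like (the variant names and the shared module path are hypothetical, not taken from the actual files):

```nix
# Shape only; "variant-a"/"variant-b" and ./shared-module.nix are hypothetical.
{ pkgs, runTest }:
{
  variant-a = runTest {
    imports = [ ./shared-module.nix ];
    _module.args.flavor = "a";
  };
  variant-b = runTest {
    imports = [ ./shared-module.nix ];
    _module.args.flavor = "b";
  };
}
```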
+3 -15
nixos/tests/birdwatcher.nix
···
# This test does a basic functionality check for birdwatcher
{
-
system ? builtins.currentSystem,
-
pkgs ? import ../.. {
-
inherit system;
-
config = { };
-
},
-
}:
-
-
let
-
inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
-
inherit (pkgs.lib) optionalString;
-
in
-
makeTest {
name = "birdwatcher";
-
nodes = {
-
host1 = {
+
nodes.host1 =
+
{ pkgs, ... }:
+
{
environment.systemPackages = with pkgs; [ jq ];
services.bird = {
enable = true;
···
'';
};
};
-
};
testScript = ''
start_all()
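
As the birdwatcher hunk shows, a `runTest` file drops the `system`/`pkgs` function header and the `makeTest` wrapper from `../lib/testing-python.nix`; the file is now a plain test module. A minimal sketch of the resulting shape, abridged from the hunk above:

```nix
{
  name = "birdwatcher";
  nodes.host1 =
    { pkgs, ... }:
    {
      environment.systemPackages = with pkgs; [ jq ];
      services.bird.enable = true;
    };
  testScript = ''
    start_all()
  '';
}
```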
+1 -1
nixos/tests/blocky.nix
···
-
import ./make-test-python.nix {
+
{
name = "blocky";
nodes = {
+39 -41
nixos/tests/bpf.nix
···
-
import ./make-test-python.nix (
-
{ pkgs, ... }:
-
{
-
name = "bpf";
-
meta.maintainers = with pkgs.lib.maintainers; [ martinetd ];
+
{ lib, ... }:
+
{
+
name = "bpf";
+
meta.maintainers = with lib.maintainers; [ martinetd ];
-
nodes.machine =
-
{ pkgs, ... }:
-
{
-
programs.bcc.enable = true;
-
environment.systemPackages = with pkgs; [ bpftrace ];
-
};
+
nodes.machine =
+
{ pkgs, ... }:
+
{
+
programs.bcc.enable = true;
+
environment.systemPackages = with pkgs; [ bpftrace ];
+
};
-
testScript = ''
-
## bcc
-
# syscount -d 1 stops 1s after probe started so is good for that
-
print(machine.succeed("syscount -d 1"))
+
testScript = ''
+
## bcc
+
# syscount -d 1 stops 1s after the probe starts, which makes it a good fit for this test
+
print(machine.succeed("syscount -d 1"))
-
## bpftrace
-
# list probes
-
machine.succeed("bpftrace -l")
-
# simple BEGIN probe (user probe on bpftrace itself)
-
print(machine.succeed("bpftrace -e 'BEGIN { print(\"ok\\n\"); exit(); }'"))
-
# tracepoint
-
print(machine.succeed("bpftrace -e 'tracepoint:syscalls:sys_enter_* { print(probe); exit() }'"))
-
# kprobe
-
print(machine.succeed("bpftrace -e 'kprobe:schedule { print(probe); exit() }'"))
-
# BTF
-
print(machine.succeed("bpftrace -e 'kprobe:schedule { "
-
" printf(\"tgid: %d\\n\", ((struct task_struct*) curtask)->tgid); exit() "
-
"}'"))
-
# module BTF (bpftrace >= 0.17)
-
# test is currently disabled on aarch64 as kfunc does not work there yet
-
# https://github.com/iovisor/bpftrace/issues/2496
-
print(machine.succeed("uname -m | grep aarch64 || "
-
"bpftrace -e 'kfunc:nft_trans_alloc_gfp { "
-
" printf(\"portid: %d\\n\", args->ctx->portid); "
-
"} BEGIN { exit() }'"))
-
# glibc includes
-
print(machine.succeed("bpftrace -e '#include <errno.h>\n"
-
"BEGIN { printf(\"ok %d\\n\", EINVAL); exit(); }'"))
-
'';
-
}
-
)
+
## bpftrace
+
# list probes
+
machine.succeed("bpftrace -l")
+
# simple BEGIN probe (user probe on bpftrace itself)
+
print(machine.succeed("bpftrace -e 'BEGIN { print(\"ok\\n\"); exit(); }'"))
+
# tracepoint
+
print(machine.succeed("bpftrace -e 'tracepoint:syscalls:sys_enter_* { print(probe); exit() }'"))
+
# kprobe
+
print(machine.succeed("bpftrace -e 'kprobe:schedule { print(probe); exit() }'"))
+
# BTF
+
print(machine.succeed("bpftrace -e 'kprobe:schedule { "
+
" printf(\"tgid: %d\\n\", ((struct task_struct*) curtask)->tgid); exit() "
+
"}'"))
+
# module BTF (bpftrace >= 0.17)
+
# test is currently disabled on aarch64 as kfunc does not work there yet
+
# https://github.com/iovisor/bpftrace/issues/2496
+
print(machine.succeed("uname -m | grep aarch64 || "
+
"bpftrace -e 'kfunc:nft_trans_alloc_gfp { "
+
" printf(\"portid: %d\\n\", args->ctx->portid); "
+
"} BEGIN { exit() }'"))
+
# glibc includes
+
print(machine.succeed("bpftrace -e '#include <errno.h>\n"
+
"BEGIN { printf(\"ok %d\\n\", EINVAL); exit(); }'"))
+
'';
+
}
+26 -32
nixos/tests/cadvisor.nix
···
-
import ./make-test-python.nix (
-
{ lib, pkgs, ... }:
-
{
-
name = "cadvisor";
-
meta.maintainers = with lib.maintainers; [ offline ];
+
{ lib, ... }:
+
{
+
name = "cadvisor";
+
meta.maintainers = with lib.maintainers; [ offline ];
-
nodes = {
-
machine =
-
{ ... }:
-
{
-
services.cadvisor.enable = true;
-
};
+
nodes = {
+
machine = {
+
services.cadvisor.enable = true;
+
};
-
influxdb =
-
{ lib, ... }:
-
{
-
services.cadvisor.enable = true;
-
services.cadvisor.storageDriver = "influxdb";
-
services.influxdb.enable = true;
-
};
+
influxdb = {
+
services.cadvisor.enable = true;
+
services.cadvisor.storageDriver = "influxdb";
+
services.influxdb.enable = true;
};
+
};
-
testScript = ''
-
start_all()
-
machine.wait_for_unit("cadvisor.service")
-
machine.succeed("curl -f http://localhost:8080/containers/")
+
testScript = ''
+
start_all()
+
machine.wait_for_unit("cadvisor.service")
+
machine.succeed("curl -f http://localhost:8080/containers/")
-
influxdb.wait_for_unit("influxdb.service")
+
influxdb.wait_for_unit("influxdb.service")
-
# create influxdb database
-
influxdb.succeed(
-
'curl -f -XPOST http://localhost:8086/query --data-urlencode "q=CREATE DATABASE root"'
-
)
+
# create influxdb database
+
influxdb.succeed(
+
'curl -f -XPOST http://localhost:8086/query --data-urlencode "q=CREATE DATABASE root"'
+
)
-
influxdb.wait_for_unit("cadvisor.service")
-
influxdb.succeed("curl -f http://localhost:8080/containers/")
-
'';
-
}
-
)
+
influxdb.wait_for_unit("cadvisor.service")
+
influxdb.succeed("curl -f http://localhost:8080/containers/")
+
'';
+
}
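
A side effect of the module form, visible in the cadvisor hunk: a node that takes no module arguments can be a bare attrset, so the `{ ... }:` function wrappers disappear. A short sketch (the `other` node is illustrative, not part of this test):

```nix
# A node that needs no module arguments is just an attrset:
nodes.machine = {
  services.cadvisor.enable = true;
};
# Only nodes that actually use pkgs/lib/config keep the function form:
nodes.other =
  { pkgs, ... }:
  {
    environment.systemPackages = [ pkgs.curl ];
  };
```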
+145 -147
nixos/tests/cassandra.nix
···
-
import ./make-test-python.nix (
-
{
-
pkgs,
-
lib,
-
testPackage ? pkgs.cassandra,
-
...
-
}:
-
let
-
clusterName = "NixOS Automated-Test Cluster";
+
{
+
pkgs,
+
lib,
+
getPackage ? pkgs: pkgs.cassandra_4,
+
...
+
}:
+
let
+
testPackage = getPackage pkgs;
+
clusterName = "NixOS Automated-Test Cluster";
+
testRemoteAuth = lib.versionAtLeast testPackage.version "3.11";
+
jmxRoles = [
+
{
+
username = "me";
+
password = "password";
+
}
+
];
+
jmxRolesFile = ./cassandra-jmx-roles;
+
jmxAuthArgs = "-u ${(builtins.elemAt jmxRoles 0).username} -pw ${(builtins.elemAt jmxRoles 0).password}";
+
jmxPort = 7200; # Non-standard port so it doesn't accidentally work
+
jmxPortStr = toString jmxPort;
-
testRemoteAuth = lib.versionAtLeast testPackage.version "3.11";
-
jmxRoles = [
-
{
-
username = "me";
-
password = "password";
-
}
-
];
-
jmxRolesFile = ./cassandra-jmx-roles;
-
jmxAuthArgs = "-u ${(builtins.elemAt jmxRoles 0).username} -pw ${(builtins.elemAt jmxRoles 0).password}";
-
jmxPort = 7200; # Non-standard port so it doesn't accidentally work
-
jmxPortStr = toString jmxPort;
-
-
# Would usually be assigned to 512M.
-
# Set it to a different value, so that we can check whether our config
-
# actually changes it.
-
numMaxHeapSize = "400";
-
getHeapLimitCommand = ''
-
nodetool info -p ${jmxPortStr} | grep "^Heap Memory" | awk '{print $NF}'
-
'';
-
checkHeapLimitCommand = pkgs.writeShellScript "check-heap-limit.sh" ''
-
[ 1 -eq "$(echo "$(${getHeapLimitCommand}) < ${numMaxHeapSize}" | ${pkgs.bc}/bin/bc)" ]
-
'';
+
# Would usually be assigned to 512M.
+
# Set it to a different value, so that we can check whether our config
+
# actually changes it.
+
numMaxHeapSize = "400";
+
getHeapLimitCommand = ''
+
nodetool info -p ${jmxPortStr} | grep "^Heap Memory" | awk '{print $NF}'
+
'';
+
checkHeapLimitCommand = pkgs.writeShellScript "check-heap-limit.sh" ''
+
[ 1 -eq "$(echo "$(${getHeapLimitCommand}) < ${numMaxHeapSize}" | ${pkgs.bc}/bin/bc)" ]
+
'';
-
cassandraCfg = ipAddress: {
-
enable = true;
-
inherit clusterName;
-
listenAddress = ipAddress;
-
rpcAddress = ipAddress;
-
seedAddresses = [ "192.168.1.1" ];
-
package = testPackage;
-
maxHeapSize = "${numMaxHeapSize}M";
-
heapNewSize = "100M";
-
inherit jmxPort;
-
};
-
nodeCfg =
-
ipAddress: extra:
-
{ pkgs, config, ... }:
-
rec {
-
environment.systemPackages = [ testPackage ];
-
networking = {
-
firewall.allowedTCPPorts = [
-
7000
-
9042
-
services.cassandra.jmxPort
-
];
-
useDHCP = false;
-
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
-
{
-
address = ipAddress;
-
prefixLength = 24;
-
}
-
];
-
};
-
services.cassandra = cassandraCfg ipAddress // extra;
+
cassandraCfg = pkgs: ipAddress: {
+
enable = true;
+
inherit clusterName;
+
listenAddress = ipAddress;
+
rpcAddress = ipAddress;
+
seedAddresses = [ "192.168.1.1" ];
+
package = getPackage pkgs;
+
maxHeapSize = "${numMaxHeapSize}M";
+
heapNewSize = "100M";
+
inherit jmxPort;
+
};
+
nodeCfg =
+
ipAddress: extra:
+
{ pkgs, config, ... }:
+
rec {
+
environment.systemPackages = [ (getPackage pkgs) ];
+
networking = {
+
firewall.allowedTCPPorts = [
+
7000
+
9042
+
services.cassandra.jmxPort
+
];
+
useDHCP = false;
+
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+
{
+
address = ipAddress;
+
prefixLength = 24;
+
}
+
];
};
-
in
-
{
-
name = "cassandra-${testPackage.version}";
-
meta = {
-
maintainers = with lib.maintainers; [ johnazoidberg ];
+
services.cassandra = cassandraCfg pkgs ipAddress // extra;
};
+
in
+
{
+
name = "cassandra-${testPackage.version}";
+
meta = {
+
maintainers = with lib.maintainers; [ johnazoidberg ];
+
};
-
nodes = {
-
cass0 = nodeCfg "192.168.1.1" { };
-
cass1 = nodeCfg "192.168.1.2" (
-
lib.optionalAttrs testRemoteAuth {
-
inherit jmxRoles;
-
remoteJmx = true;
-
}
-
);
-
cass2 = nodeCfg "192.168.1.3" { jvmOpts = [ "-Dcassandra.replace_address=cass1" ]; };
-
};
+
nodes = {
+
cass0 = nodeCfg "192.168.1.1" { };
+
cass1 = nodeCfg "192.168.1.2" (
+
lib.optionalAttrs testRemoteAuth {
+
inherit jmxRoles;
+
remoteJmx = true;
+
}
+
);
+
cass2 = nodeCfg "192.168.1.3" { jvmOpts = [ "-Dcassandra.replace_address=cass1" ]; };
+
};
-
testScript =
-
''
-
# Check configuration
-
with subtest("Timers exist"):
-
cass0.succeed("systemctl list-timers | grep cassandra-full-repair.timer")
-
cass0.succeed("systemctl list-timers | grep cassandra-incremental-repair.timer")
+
testScript =
+
''
+
# Check configuration
+
with subtest("Timers exist"):
+
cass0.succeed("systemctl list-timers | grep cassandra-full-repair.timer")
+
cass0.succeed("systemctl list-timers | grep cassandra-incremental-repair.timer")
-
with subtest("Can connect via cqlsh"):
-
cass0.wait_for_unit("cassandra.service")
-
cass0.wait_until_succeeds("nc -z cass0 9042")
-
cass0.succeed("echo 'show version;' | cqlsh cass0")
+
with subtest("Can connect via cqlsh"):
+
cass0.wait_for_unit("cassandra.service")
+
cass0.wait_until_succeeds("nc -z cass0 9042")
+
cass0.succeed("echo 'show version;' | cqlsh cass0")
-
with subtest("Nodetool is operational"):
-
cass0.wait_for_unit("cassandra.service")
-
cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
-
cass0.succeed("nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass0'")
+
with subtest("Nodetool is operational"):
+
cass0.wait_for_unit("cassandra.service")
+
cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+
cass0.succeed("nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass0'")
-
with subtest("Cluster name was set"):
-
cass0.wait_for_unit("cassandra.service")
-
cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
-
cass0.wait_until_succeeds(
-
"nodetool describecluster -p ${jmxPortStr} | grep 'Name: ${clusterName}'"
-
)
+
with subtest("Cluster name was set"):
+
cass0.wait_for_unit("cassandra.service")
+
cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+
cass0.wait_until_succeeds(
+
"nodetool describecluster -p ${jmxPortStr} | grep 'Name: ${clusterName}'"
+
)
-
with subtest("Heap limit set correctly"):
-
# Nodetool takes a while until it can display info
-
cass0.wait_until_succeeds("nodetool info -p ${jmxPortStr}")
-
cass0.succeed("${checkHeapLimitCommand}")
+
with subtest("Heap limit set correctly"):
+
# Nodetool takes a while until it can display info
+
cass0.wait_until_succeeds("nodetool info -p ${jmxPortStr}")
+
cass0.succeed("${checkHeapLimitCommand}")
-
# Check cluster interaction
-
with subtest("Bring up cluster"):
-
cass1.wait_for_unit("cassandra.service")
-
cass1.wait_until_succeeds(
-
"nodetool -p ${jmxPortStr} ${jmxAuthArgs} status | egrep -c '^UN' | grep 2"
-
)
-
cass0.succeed("nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass1'")
-
''
-
+ lib.optionalString testRemoteAuth ''
-
with subtest("Remote authenticated jmx"):
-
# Doesn't work if not enabled
-
cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
-
cass1.fail("nc -z 192.168.1.1 ${jmxPortStr}")
-
cass1.fail("nodetool -p ${jmxPortStr} -h 192.168.1.1 status")
+
# Check cluster interaction
+
with subtest("Bring up cluster"):
+
cass1.wait_for_unit("cassandra.service")
+
cass1.wait_until_succeeds(
+
"nodetool -p ${jmxPortStr} ${jmxAuthArgs} status | egrep -c '^UN' | grep 2"
+
)
+
cass0.succeed("nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass1'")
+
''
+
+ lib.optionalString testRemoteAuth ''
+
with subtest("Remote authenticated jmx"):
+
# Doesn't work if not enabled
+
cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+
cass1.fail("nc -z 192.168.1.1 ${jmxPortStr}")
+
cass1.fail("nodetool -p ${jmxPortStr} -h 192.168.1.1 status")
-
# Works if enabled
-
cass1.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
-
cass0.succeed("nodetool -p ${jmxPortStr} -h 192.168.1.2 ${jmxAuthArgs} status")
-
''
-
+ ''
-
with subtest("Break and fix node"):
-
cass1.block()
-
cass0.wait_until_succeeds(
-
"nodetool status -p ${jmxPortStr} --resolve-ip | egrep -c '^DN[[:space:]]+cass1'"
-
)
-
cass0.succeed("nodetool status -p ${jmxPortStr} | egrep -c '^UN' | grep 1")
-
cass1.unblock()
-
cass1.wait_until_succeeds(
-
"nodetool -p ${jmxPortStr} ${jmxAuthArgs} status | egrep -c '^UN' | grep 2"
-
)
-
cass0.succeed("nodetool status -p ${jmxPortStr} | egrep -c '^UN' | grep 2")
+
# Works if enabled
+
cass1.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+
cass0.succeed("nodetool -p ${jmxPortStr} -h 192.168.1.2 ${jmxAuthArgs} status")
+
''
+
+ ''
+
with subtest("Break and fix node"):
+
cass1.block()
+
cass0.wait_until_succeeds(
+
"nodetool status -p ${jmxPortStr} --resolve-ip | egrep -c '^DN[[:space:]]+cass1'"
+
)
+
cass0.succeed("nodetool status -p ${jmxPortStr} | egrep -c '^UN' | grep 1")
+
cass1.unblock()
+
cass1.wait_until_succeeds(
+
"nodetool -p ${jmxPortStr} ${jmxAuthArgs} status | egrep -c '^UN' | grep 2"
+
)
+
cass0.succeed("nodetool status -p ${jmxPortStr} | egrep -c '^UN' | grep 2")
-
with subtest("Replace crashed node"):
-
cass1.block() # .crash() waits until it's fully shutdown
-
cass2.start()
-
cass0.wait_until_fails(
-
"nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass1'"
-
)
+
with subtest("Replace crashed node"):
+
cass1.block() # .crash() waits until it's fully shutdown
+
cass2.start()
+
cass0.wait_until_fails(
+
"nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass1'"
+
)
-
cass2.wait_for_unit("cassandra.service")
-
cass0.wait_until_succeeds(
-
"nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass2'"
-
)
-
'';
+
cass2.wait_for_unit("cassandra.service")
+
cass0.wait_until_succeeds(
+
"nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass2'"
+
)
+
'';
-
passthru = {
-
inherit testPackage;
-
};
-
}
-
)
+
passthru = {
+
inherit testPackage;
+
};
+
}
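
The cassandra test now accepts a `getPackage` function (defaulting to `pkgs: pkgs.cassandra_4`) instead of a concrete `testPackage`, so each node resolves the package against its own `pkgs`. How the two sides fit together, taken from the hunks above:

```nix
# all-tests.nix selects the package under test via _module.args:
cassandra = runTest {
  imports = [ ./cassandra.nix ];
  _module.args.getPackage = pkgs: pkgs.cassandra;
};

# Inside cassandra.nix, the package is resolved per package set:
#   testPackage = getPackage pkgs;
#   environment.systemPackages = [ (getPackage pkgs) ];
```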
+248 -259
nixos/tests/ceph-multi-node.nix
···
-
import ./make-test-python.nix (
-
{ pkgs, lib, ... }:
-
-
let
-
cfg = {
-
clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
-
monA = {
-
name = "a";
-
ip = "192.168.1.1";
+
{ lib, ... }:
+
let
+
cfg = {
+
clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+
monA = {
+
name = "a";
+
ip = "192.168.1.1";
+
};
+
osd0 = {
+
name = "0";
+
ip = "192.168.1.2";
+
key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+
uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+
};
+
osd1 = {
+
name = "1";
+
ip = "192.168.1.3";
+
key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+
uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+
};
+
osd2 = {
+
name = "2";
+
ip = "192.168.1.4";
+
key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+
uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+
};
+
};
+
generateCephConfig =
+
{ daemonConfig }:
+
{
+
enable = true;
+
global = {
+
fsid = cfg.clusterId;
+
monHost = cfg.monA.ip;
+
monInitialMembers = cfg.monA.name;
};
-
osd0 = {
-
name = "0";
-
ip = "192.168.1.2";
-
key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
-
uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+
}
+
// daemonConfig;
+
+
generateHost =
+
{ cephConfig, networkConfig }:
+
{ pkgs, ... }:
+
{
+
virtualisation = {
+
emptyDiskImages = [ 20480 ];
+
vlans = [ 1 ];
};
-
osd1 = {
-
name = "1";
-
ip = "192.168.1.3";
-
key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
-
uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
-
};
-
osd2 = {
-
name = "2";
-
ip = "192.168.1.4";
-
key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
-
uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
-
};
-
};
-
generateCephConfig =
-
{ daemonConfig }:
-
{
-
enable = true;
-
global = {
-
fsid = cfg.clusterId;
-
monHost = cfg.monA.ip;
-
monInitialMembers = cfg.monA.name;
-
};
-
}
-
// daemonConfig;
-
generateHost =
-
{
-
pkgs,
-
cephConfig,
-
networkConfig,
-
...
-
}:
-
{
-
virtualisation = {
-
emptyDiskImages = [ 20480 ];
-
vlans = [ 1 ];
-
};
+
networking = networkConfig;
-
networking = networkConfig;
+
environment.systemPackages = with pkgs; [
+
bash
+
sudo
+
ceph
+
xfsprogs
+
libressl.nc
+
];
-
environment.systemPackages = with pkgs; [
-
bash
-
sudo
-
ceph
-
xfsprogs
-
libressl.nc
-
];
+
boot.kernelModules = [ "xfs" ];
-
boot.kernelModules = [ "xfs" ];
+
services.ceph = cephConfig;
+
};
-
services.ceph = cephConfig;
-
};
-
-
networkMonA = {
-
dhcpcd.enable = false;
-
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+
networkMonA = {
+
dhcpcd.enable = false;
+
interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [
+
{
+
address = cfg.monA.ip;
+
prefixLength = 24;
+
}
+
];
+
firewall = {
+
allowedTCPPorts = [
+
6789
+
3300
+
];
+
allowedTCPPortRanges = [
{
-
address = cfg.monA.ip;
-
prefixLength = 24;
+
from = 6800;
+
to = 7300;
}
];
-
firewall = {
-
allowedTCPPorts = [
-
6789
-
3300
-
];
-
allowedTCPPortRanges = [
-
{
-
from = 6800;
-
to = 7300;
-
}
-
];
-
};
};
-
cephConfigMonA = generateCephConfig {
-
daemonConfig = {
-
mon = {
-
enable = true;
-
daemons = [ cfg.monA.name ];
-
};
-
mgr = {
-
enable = true;
-
daemons = [ cfg.monA.name ];
-
};
+
};
+
cephConfigMonA = generateCephConfig {
+
daemonConfig = {
+
mon = {
+
enable = true;
+
daemons = [ cfg.monA.name ];
+
};
+
mgr = {
+
enable = true;
+
daemons = [ cfg.monA.name ];
};
};
+
};
-
networkOsd = osd: {
-
dhcpcd.enable = false;
-
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+
networkOsd = osd: {
+
dhcpcd.enable = false;
+
interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [
+
{
+
address = osd.ip;
+
prefixLength = 24;
+
}
+
];
+
firewall = {
+
allowedTCPPortRanges = [
{
-
address = osd.ip;
-
prefixLength = 24;
+
from = 6800;
+
to = 7300;
}
];
-
firewall = {
-
allowedTCPPortRanges = [
-
{
-
from = 6800;
-
to = 7300;
-
}
-
];
-
};
};
+
};
-
cephConfigOsd =
-
osd:
-
generateCephConfig {
-
daemonConfig = {
-
osd = {
-
enable = true;
-
daemons = [ osd.name ];
-
};
+
cephConfigOsd =
+
osd:
+
generateCephConfig {
+
daemonConfig = {
+
osd = {
+
enable = true;
+
daemons = [ osd.name ];
};
};
+
};
+
+
# Following deployment is based on the manual deployment described here:
+
# https://docs.ceph.com/docs/master/install/manual-deployment/
+
# For other ways to deploy a ceph cluster, look at the documentation at
+
# https://docs.ceph.com/docs/master/
+
testscript =
+
{ ... }:
+
''
+
start_all()
-
# Following deployment is based on the manual deployment described here:
-
# https://docs.ceph.com/docs/master/install/manual-deployment/
-
# For other ways to deploy a ceph cluster, look at the documentation at
-
# https://docs.ceph.com/docs/master/
-
testscript =
-
{ ... }:
-
''
-
start_all()
+
monA.wait_for_unit("network.target")
+
osd0.wait_for_unit("network.target")
+
osd1.wait_for_unit("network.target")
+
osd2.wait_for_unit("network.target")
-
monA.wait_for_unit("network.target")
-
osd0.wait_for_unit("network.target")
-
osd1.wait_for_unit("network.target")
-
osd2.wait_for_unit("network.target")
+
# Bootstrap ceph-mon daemon
+
monA.succeed(
+
"sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+
"sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+
"sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+
"monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+
"sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+
"sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
+
"sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+
"systemctl start ceph-mon-${cfg.monA.name}",
+
)
+
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+
monA.succeed("ceph mon enable-msgr2")
+
monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
-
# Bootstrap ceph-mon daemon
-
monA.succeed(
-
"sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
-
"sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
-
"sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
-
"monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
-
"sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
-
"sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
-
"sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
-
"systemctl start ceph-mon-${cfg.monA.name}",
-
)
-
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
-
monA.succeed("ceph mon enable-msgr2")
-
monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
+
# Can't check ceph status until a mon is up
+
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
-
# Can't check ceph status until a mon is up
-
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
# Start the ceph-mgr daemon, it has no deps and hardly any setup
+
monA.succeed(
+
"ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+
"systemctl start ceph-mgr-${cfg.monA.name}",
+
)
+
monA.wait_for_unit("ceph-mgr-a")
+
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-
# Start the ceph-mgr daemon, it has no deps and hardly any setup
-
monA.succeed(
-
"ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
-
"systemctl start ceph-mgr-${cfg.monA.name}",
-
)
-
monA.wait_for_unit("ceph-mgr-a")
-
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
# Send the admin keyring to the OSD machines
+
monA.succeed("cp /etc/ceph/ceph.client.admin.keyring /tmp/shared")
+
osd0.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+
osd1.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+
osd2.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
-
# Send the admin keyring to the OSD machines
-
monA.succeed("cp /etc/ceph/ceph.client.admin.keyring /tmp/shared")
-
osd0.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
-
osd1.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
-
osd2.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+
# Bootstrap OSDs
+
osd0.succeed(
+
"mkfs.xfs /dev/vdb",
+
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+
'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
+
)
+
osd1.succeed(
+
"mkfs.xfs /dev/vdb",
+
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+
'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+
)
+
osd2.succeed(
+
"mkfs.xfs /dev/vdb",
+
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+
'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+
)
-
# Bootstrap OSDs
-
osd0.succeed(
-
"mkfs.xfs /dev/vdb",
-
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
-
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
-
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
-
'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
-
)
-
osd1.succeed(
-
"mkfs.xfs /dev/vdb",
-
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
-
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
-
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
-
'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
-
)
-
osd2.succeed(
-
"mkfs.xfs /dev/vdb",
-
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
-
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
-
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
-
'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
-
)
+
# Initialize the OSDs with regular filestore
+
osd0.succeed(
+
"ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+
"chown -R ceph:ceph /var/lib/ceph/osd",
+
"systemctl start ceph-osd-${cfg.osd0.name}",
+
)
+
osd1.succeed(
+
"ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+
"chown -R ceph:ceph /var/lib/ceph/osd",
+
"systemctl start ceph-osd-${cfg.osd1.name}",
+
)
+
osd2.succeed(
+
"ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+
"chown -R ceph:ceph /var/lib/ceph/osd",
+
"systemctl start ceph-osd-${cfg.osd2.name}",
+
)
+
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
# Initialize the OSDs with regular filestore
-
osd0.succeed(
-
"ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
-
"chown -R ceph:ceph /var/lib/ceph/osd",
-
"systemctl start ceph-osd-${cfg.osd0.name}",
-
)
-
osd1.succeed(
-
"ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
-
"chown -R ceph:ceph /var/lib/ceph/osd",
-
"systemctl start ceph-osd-${cfg.osd1.name}",
-
)
-
osd2.succeed(
-
"ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
-
"chown -R ceph:ceph /var/lib/ceph/osd",
-
"systemctl start ceph-osd-${cfg.osd2.name}",
-
)
-
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
monA.succeed(
+
"ceph osd pool create multi-node-test 32 32",
+
"ceph osd pool ls | grep 'multi-node-test'",
-
monA.succeed(
-
"ceph osd pool create multi-node-test 32 32",
-
"ceph osd pool ls | grep 'multi-node-test'",
+
# We need to enable an application on the pool, otherwise it will
+
# stay unhealthy in state POOL_APP_NOT_ENABLED.
+
# Creating a CephFS would do this automatically, but we haven't done that here.
+
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
+
# We use the custom application name "nixos-test" for this.
+
"ceph osd pool application enable multi-node-test nixos-test",
-
# We need to enable an application on the pool, otherwise it will
-
# stay unhealthy in state POOL_APP_NOT_ENABLED.
-
# Creating a CephFS would do this automatically, but we haven't done that here.
-
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
-
# We use the custom application name "nixos-test" for this.
-
"ceph osd pool application enable multi-node-test nixos-test",
+
"ceph osd pool rename multi-node-test multi-node-other-test",
+
"ceph osd pool ls | grep 'multi-node-other-test'",
+
)
+
monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
+
monA.succeed("ceph osd pool set multi-node-other-test size 2")
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
+
monA.fail(
+
"ceph osd pool ls | grep 'multi-node-test'",
+
"ceph osd pool delete multi-node-other-test multi-node-other-test --yes-i-really-really-mean-it",
+
)
-
"ceph osd pool rename multi-node-test multi-node-other-test",
-
"ceph osd pool ls | grep 'multi-node-other-test'",
-
)
-
monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
-
monA.succeed("ceph osd pool set multi-node-other-test size 2")
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
-
monA.fail(
-
"ceph osd pool ls | grep 'multi-node-test'",
-
"ceph osd pool delete multi-node-other-test multi-node-other-test --yes-i-really-really-mean-it",
-
)
+
# Shut down ceph on all machines in a very impolite way
+
monA.crash()
+
osd0.crash()
+
osd1.crash()
+
osd2.crash()
-
# Shut down ceph on all machines in a very unpolite way
-
monA.crash()
-
osd0.crash()
-
osd1.crash()
-
osd2.crash()
+
# Start it up
+
osd0.start()
+
osd1.start()
+
osd2.start()
+
monA.start()
-
# Start it up
-
osd0.start()
-
osd1.start()
-
osd2.start()
-
monA.start()
+
# Ensure the cluster comes back up again
+
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
'';
+
in
+
{
+
name = "basic-multi-node-ceph-cluster";
+
meta = with lib.maintainers; {
+
maintainers = [ lejonet ];
+
};
-
# Ensure the cluster comes back up again
-
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
-
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
'';
-
in
-
{
-
name = "basic-multi-node-ceph-cluster";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [ lejonet ];
+
nodes = {
+
monA = generateHost {
+
cephConfig = cephConfigMonA;
+
networkConfig = networkMonA;
+
};
+
osd0 = generateHost {
+
cephConfig = cephConfigOsd cfg.osd0;
+
networkConfig = networkOsd cfg.osd0;
+
};
+
osd1 = generateHost {
+
cephConfig = cephConfigOsd cfg.osd1;
+
networkConfig = networkOsd cfg.osd1;
};
-
-
nodes = {
-
monA = generateHost {
-
pkgs = pkgs;
-
cephConfig = cephConfigMonA;
-
networkConfig = networkMonA;
-
};
-
osd0 = generateHost {
-
pkgs = pkgs;
-
cephConfig = cephConfigOsd cfg.osd0;
-
networkConfig = networkOsd cfg.osd0;
-
};
-
osd1 = generateHost {
-
pkgs = pkgs;
-
cephConfig = cephConfigOsd cfg.osd1;
-
networkConfig = networkOsd cfg.osd1;
-
};
-
osd2 = generateHost {
-
pkgs = pkgs;
-
cephConfig = cephConfigOsd cfg.osd2;
-
networkConfig = networkOsd cfg.osd2;
-
};
+
osd2 = generateHost {
+
cephConfig = cephConfigOsd cfg.osd2;
+
networkConfig = networkOsd cfg.osd2;
};
+
};
-
testScript = testscript;
-
}
-
)
+
testScript = testscript;
+
}
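
In the ceph-multi-node rewrite, `generateHost` no longer takes `pkgs` from the outer scope; it returns a module function, so the module system injects `pkgs` itself and the repetitive `pkgs = pkgs;` arguments at each call site go away. Condensed from the hunks above:

```nix
generateHost =
  { cephConfig, networkConfig }:
  { pkgs, ... }:
  {
    virtualisation.emptyDiskImages = [ 20480 ];
    networking = networkConfig;
    services.ceph = cephConfig;
  };

nodes.monA = generateHost {
  cephConfig = cephConfigMonA;
  networkConfig = networkMonA;
};
```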
+240 -244
nixos/tests/ceph-single-node-bluestore-dmcrypt.nix
···
-
import ./make-test-python.nix (
-
{ pkgs, lib, ... }:
+
{ lib, ... }:
-
let
-
# the single node ipv6 address
-
ip = "2001:db8:ffff::";
-
# the global ceph cluster id
-
cluster = "54465b37-b9d8-4539-a1f9-dd33c75ee45a";
-
# the fsids of OSDs
-
osd-fsid-map = {
-
"0" = "1c1b7ea9-06bf-4d30-9a01-37ac3a0254aa";
-
"1" = "bd5a6f49-69d5-428c-ac25-a99f0c44375c";
-
"2" = "c90de6c7-86c6-41da-9694-e794096dfc5c";
-
};
+
let
+
# the single node ipv6 address
+
ip = "2001:db8:ffff::";
+
# the global ceph cluster id
+
cluster = "54465b37-b9d8-4539-a1f9-dd33c75ee45a";
+
# the fsids of OSDs
+
osd-fsid-map = {
+
"0" = "1c1b7ea9-06bf-4d30-9a01-37ac3a0254aa";
+
"1" = "bd5a6f49-69d5-428c-ac25-a99f0c44375c";
+
"2" = "c90de6c7-86c6-41da-9694-e794096dfc5c";
+
};
+
in
+
{
+
name = "basic-single-node-ceph-cluster-bluestore-dmcrypt";
+
meta.maintainers = with lib.maintainers; [
+
benaryorg
+
nh2
+
];
-
in
-
{
-
name = "basic-single-node-ceph-cluster-bluestore-dmcrypt";
-
meta = {
-
maintainers = with lib.maintainers; [
-
benaryorg
-
nh2
+
nodes.ceph =
+
{
+
lib,
+
pkgs,
+
config,
+
...
+
}:
+
{
+
# disks for bluestore
+
virtualisation.emptyDiskImages = [
+
20480
+
20480
+
20480
];
-
};
-
nodes = {
-
ceph =
-
{ pkgs, config, ... }:
-
{
-
# disks for bluestore
-
virtualisation.emptyDiskImages = [
-
20480
-
20480
-
20480
-
];
-
-
# networking setup (no external connectivity required, only local IPv6)
-
networking.useDHCP = false;
-
systemd.network = {
+
# networking setup (no external connectivity required, only local IPv6)
+
networking.useDHCP = false;
+
systemd.network = {
+
enable = true;
+
wait-online.extraArgs = [
+
"-i"
+
"lo"
+
];
+
networks = {
+
"40-loopback" = {
enable = true;
-
wait-online.extraArgs = [
-
"-i"
-
"lo"
-
];
-
networks = {
-
"40-loopback" = {
-
enable = true;
-
name = "lo";
-
DHCP = "no";
-
addresses = [ { Address = "${ip}/128"; } ];
-
};
-
};
+
name = "lo";
+
DHCP = "no";
+
addresses = [ { Address = "${ip}/128"; } ];
};
+
};
+
};
-
# do not start the ceph target by default so we can format the disks first
-
systemd.targets.ceph.wantedBy = lib.mkForce [ ];
+
# do not start the ceph target by default so we can format the disks first
+
systemd.targets.ceph.wantedBy = lib.mkForce [ ];
-
# add the packages to systemPackages so the testscript doesn't run into any unexpected issues
-
# this shouldn't be required on production systems which have their required packages in the unit paths only
-
# but it helps in case one needs to actually run the tooling anyway
-
environment.systemPackages = with pkgs; [
-
ceph
-
cryptsetup
-
lvm2
-
];
+
# add the packages to systemPackages so the testscript doesn't run into any unexpected issues
+
# this shouldn't be required on production systems which have their required packages in the unit paths only
+
# but it helps in case one needs to actually run the tooling anyway
+
environment.systemPackages = with pkgs; [
+
ceph
+
cryptsetup
+
lvm2
+
];
-
services.ceph = {
-
enable = true;
-
client.enable = true;
-
extraConfig = {
-
public_addr = ip;
-
cluster_addr = ip;
-
# ipv6
-
ms_bind_ipv4 = "false";
-
ms_bind_ipv6 = "true";
-
# msgr2 settings
-
ms_cluster_mode = "secure";
-
ms_service_mode = "secure";
-
ms_client_mode = "secure";
-
ms_mon_cluster_mode = "secure";
-
ms_mon_service_mode = "secure";
-
ms_mon_client_mode = "secure";
-
# less default modules, cuts down on memory and startup time in the tests
-
mgr_initial_modules = "";
-
# distribute by OSD, not by host, as per https://docs.ceph.com/en/reef/cephadm/install/#single-host
-
osd_crush_chooseleaf_type = "0";
-
};
-
client.extraConfig."mon.0" = {
-
host = "ceph";
-
mon_addr = "v2:[${ip}]:3300";
-
public_addr = "v2:[${ip}]:3300";
-
};
-
global = {
-
fsid = cluster;
-
clusterNetwork = "${ip}/64";
-
publicNetwork = "${ip}/64";
-
monInitialMembers = "0";
-
};
+
services.ceph = {
+
enable = true;
+
client.enable = true;
+
extraConfig = {
+
public_addr = ip;
+
cluster_addr = ip;
+
# ipv6
+
ms_bind_ipv4 = "false";
+
ms_bind_ipv6 = "true";
+
# msgr2 settings
+
ms_cluster_mode = "secure";
+
ms_service_mode = "secure";
+
ms_client_mode = "secure";
+
ms_mon_cluster_mode = "secure";
+
ms_mon_service_mode = "secure";
+
ms_mon_client_mode = "secure";
+
# less default modules, cuts down on memory and startup time in the tests
+
mgr_initial_modules = "";
+
# distribute by OSD, not by host, as per https://docs.ceph.com/en/reef/cephadm/install/#single-host
+
osd_crush_chooseleaf_type = "0";
+
};
+
client.extraConfig."mon.0" = {
+
host = "ceph";
+
mon_addr = "v2:[${ip}]:3300";
+
public_addr = "v2:[${ip}]:3300";
+
};
+
global = {
+
fsid = cluster;
+
clusterNetwork = "${ip}/64";
+
publicNetwork = "${ip}/64";
+
monInitialMembers = "0";
+
};
-
mon = {
-
enable = true;
-
daemons = [ "0" ];
-
};
+
mon = {
+
enable = true;
+
daemons = [ "0" ];
+
};
-
osd = {
-
enable = true;
-
daemons = builtins.attrNames osd-fsid-map;
-
};
+
osd = {
+
enable = true;
+
daemons = builtins.attrNames osd-fsid-map;
+
};
-
mgr = {
-
enable = true;
-
daemons = [ "ceph" ];
-
};
-
};
+
mgr = {
+
enable = true;
+
daemons = [ "ceph" ];
+
};
+
};
-
systemd.services =
-
let
-
osd-name = id: "ceph-osd-${id}";
-
osd-pre-start = id: [
-
"!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm activate --bluestore ${id} ${osd-fsid-map.${id}} --no-systemd"
-
"${config.services.ceph.osd.package.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${id} --cluster ${config.services.ceph.global.clusterName}"
-
];
-
osd-post-stop = id: [
-
"!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm deactivate ${id}"
+
systemd.services =
+
let
+
osd-name = id: "ceph-osd-${id}";
+
osd-pre-start = id: [
+
"!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm activate --bluestore ${id} ${osd-fsid-map.${id}} --no-systemd"
+
"${config.services.ceph.osd.package.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${id} --cluster ${config.services.ceph.global.clusterName}"
+
];
+
osd-post-stop = id: [
+
"!${config.services.ceph.osd.package.out}/bin/ceph-volume lvm deactivate ${id}"
+
];
+
map-osd = id: {
+
name = osd-name id;
+
value = {
+
serviceConfig.ExecStartPre = lib.mkForce (osd-pre-start id);
+
serviceConfig.ExecStopPost = osd-post-stop id;
+
unitConfig.ConditionPathExists = lib.mkForce [ ];
+
unitConfig.StartLimitBurst = lib.mkForce 4;
+
path = with pkgs; [
+
util-linux
+
lvm2
+
cryptsetup
];
-
map-osd = id: {
-
name = osd-name id;
-
value = {
-
serviceConfig.ExecStartPre = lib.mkForce (osd-pre-start id);
-
serviceConfig.ExecStopPost = osd-post-stop id;
-
unitConfig.ConditionPathExists = lib.mkForce [ ];
-
unitConfig.StartLimitBurst = lib.mkForce 4;
-
path = with pkgs; [
-
util-linux
-
lvm2
-
cryptsetup
-
];
-
};
-
};
-
in
-
lib.pipe config.services.ceph.osd.daemons [
-
(builtins.map map-osd)
-
builtins.listToAttrs
-
];
-
};
+
};
+
};
+
in
+
lib.pipe config.services.ceph.osd.daemons [
+
(builtins.map map-osd)
+
builtins.listToAttrs
+
];
};
-
testScript =
-
{ ... }:
-
''
-
start_all()
+
testScript = ''
+
start_all()
-
ceph.wait_for_unit("default.target")
+
ceph.wait_for_unit("default.target")
-
# Bootstrap ceph-mon daemon
-
ceph.succeed(
-
"mkdir -p /var/lib/ceph/bootstrap-osd",
-
"ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
-
"ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
-
"ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'",
-
"ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
-
"ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring",
-
"monmaptool --create --fsid ${cluster} --addv 0 'v2:[${ip}]:3300/0' --clobber /tmp/ceph.initial-monmap",
-
"mkdir -p /var/lib/ceph/mon/ceph-0",
-
"ceph-mon --mkfs -i 0 --monmap /tmp/ceph.initial-monmap --keyring /tmp/ceph.mon.keyring",
-
"chown ceph:ceph -R /tmp/ceph.mon.keyring /var/lib/ceph",
-
"systemctl start ceph-mon-0.service",
-
)
+
# Bootstrap ceph-mon daemon
+
ceph.succeed(
+
"mkdir -p /var/lib/ceph/bootstrap-osd",
+
"ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+
"ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+
"ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'",
+
"ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+
"ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring",
+
"monmaptool --create --fsid ${cluster} --addv 0 'v2:[${ip}]:3300/0' --clobber /tmp/ceph.initial-monmap",
+
"mkdir -p /var/lib/ceph/mon/ceph-0",
+
"ceph-mon --mkfs -i 0 --monmap /tmp/ceph.initial-monmap --keyring /tmp/ceph.mon.keyring",
+
"chown ceph:ceph -R /tmp/ceph.mon.keyring /var/lib/ceph",
+
"systemctl start ceph-mon-0.service",
+
)
-
ceph.wait_for_unit("ceph-mon-0.service")
-
# should the mon not start or bind for some reason this gives us a better error message than the config commands running into a timeout
-
ceph.wait_for_open_port(3300, "${ip}")
-
ceph.succeed(
-
# required for HEALTH_OK
-
"ceph config set mon auth_allow_insecure_global_id_reclaim false",
-
# IPv6
-
"ceph config set global ms_bind_ipv4 false",
-
"ceph config set global ms_bind_ipv6 true",
-
# the new (secure) protocol
-
"ceph config set global ms_bind_msgr1 false",
-
"ceph config set global ms_bind_msgr2 true",
-
# just a small little thing
-
"ceph config set mon mon_compact_on_start true",
-
)
+
ceph.wait_for_unit("ceph-mon-0.service")
+
# should the mon not start or bind for some reason this gives us a better error message than the config commands running into a timeout
+
ceph.wait_for_open_port(3300, "${ip}")
+
ceph.succeed(
+
# required for HEALTH_OK
+
"ceph config set mon auth_allow_insecure_global_id_reclaim false",
+
# IPv6
+
"ceph config set global ms_bind_ipv4 false",
+
"ceph config set global ms_bind_ipv6 true",
+
# the new (secure) protocol
+
"ceph config set global ms_bind_msgr1 false",
+
"ceph config set global ms_bind_msgr2 true",
+
# just a small little thing
+
"ceph config set mon mon_compact_on_start true",
+
)
-
# Can't check ceph status until a mon is up
-
ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
+
# Can't check ceph status until a mon is up
+
ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
-
# Bootstrap OSDs (do this before starting the mgr because cryptsetup and the mgr both eat a lot of memory)
-
ceph.succeed(
-
# this will automatically do what's required for LVM, cryptsetup, and stores all the data in Ceph's internal databases
-
"ceph-volume lvm prepare --bluestore --data /dev/vdb --dmcrypt --no-systemd --osd-id 0 --osd-fsid ${osd-fsid-map."0"}",
-
"ceph-volume lvm prepare --bluestore --data /dev/vdc --dmcrypt --no-systemd --osd-id 1 --osd-fsid ${osd-fsid-map."1"}",
-
"ceph-volume lvm prepare --bluestore --data /dev/vdd --dmcrypt --no-systemd --osd-id 2 --osd-fsid ${osd-fsid-map."2"}",
-
"sudo ceph-volume lvm deactivate 0",
-
"sudo ceph-volume lvm deactivate 1",
-
"sudo ceph-volume lvm deactivate 2",
-
"chown -R ceph:ceph /var/lib/ceph",
-
)
+
# Bootstrap OSDs (do this before starting the mgr because cryptsetup and the mgr both eat a lot of memory)
+
ceph.succeed(
+
# this will automatically do what's required for LVM, cryptsetup, and stores all the data in Ceph's internal databases
+
"ceph-volume lvm prepare --bluestore --data /dev/vdb --dmcrypt --no-systemd --osd-id 0 --osd-fsid ${osd-fsid-map."0"}",
+
"ceph-volume lvm prepare --bluestore --data /dev/vdc --dmcrypt --no-systemd --osd-id 1 --osd-fsid ${osd-fsid-map."1"}",
+
"ceph-volume lvm prepare --bluestore --data /dev/vdd --dmcrypt --no-systemd --osd-id 2 --osd-fsid ${osd-fsid-map."2"}",
+
"sudo ceph-volume lvm deactivate 0",
+
"sudo ceph-volume lvm deactivate 1",
+
"sudo ceph-volume lvm deactivate 2",
+
"chown -R ceph:ceph /var/lib/ceph",
+
)
-
# Start OSDs (again, argon2id eats memory, so this happens before starting the mgr)
-
ceph.succeed(
-
"systemctl start ceph-osd-0.service",
-
"systemctl start ceph-osd-1.service",
-
"systemctl start ceph-osd-2.service",
-
)
-
ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
-
ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+
# Start OSDs (again, argon2id eats memory, so this happens before starting the mgr)
+
ceph.succeed(
+
"systemctl start ceph-osd-0.service",
+
"systemctl start ceph-osd-1.service",
+
"systemctl start ceph-osd-2.service",
+
)
+
ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
+
ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-
# Start the ceph-mgr daemon, after copying in the keyring
-
ceph.succeed(
-
"mkdir -p /var/lib/ceph/mgr/ceph-ceph/",
-
"ceph auth get-or-create -o /var/lib/ceph/mgr/ceph-ceph/keyring mgr.ceph mon 'allow profile mgr' osd 'allow *' mds 'allow *'",
-
"chown -R ceph:ceph /var/lib/ceph/mgr/ceph-ceph/",
-
"systemctl start ceph-mgr-ceph.service",
-
)
-
ceph.wait_for_unit("ceph-mgr-ceph")
-
ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
-
ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
-
ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-
ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
# Start the ceph-mgr daemon, after copying in the keyring
+
ceph.succeed(
+
"mkdir -p /var/lib/ceph/mgr/ceph-ceph/",
+
"ceph auth get-or-create -o /var/lib/ceph/mgr/ceph-ceph/keyring mgr.ceph mon 'allow profile mgr' osd 'allow *' mds 'allow *'",
+
"chown -R ceph:ceph /var/lib/ceph/mgr/ceph-ceph/",
+
"systemctl start ceph-mgr-ceph.service",
+
)
+
ceph.wait_for_unit("ceph-mgr-ceph")
+
ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
+
ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
+
ceph.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+
ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
# test the actual storage
-
ceph.succeed(
-
"ceph osd pool create single-node-test 32 32",
-
"ceph osd pool ls | grep 'single-node-test'",
+
# test the actual storage
+
ceph.succeed(
+
"ceph osd pool create single-node-test 32 32",
+
"ceph osd pool ls | grep 'single-node-test'",
-
# We need to enable an application on the pool, otherwise it will
-
# stay unhealthy in state POOL_APP_NOT_ENABLED.
-
# Creating a CephFS would do this automatically, but we haven't done that here.
-
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
-
# We use the custom application name "nixos-test" for this.
-
"ceph osd pool application enable single-node-test nixos-test",
+
# We need to enable an application on the pool, otherwise it will
+
# stay unhealthy in state POOL_APP_NOT_ENABLED.
+
# Creating a CephFS would do this automatically, but we haven't done that here.
+
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
+
# We use the custom application name "nixos-test" for this.
+
"ceph osd pool application enable single-node-test nixos-test",
-
"ceph osd pool rename single-node-test single-node-other-test",
-
"ceph osd pool ls | grep 'single-node-other-test'",
-
)
-
ceph.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
-
ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
ceph.wait_until_succeeds("ceph -s | grep '33 active+clean'")
-
ceph.fail(
-
# the old pool should be gone
-
"ceph osd pool ls | grep 'multi-node-test'",
-
# deleting the pool should fail without setting mon_allow_pool_delete
-
"ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
-
)
+
"ceph osd pool rename single-node-test single-node-other-test",
+
"ceph osd pool ls | grep 'single-node-other-test'",
+
)
+
ceph.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
+
ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
ceph.wait_until_succeeds("ceph -s | grep '33 active+clean'")
+
ceph.fail(
+
# the old pool should be gone
+
"ceph osd pool ls | grep 'multi-node-test'",
+
# deleting the pool should fail without setting mon_allow_pool_delete
+
"ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
+
)
-
# rebooting gets rid of any potential tmpfs mounts or device-mapper devices
-
ceph.shutdown()
-
ceph.start()
-
ceph.wait_for_unit("default.target")
+
# rebooting gets rid of any potential tmpfs mounts or device-mapper devices
+
ceph.shutdown()
+
ceph.start()
+
ceph.wait_for_unit("default.target")
-
# Start it up (again OSDs first due to memory constraints of cryptsetup and mgr)
-
ceph.systemctl("start ceph-mon-0.service")
-
ceph.wait_for_unit("ceph-mon-0")
-
ceph.systemctl("start ceph-osd-0.service")
-
ceph.wait_for_unit("ceph-osd-0")
-
ceph.systemctl("start ceph-osd-1.service")
-
ceph.wait_for_unit("ceph-osd-1")
-
ceph.systemctl("start ceph-osd-2.service")
-
ceph.wait_for_unit("ceph-osd-2")
-
ceph.systemctl("start ceph-mgr-ceph.service")
-
ceph.wait_for_unit("ceph-mgr-ceph")
+
# Start it up (again OSDs first due to memory constraints of cryptsetup and mgr)
+
ceph.systemctl("start ceph-mon-0.service")
+
ceph.wait_for_unit("ceph-mon-0")
+
ceph.systemctl("start ceph-osd-0.service")
+
ceph.wait_for_unit("ceph-osd-0")
+
ceph.systemctl("start ceph-osd-1.service")
+
ceph.wait_for_unit("ceph-osd-1")
+
ceph.systemctl("start ceph-osd-2.service")
+
ceph.wait_for_unit("ceph-osd-2")
+
ceph.systemctl("start ceph-mgr-ceph.service")
+
ceph.wait_for_unit("ceph-mgr-ceph")
-
# Ensure the cluster comes back up again
-
ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
-
ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
-
ceph.wait_until_succeeds("ceph osd stat | grep -E '3 osds: 3 up[^,]*, 3 in'")
-
ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
-
ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
'';
-
}
-
)
+
# Ensure the cluster comes back up again
+
ceph.succeed("ceph -s | grep 'mon: 1 daemons'")
+
ceph.wait_until_succeeds("ceph -s | grep 'quorum 0'")
+
ceph.wait_until_succeeds("ceph osd stat | grep -E '3 osds: 3 up[^,]*, 3 in'")
+
ceph.wait_until_succeeds("ceph -s | grep 'mgr: ceph(active,'")
+
ceph.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
'';
+
}
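
After this conversion each ceph test below is a plain NixOS test module instead of a make-test-python.nix call. A minimal sketch of the target shape, with a hypothetical name and node:

  { lib, ... }:
  {
    name = "example-test";  # hypothetical
    meta.maintainers = with lib.maintainers; [ lukegb ];
    nodes.machine = { pkgs, ... }: {
      environment.systemPackages = [ pkgs.ceph ];
    };
    testScript = ''
      machine.wait_for_unit("multi-user.target")
    '';
  }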
+203 -209
nixos/tests/ceph-single-node-bluestore.nix
···
-
import ./make-test-python.nix (
-
{ pkgs, lib, ... }:
+
{ lib, ... }:
-
let
-
cfg = {
-
clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
-
monA = {
-
name = "a";
-
ip = "192.168.1.1";
+
let
+
cfg = {
+
clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+
monA = {
+
name = "a";
+
ip = "192.168.1.1";
+
};
+
osd0 = {
+
name = "0";
+
key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+
uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+
};
+
osd1 = {
+
name = "1";
+
key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+
uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+
};
+
osd2 = {
+
name = "2";
+
key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+
uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+
};
+
};
+
generateCephConfig =
+
{ daemonConfig }:
+
{
+
enable = true;
+
global = {
+
fsid = cfg.clusterId;
+
monHost = cfg.monA.ip;
+
monInitialMembers = cfg.monA.name;
};
-
osd0 = {
-
name = "0";
-
key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
-
uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
-
};
-
osd1 = {
-
name = "1";
-
key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
-
uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
-
};
-
osd2 = {
-
name = "2";
-
key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
-
uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+
}
+
// daemonConfig;
+
+
generateHost =
+
{
+
cephConfig,
+
networkConfig,
+
}:
+
{ pkgs, ... }:
+
{
+
virtualisation = {
+
emptyDiskImages = [
+
20480
+
20480
+
20480
+
];
+
vlans = [ 1 ];
};
-
};
-
generateCephConfig =
-
{ daemonConfig }:
-
{
-
enable = true;
-
global = {
-
fsid = cfg.clusterId;
-
monHost = cfg.monA.ip;
-
monInitialMembers = cfg.monA.name;
-
};
-
}
-
// daemonConfig;
-
generateHost =
-
{
-
pkgs,
-
cephConfig,
-
networkConfig,
-
...
-
}:
-
{
-
virtualisation = {
-
emptyDiskImages = [
-
20480
-
20480
-
20480
-
];
-
vlans = [ 1 ];
-
};
+
networking = networkConfig;
-
networking = networkConfig;
+
environment.systemPackages = with pkgs; [
+
bash
+
sudo
+
ceph
+
xfsprogs
+
];
-
environment.systemPackages = with pkgs; [
-
bash
-
sudo
-
ceph
-
xfsprogs
-
];
+
boot.kernelModules = [ "xfs" ];
-
boot.kernelModules = [ "xfs" ];
+
services.ceph = cephConfig;
+
};
-
services.ceph = cephConfig;
+
networkMonA = {
+
dhcpcd.enable = false;
+
interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [
+
{
+
address = cfg.monA.ip;
+
prefixLength = 24;
+
}
+
];
+
};
+
cephConfigMonA = generateCephConfig {
+
daemonConfig = {
+
mon = {
+
enable = true;
+
daemons = [ cfg.monA.name ];
};
-
-
networkMonA = {
-
dhcpcd.enable = false;
-
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
-
{
-
address = cfg.monA.ip;
-
prefixLength = 24;
-
}
-
];
-
};
-
cephConfigMonA = generateCephConfig {
-
daemonConfig = {
-
mon = {
-
enable = true;
-
daemons = [ cfg.monA.name ];
-
};
-
mgr = {
-
enable = true;
-
daemons = [ cfg.monA.name ];
-
};
-
osd = {
-
enable = true;
-
daemons = [
-
cfg.osd0.name
-
cfg.osd1.name
-
cfg.osd2.name
-
];
-
};
+
mgr = {
+
enable = true;
+
daemons = [ cfg.monA.name ];
+
};
+
osd = {
+
enable = true;
+
daemons = [
+
cfg.osd0.name
+
cfg.osd1.name
+
cfg.osd2.name
+
];
};
};
+
};
-
# Following deployment is based on the manual deployment described here:
-
# https://docs.ceph.com/docs/master/install/manual-deployment/
-
# For other ways to deploy a ceph cluster, look at the documentation at
-
# https://docs.ceph.com/docs/master/
-
testscript =
-
{ ... }:
-
''
-
start_all()
+
# Following deployment is based on the manual deployment described here:
+
# https://docs.ceph.com/docs/master/install/manual-deployment/
+
# For other ways to deploy a ceph cluster, look at the documentation at
+
# https://docs.ceph.com/docs/master/
+
testScript = ''
+
start_all()
-
monA.wait_for_unit("network.target")
+
monA.wait_for_unit("network.target")
-
# Bootstrap ceph-mon daemon
-
monA.succeed(
-
"sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
-
"sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
-
"sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
-
"monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
-
"sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
-
"sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
-
"systemctl start ceph-mon-${cfg.monA.name}",
-
)
-
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
-
monA.succeed("ceph mon enable-msgr2")
-
monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
+
# Bootstrap ceph-mon daemon
+
monA.succeed(
+
"sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+
"sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+
"sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+
"monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+
"sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+
"sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+
"systemctl start ceph-mon-${cfg.monA.name}",
+
)
+
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+
monA.succeed("ceph mon enable-msgr2")
+
monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
-
# Can't check ceph status until a mon is up
-
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
# Can't check ceph status until a mon is up
+
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
-
# Start the ceph-mgr daemon, after copying in the keyring
-
monA.succeed(
-
"sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
-
"ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
-
"systemctl start ceph-mgr-${cfg.monA.name}",
-
)
-
monA.wait_for_unit("ceph-mgr-a")
-
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
# Start the ceph-mgr daemon, after copying in the keyring
+
monA.succeed(
+
"sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
+
"ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+
"systemctl start ceph-mgr-${cfg.monA.name}",
+
)
+
monA.wait_for_unit("ceph-mgr-a")
+
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-
# Bootstrap OSDs
-
monA.succeed(
-
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
-
"echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd0.name}/type",
-
"ln -sf /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}/block",
-
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
-
"echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd1.name}/type",
-
"ln -sf /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}/block",
-
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
-
"echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd2.name}/type",
-
"ln -sf /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}/block",
-
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
-
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
-
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
-
'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
-
'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
-
'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
-
)
+
# Bootstrap OSDs
+
monA.succeed(
+
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+
"echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd0.name}/type",
+
"ln -sf /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}/block",
+
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+
"echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd1.name}/type",
+
"ln -sf /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}/block",
+
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+
"echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd2.name}/type",
+
"ln -sf /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}/block",
+
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+
'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
+
'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+
'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+
)
-
# Initialize the OSDs (bluestore)
-
monA.succeed(
-
"ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
-
"ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
-
"ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
-
"chown -R ceph:ceph /var/lib/ceph/osd",
-
"systemctl start ceph-osd-${cfg.osd0.name}",
-
"systemctl start ceph-osd-${cfg.osd1.name}",
-
"systemctl start ceph-osd-${cfg.osd2.name}",
-
)
-
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
# Initialize the OSDs (bluestore)
+
monA.succeed(
+
"ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+
"ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+
"ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+
"chown -R ceph:ceph /var/lib/ceph/osd",
+
"systemctl start ceph-osd-${cfg.osd0.name}",
+
"systemctl start ceph-osd-${cfg.osd1.name}",
+
"systemctl start ceph-osd-${cfg.osd2.name}",
+
)
+
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
monA.succeed(
-
"ceph osd pool create single-node-test 32 32",
-
"ceph osd pool ls | grep 'single-node-test'",
+
monA.succeed(
+
"ceph osd pool create single-node-test 32 32",
+
"ceph osd pool ls | grep 'single-node-test'",
-
# We need to enable an application on the pool, otherwise it will
-
# stay unhealthy in state POOL_APP_NOT_ENABLED.
-
# Creating a CephFS would do this automatically, but we haven't done that here.
-
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
-
# We use the custom application name "nixos-test" for this.
-
"ceph osd pool application enable single-node-test nixos-test",
+
# We need to enable an application on the pool, otherwise it will
+
# stay unhealthy in state POOL_APP_NOT_ENABLED.
+
# Creating a CephFS would do this automatically, but we haven't done that here.
+
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
+
# We use the custom application name "nixos-test" for this.
+
"ceph osd pool application enable single-node-test nixos-test",
-
"ceph osd pool rename single-node-test single-node-other-test",
-
"ceph osd pool ls | grep 'single-node-other-test'",
-
)
-
monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
-
monA.succeed(
-
"ceph osd getcrushmap -o crush",
-
"crushtool -d crush -o decrushed",
-
"sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
-
"crushtool -c modcrush -o recrushed",
-
"ceph osd setcrushmap -i recrushed",
-
"ceph osd pool set single-node-other-test size 2",
-
)
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
-
monA.fail(
-
"ceph osd pool ls | grep 'multi-node-test'",
-
"ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
-
)
+
"ceph osd pool rename single-node-test single-node-other-test",
+
"ceph osd pool ls | grep 'single-node-other-test'",
+
)
+
monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
+
monA.succeed(
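+
# The default CRUSH rule spreads replicas across distinct hosts; with only
+
# one host the pool could never go active+clean, so the rule is rewritten
+
# to place replicas on distinct OSDs before requiring two of them.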
+
"ceph osd getcrushmap -o crush",
+
"crushtool -d crush -o decrushed",
+
"sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
+
"crushtool -c modcrush -o recrushed",
+
"ceph osd setcrushmap -i recrushed",
+
"ceph osd pool set single-node-other-test size 2",
+
)
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
+
monA.fail(
+
"ceph osd pool ls | grep 'multi-node-test'",
+
"ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
+
)
-
# Shut down ceph by stopping ceph.target.
-
monA.succeed("systemctl stop ceph.target")
+
# Shut down ceph by stopping ceph.target.
+
monA.succeed("systemctl stop ceph.target")
-
# Start it up
-
monA.succeed("systemctl start ceph.target")
-
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
-
monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
-
monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
-
monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
-
monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
+
# Start it up
+
monA.succeed("systemctl start ceph.target")
+
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+
monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
+
monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
+
monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
+
monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
-
# Ensure the cluster comes back up again
-
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
-
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
'';
-
in
-
{
-
name = "basic-single-node-ceph-cluster-bluestore";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [ lukegb ];
-
};
+
# Ensure the cluster comes back up again
+
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
'';
+
in
+
{
+
name = "basic-single-node-ceph-cluster-bluestore";
+
meta = with lib.maintainers; {
+
maintainers = [ lukegb ];
+
};
-
nodes = {
-
monA = generateHost {
-
pkgs = pkgs;
-
cephConfig = cephConfigMonA;
-
networkConfig = networkMonA;
-
};
+
nodes = {
+
monA = generateHost {
+
cephConfig = cephConfigMonA;
+
networkConfig = networkMonA;
};
+
};
-
testScript = testscript;
-
}
-
)
+
inherit testScript;
+
}
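
The same conversion recurs throughout: the old files bound a function testscript = { ... }: ''...'' and assigned it at the end, while the new modules either inline the string or keep it in the let block and write inherit testScript;. In isolation:

  # old style: a function, assigned later via `testScript = testscript;`
  testscript = { ... }: ''
    start_all()
  '';
  # new style: a plain string, assigned directly or via `inherit testScript;`
  testScript = ''
    start_all()
  '';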
+216 -222
nixos/tests/ceph-single-node.nix
···
-
import ./make-test-python.nix (
-
{ pkgs, lib, ... }:
+
{ lib, ... }:
-
let
-
cfg = {
-
clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
-
monA = {
-
name = "a";
-
ip = "192.168.1.1";
+
let
+
cfg = {
+
clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+
monA = {
+
name = "a";
+
ip = "192.168.1.1";
+
};
+
osd0 = {
+
name = "0";
+
key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+
uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+
};
+
osd1 = {
+
name = "1";
+
key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+
uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+
};
+
osd2 = {
+
name = "2";
+
key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+
uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+
};
+
};
+
generateCephConfig =
+
{ daemonConfig }:
+
{
+
enable = true;
+
global = {
+
fsid = cfg.clusterId;
+
monHost = cfg.monA.ip;
+
monInitialMembers = cfg.monA.name;
};
-
osd0 = {
-
name = "0";
-
key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
-
uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
-
};
-
osd1 = {
-
name = "1";
-
key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
-
uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
-
};
-
osd2 = {
-
name = "2";
-
key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
-
uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+
}
+
// daemonConfig;
+
+
generateHost =
+
{
+
cephConfig,
+
networkConfig,
+
}:
+
{ pkgs, ... }:
+
{
+
virtualisation = {
+
emptyDiskImages = [
+
20480
+
20480
+
20480
+
];
+
vlans = [ 1 ];
};
-
};
-
generateCephConfig =
-
{ daemonConfig }:
-
{
-
enable = true;
-
global = {
-
fsid = cfg.clusterId;
-
monHost = cfg.monA.ip;
-
monInitialMembers = cfg.monA.name;
-
};
-
}
-
// daemonConfig;
-
generateHost =
-
{
-
pkgs,
-
cephConfig,
-
networkConfig,
-
...
-
}:
-
{
-
virtualisation = {
-
emptyDiskImages = [
-
20480
-
20480
-
20480
-
];
-
vlans = [ 1 ];
-
};
+
networking = networkConfig;
-
networking = networkConfig;
+
environment.systemPackages = with pkgs; [
+
bash
+
sudo
+
ceph
+
xfsprogs
+
];
-
environment.systemPackages = with pkgs; [
-
bash
-
sudo
-
ceph
-
xfsprogs
-
];
+
boot.kernelModules = [ "xfs" ];
-
boot.kernelModules = [ "xfs" ];
+
services.ceph = cephConfig;
+
};
-
services.ceph = cephConfig;
+
networkMonA = {
+
dhcpcd.enable = false;
+
interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [
+
{
+
address = cfg.monA.ip;
+
prefixLength = 24;
+
}
+
];
+
};
+
cephConfigMonA = generateCephConfig {
+
daemonConfig = {
+
mon = {
+
enable = true;
+
daemons = [ cfg.monA.name ];
};
-
-
networkMonA = {
-
dhcpcd.enable = false;
-
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
-
{
-
address = cfg.monA.ip;
-
prefixLength = 24;
-
}
-
];
-
};
-
cephConfigMonA = generateCephConfig {
-
daemonConfig = {
-
mon = {
-
enable = true;
-
daemons = [ cfg.monA.name ];
-
};
-
mgr = {
-
enable = true;
-
daemons = [ cfg.monA.name ];
-
};
-
osd = {
-
enable = true;
-
daemons = [
-
cfg.osd0.name
-
cfg.osd1.name
-
cfg.osd2.name
-
];
-
};
+
mgr = {
+
enable = true;
+
daemons = [ cfg.monA.name ];
+
};
+
osd = {
+
enable = true;
+
daemons = [
+
cfg.osd0.name
+
cfg.osd1.name
+
cfg.osd2.name
+
];
};
};
+
};
-
# Following deployment is based on the manual deployment described here:
-
# https://docs.ceph.com/docs/master/install/manual-deployment/
-
# For other ways to deploy a ceph cluster, look at the documentation at
-
# https://docs.ceph.com/docs/master/
-
testscript =
-
{ ... }:
-
''
-
start_all()
+
# Following deployment is based on the manual deployment described here:
+
# https://docs.ceph.com/docs/master/install/manual-deployment/
+
# For other ways to deploy a ceph cluster, look at the documentation at
+
# https://docs.ceph.com/docs/master/
+
testScript = ''
+
start_all()
-
monA.wait_for_unit("network.target")
+
monA.wait_for_unit("network.target")
-
# Bootstrap ceph-mon daemon
-
monA.succeed(
-
"sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
-
"sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
-
"sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
-
"monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
-
"sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
-
"sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
-
"systemctl start ceph-mon-${cfg.monA.name}",
-
)
-
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
-
monA.succeed("ceph mon enable-msgr2")
-
monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
+
# Bootstrap ceph-mon daemon
+
monA.succeed(
+
"sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+
"sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+
"sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+
"monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+
"sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+
"sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+
"systemctl start ceph-mon-${cfg.monA.name}",
+
)
+
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+
monA.succeed("ceph mon enable-msgr2")
+
monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
-
# Can't check ceph status until a mon is up
-
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
# Can't check ceph status until a mon is up
+
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
-
# Start the ceph-mgr daemon, after copying in the keyring
-
monA.succeed(
-
"sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
-
"ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
-
"systemctl start ceph-mgr-${cfg.monA.name}",
-
)
-
monA.wait_for_unit("ceph-mgr-a")
-
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
# Start the ceph-mgr daemon, after copying in the keyring
+
monA.succeed(
+
"sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
+
"ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+
"systemctl start ceph-mgr-${cfg.monA.name}",
+
)
+
monA.wait_for_unit("ceph-mgr-a")
+
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-
# Bootstrap OSDs
-
monA.succeed(
-
"mkfs.xfs /dev/vdb",
-
"mkfs.xfs /dev/vdc",
-
"mkfs.xfs /dev/vdd",
-
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
-
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
-
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
-
"mount /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
-
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
-
"mount /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
-
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
-
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
-
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
-
'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
-
'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
-
'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
-
)
+
# Bootstrap OSDs
+
monA.succeed(
+
"mkfs.xfs /dev/vdb",
+
"mkfs.xfs /dev/vdc",
+
"mkfs.xfs /dev/vdd",
+
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+
"mount /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+
"mount /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+
'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
+
'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+
'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+
)
-
# Initialize the OSDs with regular filestore
-
monA.succeed(
-
"ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
-
"ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
-
"ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
-
"chown -R ceph:ceph /var/lib/ceph/osd",
-
"systemctl start ceph-osd-${cfg.osd0.name}",
-
"systemctl start ceph-osd-${cfg.osd1.name}",
-
"systemctl start ceph-osd-${cfg.osd2.name}",
-
)
-
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
# Initialize the OSDs with regular filestore
+
monA.succeed(
+
"ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+
"ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+
"ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+
"chown -R ceph:ceph /var/lib/ceph/osd",
+
"systemctl start ceph-osd-${cfg.osd0.name}",
+
"systemctl start ceph-osd-${cfg.osd1.name}",
+
"systemctl start ceph-osd-${cfg.osd2.name}",
+
)
+
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
monA.succeed(
-
"ceph osd pool create single-node-test 32 32",
-
"ceph osd pool ls | grep 'single-node-test'",
+
monA.succeed(
+
"ceph osd pool create single-node-test 32 32",
+
"ceph osd pool ls | grep 'single-node-test'",
-
# We need to enable an application on the pool, otherwise it will
-
# stay unhealthy in state POOL_APP_NOT_ENABLED.
-
# Creating a CephFS would do this automatically, but we haven't done that here.
-
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
-
# We use the custom application name "nixos-test" for this.
-
"ceph osd pool application enable single-node-test nixos-test",
+
# We need to enable an application on the pool, otherwise it will
+
# stay unhealthy in state POOL_APP_NOT_ENABLED.
+
# Creating a CephFS would do this automatically, but we haven't done that here.
+
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
+
# We use the custom application name "nixos-test" for this.
+
"ceph osd pool application enable single-node-test nixos-test",
-
"ceph osd pool rename single-node-test single-node-other-test",
-
"ceph osd pool ls | grep 'single-node-other-test'",
-
)
-
monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
-
monA.succeed(
-
"ceph osd getcrushmap -o crush",
-
"crushtool -d crush -o decrushed",
-
"sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
-
"crushtool -c modcrush -o recrushed",
-
"ceph osd setcrushmap -i recrushed",
-
"ceph osd pool set single-node-other-test size 2",
-
)
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
-
monA.fail(
-
"ceph osd pool ls | grep 'multi-node-test'",
-
"ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
-
)
+
"ceph osd pool rename single-node-test single-node-other-test",
+
"ceph osd pool ls | grep 'single-node-other-test'",
+
)
+
monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
+
monA.succeed(
+
"ceph osd getcrushmap -o crush",
+
"crushtool -d crush -o decrushed",
+
"sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
+
"crushtool -c modcrush -o recrushed",
+
"ceph osd setcrushmap -i recrushed",
+
"ceph osd pool set single-node-other-test size 2",
+
)
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
+
monA.fail(
+
"ceph osd pool ls | grep 'multi-node-test'",
+
"ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
+
)
-
# Shut down ceph by stopping ceph.target.
-
monA.succeed("systemctl stop ceph.target")
+
# Shut down ceph by stopping ceph.target.
+
monA.succeed("systemctl stop ceph.target")
-
# Start it up
-
monA.succeed("systemctl start ceph.target")
-
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
-
monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
-
monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
-
monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
-
monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
+
# Start it up
+
monA.succeed("systemctl start ceph.target")
+
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+
monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
+
monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
+
monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
+
monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
-
# Ensure the cluster comes back up again
-
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
-
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
-
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
# Ensure the cluster comes back up again
+
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
# Enable the dashboard and recheck health
-
monA.succeed(
-
"ceph mgr module enable dashboard",
-
"ceph config set mgr mgr/dashboard/ssl false",
-
# default is 8080 but it's better to be explicit
-
"ceph config set mgr mgr/dashboard/server_port 8080",
-
)
-
monA.wait_for_open_port(8080)
-
monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
-
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
-
'';
-
in
-
{
-
name = "basic-single-node-ceph-cluster";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [
-
lejonet
-
johanot
-
];
-
};
+
# Enable the dashboard and recheck health
+
monA.succeed(
+
"ceph mgr module enable dashboard",
+
"ceph config set mgr mgr/dashboard/ssl false",
+
# default is 8080 but it's better to be explicit
+
"ceph config set mgr mgr/dashboard/server_port 8080",
+
)
+
monA.wait_for_open_port(8080)
+
monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
+
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
'';
+
in
+
{
+
name = "basic-single-node-ceph-cluster";
+
meta = with lib.maintainers; {
+
maintainers = [
+
lejonet
+
johanot
+
];
+
};
-
nodes = {
-
monA = generateHost {
-
pkgs = pkgs;
-
cephConfig = cephConfigMonA;
-
networkConfig = networkMonA;
-
};
+
nodes = {
+
monA = generateHost {
+
cephConfig = cephConfigMonA;
+
networkConfig = networkMonA;
};
+
};
-
testScript = testscript;
-
}
-
)
+
inherit testScript;
+
}
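
Another recurring detail: lib is now taken as a module argument instead of being reached through pkgs, so pkgs.lib.maintainers and pkgs.lib.mkOverride become lib.maintainers and lib.mkOverride. A sketch, with a hypothetical maintainer name:

  # old
  { pkgs, ... }: { meta.maintainers = with pkgs.lib.maintainers; [ someone ]; }
  # new
  { lib, ... }: { meta.maintainers = with lib.maintainers; [ someone ]; }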
+2 -7
nixos/tests/certmgr.nix
···
-
{
-
system ? builtins.currentSystem,
-
config ? { },
-
pkgs ? import ../.. { inherit system config; },
-
}:
+
{ runTest, pkgs, ... }:
-
with import ../lib/testing-python.nix { inherit system pkgs; };
let
mkSpec =
{
···
specs,
testScript,
}:
-
makeTest {
+
runTest {
name = "certmgr-" + svcManager;
nodes = {
machine =
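
Files that generate several test variants, like certmgr.nix here, now take runTest as an argument instead of importing the test framework themselves. A minimal sketch of that pattern; the variant names and test bodies are hypothetical:

  { runTest, pkgs, ... }:
  let
    mkTest = svcManager: runTest {
      name = "certmgr-" + svcManager;
      nodes.machine = { ... }: { };
      testScript = ''
        machine.wait_for_unit("multi-user.target")
      '';
    };
  in
  {
    systemd = mkTest "systemd";
    command = mkTest "command";
  }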
+81 -83
nixos/tests/cfssl.nix
···
-
import ./make-test-python.nix (
-
{ pkgs, ... }:
-
{
-
name = "cfssl";
+
{ pkgs, ... }:
+
{
+
name = "cfssl";
-
nodes.machine =
-
{
-
config,
-
lib,
-
pkgs,
-
...
-
}:
-
{
-
networking.firewall.allowedTCPPorts = [ config.services.cfssl.port ];
+
nodes.machine =
+
{
+
config,
+
lib,
+
pkgs,
+
...
+
}:
+
{
+
networking.firewall.allowedTCPPorts = [ config.services.cfssl.port ];
-
services.cfssl.enable = true;
-
systemd.services.cfssl.after = [ "cfssl-init.service" ];
+
services.cfssl.enable = true;
+
systemd.services.cfssl.after = [ "cfssl-init.service" ];
-
systemd.services.cfssl-init = {
-
description = "Initialize the cfssl CA";
-
wantedBy = [ "multi-user.target" ];
-
serviceConfig = {
-
User = "cfssl";
-
Type = "oneshot";
-
WorkingDirectory = config.services.cfssl.dataDir;
-
};
-
script = with pkgs; ''
-
${cfssl}/bin/cfssl genkey -initca ${
-
pkgs.writeText "ca.json" (
-
builtins.toJSON {
-
hosts = [ "ca.example.com" ];
-
key = {
-
algo = "rsa";
-
size = 4096;
-
};
-
names = [
-
{
-
C = "US";
-
L = "San Francisco";
-
O = "Internet Widgets, LLC";
-
OU = "Certificate Authority";
-
ST = "California";
-
}
-
];
-
}
-
)
-
} | ${cfssl}/bin/cfssljson -bare ca
-
'';
+
systemd.services.cfssl-init = {
+
description = "Initialize the cfssl CA";
+
wantedBy = [ "multi-user.target" ];
+
serviceConfig = {
+
User = "cfssl";
+
Type = "oneshot";
+
WorkingDirectory = config.services.cfssl.dataDir;
};
+
script = with pkgs; ''
+
${cfssl}/bin/cfssl genkey -initca ${
+
pkgs.writeText "ca.json" (
+
builtins.toJSON {
+
hosts = [ "ca.example.com" ];
+
key = {
+
algo = "rsa";
+
size = 4096;
+
};
+
names = [
+
{
+
C = "US";
+
L = "San Francisco";
+
O = "Internet Widgets, LLC";
+
OU = "Certificate Authority";
+
ST = "California";
+
}
+
];
+
}
+
)
+
} | ${cfssl}/bin/cfssljson -bare ca
+
'';
};
+
};
-
testScript =
-
let
-
cfsslrequest =
-
with pkgs;
-
writeScript "cfsslrequest" ''
-
curl -f -X POST -H "Content-Type: application/json" -d @${csr} \
-
http://localhost:8888/api/v1/cfssl/newkey | ${cfssl}/bin/cfssljson /tmp/certificate
-
'';
-
csr = pkgs.writeText "csr.json" (
-
builtins.toJSON {
-
CN = "www.example.com";
-
hosts = [
-
"example.com"
-
"www.example.com"
-
];
-
key = {
-
algo = "rsa";
-
size = 2048;
-
};
-
names = [
-
{
-
C = "US";
-
L = "San Francisco";
-
O = "Example Company, LLC";
-
OU = "Operations";
-
ST = "California";
-
}
-
];
-
}
-
);
-
in
-
''
-
machine.wait_for_unit("cfssl.service")
-
machine.wait_until_succeeds("${cfsslrequest}")
-
machine.succeed("ls /tmp/certificate-key.pem")
-
'';
-
}
-
)
+
testScript =
+
let
+
cfsslrequest =
+
with pkgs;
+
writeScript "cfsslrequest" ''
+
curl -f -X POST -H "Content-Type: application/json" -d @${csr} \
+
http://localhost:8888/api/v1/cfssl/newkey | ${cfssl}/bin/cfssljson /tmp/certificate
+
'';
+
csr = pkgs.writeText "csr.json" (
+
builtins.toJSON {
+
CN = "www.example.com";
+
hosts = [
+
"example.com"
+
"www.example.com"
+
];
+
key = {
+
algo = "rsa";
+
size = 2048;
+
};
+
names = [
+
{
+
C = "US";
+
L = "San Francisco";
+
O = "Example Company, LLC";
+
OU = "Operations";
+
ST = "California";
+
}
+
];
+
}
+
);
+
in
+
''
+
machine.wait_for_unit("cfssl.service")
+
machine.wait_until_succeeds("${cfsslrequest}")
+
machine.succeed("ls /tmp/certificate-key.pem")
+
'';
+
}
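
One pattern worth noting in cfssl.nix: the JSON request bodies are generated from Nix attrsets with builtins.toJSON and pkgs.writeText instead of being hand-maintained JSON files. The same pattern in isolation, with hypothetical contents:

  request = pkgs.writeText "request.json" (
    builtins.toJSON {
      CN = "example.org";
      key = {
        algo = "rsa";
        size = 2048;
      };
    }
  );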
+18 -26
nixos/tests/chrony-ptp.nix
···
-
import ./make-test-python.nix (
-
{ lib, ... }:
-
{
-
name = "chrony-ptp";
+
{ lib, ... }:
+
{
+
name = "chrony-ptp";
-
meta = {
-
maintainers = with lib.maintainers; [ gkleen ];
-
};
+
meta.maintainers = with lib.maintainers; [ gkleen ];
-
nodes = {
-
qemuGuest =
-
{ lib, ... }:
-
{
-
boot.kernelModules = [ "ptp_kvm" ];
+
nodes.qemuGuest = {
+
boot.kernelModules = [ "ptp_kvm" ];
-
services.chrony = {
-
enable = true;
-
extraConfig = ''
-
refclock PHC /dev/ptp_kvm poll 2 dpoll -2 offset 0 stratum 3
-
'';
-
};
-
};
+
services.chrony = {
+
enable = true;
+
extraConfig = ''
+
refclock PHC /dev/ptp_kvm poll 2 dpoll -2 offset 0 stratum 3
+
'';
};
+
};
-
testScript = ''
-
start_all()
+
testScript = ''
+
start_all()
-
qemuGuest.wait_for_unit('multi-user.target')
-
qemuGuest.succeed('systemctl is-active chronyd.service')
-
'';
-
}
-
)
+
qemuGuest.wait_for_unit('multi-user.target')
+
qemuGuest.succeed('systemctl is-active chronyd.service')
+
'';
+
}
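
The refclock line above is dense; here is the same directive annotated (a sketch, assuming chrony's documented refclock options):

  services.chrony.extraConfig = ''
    # PHC = PTP hardware clock; /dev/ptp_kvm is the KVM host clock exposed
    # by the ptp_kvm kernel module. poll and dpoll are polling intervals in
    # log2 seconds, and stratum 3 is the stratum this source is reported at.
    refclock PHC /dev/ptp_kvm poll 2 dpoll -2 offset 0 stratum 3
  '';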
+18 -24
nixos/tests/chrony.nix
···
-
import ./make-test-python.nix (
-
{ lib, ... }:
-
{
-
name = "chrony";
+
{ lib, ... }:
+
{
+
name = "chrony";
-
meta = {
-
maintainers = with lib.maintainers; [ fpletz ];
-
};
+
meta.maintainers = with lib.maintainers; [ fpletz ];
-
nodes = {
-
machine = {
-
services.chrony.enable = true;
+
nodes.machine = {
+
services.chrony.enable = true;
-
specialisation.hardened.configuration = {
-
services.chrony.enableMemoryLocking = true;
-
};
-
};
+
specialisation.hardened.configuration = {
+
services.chrony.enableMemoryLocking = true;
};
+
};
-
testScript = ''
-
machine.start()
-
machine.wait_for_unit('multi-user.target')
-
machine.succeed('systemctl is-active chronyd.service')
-
machine.succeed('/run/booted-system/specialisation/hardened/bin/switch-to-configuration test')
-
machine.succeed('systemctl restart chronyd.service')
-
machine.wait_for_unit('chronyd.service')
-
'';
-
}
-
)
+
testScript = ''
+
machine.start()
+
machine.wait_for_unit('multi-user.target')
+
machine.succeed('systemctl is-active chronyd.service')
+
machine.succeed('/run/booted-system/specialisation/hardened/bin/switch-to-configuration test')
+
machine.succeed('systemctl restart chronyd.service')
+
machine.wait_for_unit('chronyd.service')
+
'';
+
}
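
chrony.nix exercises a configuration variant without a second VM: the hardened settings live in a specialisation, and the test switches the booted system into it. The variant definition in isolation:

  specialisation.hardened.configuration = {
    services.chrony.enableMemoryLocking = true;
  };
  # activated at runtime by the test script via
  #   /run/booted-system/specialisation/hardened/bin/switch-to-configuration test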
+6 -17
nixos/tests/cloud-init-hostname.nix
···
-
{
-
system ? builtins.currentSystem,
-
config ? { },
-
pkgs ? import ../.. { inherit system config; },
-
}:
-
-
with import ../lib/testing-python.nix { inherit system pkgs; };
-
with pkgs.lib;
-
+
{ lib, pkgs, ... }:
let
# Hostname can also be set through "hostname" in user-data.
# This is how proxmox configures hostname through cloud-init.
···
${pkgs.cdrkit}/bin/genisoimage -volid cidata -joliet -rock -o $out/metadata.iso $out/iso
'';
};
-
in
-
makeTest {
+
{
name = "cloud-init-hostname";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [
-
lewo
-
illustris
-
];
-
};
+
meta.maintainers = with lib.maintainers; [
+
lewo
+
illustris
+
];
nodes.machine2 =
{ ... }:
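
The elided metadataDrive derivation ends in the genisoimage call shown above; for context, a minimal NoCloud seed built the same way (a sketch assuming cloud-init's usual user-data/meta-data layout, with a hypothetical hostname):

  metadataDrive = pkgs.runCommand "metadata" { } ''
    mkdir -p $out/iso
    # NoCloud seed: the hostname comes from user-data; meta-data may be empty
    echo '#cloud-config' > $out/iso/user-data
    echo 'hostname: testhostname' >> $out/iso/user-data
    touch $out/iso/meta-data
    ${pkgs.cdrkit}/bin/genisoimage -volid cidata -joliet -rock -o $out/metadata.iso $out/iso
  '';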
+15 -24
nixos/tests/cloud-init.nix
···
-
{
-
system ? builtins.currentSystem,
-
config ? { },
-
pkgs ? import ../.. { inherit system config; },
-
}:
-
-
with import ../lib/testing-python.nix { inherit system pkgs; };
-
with pkgs.lib;
+
{ lib, pkgs, ... }:
let
inherit (import ./ssh-keys.nix pkgs)
···
};
in
-
makeTest {
+
{
name = "cloud-init";
-
meta.maintainers = with pkgs.lib.maintainers; [
+
meta.maintainers = with lib.maintainers; [
lewo
illustris
];
-
nodes.machine =
-
{ ... }:
-
{
-
virtualisation.qemu.options = [
-
"-cdrom"
-
"${metadataDrive}/metadata.iso"
-
];
-
services.cloud-init = {
-
enable = true;
-
network.enable = true;
-
};
-
services.openssh.enable = true;
-
networking.hostName = "";
-
networking.useDHCP = false;
+
nodes.machine = {
+
virtualisation.qemu.options = [
+
"-cdrom"
+
"${metadataDrive}/metadata.iso"
+
];
+
services.cloud-init = {
+
enable = true;
+
network.enable = true;
};
+
services.openssh.enable = true;
+
networking.hostName = "";
+
networking.useDHCP = false;
+
};
testScript = ''
# Wait until cloud-init has finished its run
unnamed.wait_for_unit("cloud-init-local.service")
+24 -36
nixos/tests/cntr.nix
···
# Test for cntr tool
-
{
-
system ? builtins.currentSystem,
-
config ? { },
-
pkgs ? import ../.. { inherit system config; },
-
lib ? pkgs.lib,
-
}:
+
+
{ runTest, lib }:
let
-
inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
-
mkOCITest =
backend:
-
makeTest {
+
runTest {
name = "cntr-${backend}";
-
meta = {
-
maintainers = with lib.maintainers; [
-
sorki
-
mic92
-
];
-
};
+
meta.maintainers = with lib.maintainers; [
+
sorki
+
mic92
+
];
-
nodes = {
-
${backend} =
-
{ pkgs, ... }:
-
{
-
environment.systemPackages = [ pkgs.cntr ];
-
virtualisation.oci-containers = {
-
inherit backend;
-
containers.nginx = {
-
image = "nginx-container";
-
imageStream = pkgs.dockerTools.examples.nginxStream;
-
ports = [ "8181:80" ];
-
};
+
nodes.${backend} =
+
{ pkgs, ... }:
+
{
+
environment.systemPackages = [ pkgs.cntr ];
+
virtualisation.oci-containers = {
+
inherit backend;
+
containers.nginx = {
+
image = "nginx-container";
+
imageStream = pkgs.dockerTools.examples.nginxStream;
+
ports = [ "8181:80" ];
};
};
-
};
+
};
testScript = ''
start_all()
···
'';
};
-
mkContainersTest = makeTest {
+
mkContainersTest = runTest {
name = "cntr-containers";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [
-
sorki
-
mic92
-
];
-
};
+
meta.maintainers = with lib.maintainers; [
+
sorki
+
mic92
+
];
nodes.machine =
-
{ lib, ... }:
+
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.cntr ];
containers.test = {
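
The elided tail of cntr.nix presumably instantiates mkOCITest once per OCI backend and merges in mkContainersTest; one hypothetical way to write that:

  in
  lib.genAttrs [ "docker" "podman" ] mkOCITest
  // { containers = mkContainersTest; }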
+24 -28
nixos/tests/cockroachdb.nix
···
# requirements, but would probably allow both aarch64/x86_64 to work.
#
+
{ lib, ... }:
+
let
-
# Creates a node. If the 'joinNode' parameter, a string containing an IP address,
# is non-null, then the CockroachDB server will attempt to join/connect to
# the cluster node specified at that address.
···
config,
...
}:
-
{
# Bank/TPC-C benchmarks take some memory to complete
virtualisation.memorySize = 2048;
···
${pkgs.chrony}/bin/chronyc waitsync
'';
};
-
in
-
import ./make-test-python.nix (
-
{ pkgs, ... }:
-
{
-
name = "cockroachdb";
-
meta.maintainers = with pkgs.lib.maintainers; [ thoughtpolice ];
+
{
+
name = "cockroachdb";
+
meta.maintainers = with lib.maintainers; [ thoughtpolice ];
-
nodes = {
-
node1 = makeNode "country=us,region=east,dc=1" "192.168.1.1" null;
-
node2 = makeNode "country=us,region=west,dc=2b" "192.168.1.2" "192.168.1.1";
-
node3 = makeNode "country=eu,region=west,dc=2" "192.168.1.3" "192.168.1.1";
-
};
+
nodes = {
+
node1 = makeNode "country=us,region=east,dc=1" "192.168.1.1" null;
+
node2 = makeNode "country=us,region=west,dc=2b" "192.168.1.2" "192.168.1.1";
+
node3 = makeNode "country=eu,region=west,dc=2" "192.168.1.3" "192.168.1.1";
+
};
-
# NOTE: All the nodes must start in order and you must NOT use startAll, because
-
# there's otherwise no way to guarantee that node1 will start before the others try
-
# to join it.
-
testScript = ''
-
for node in node1, node2, node3:
-
node.start()
-
node.wait_for_unit("cockroachdb")
-
node1.succeed(
-
"cockroach sql --host=192.168.1.1 --insecure -e 'SHOW ALL CLUSTER SETTINGS' 2>&1",
-
"cockroach workload init bank 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
-
"cockroach workload run bank --duration=1m 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
-
)
-
'';
-
}
-
)
+
# NOTE: All the nodes must start in order and you must NOT use startAll, because
+
# there's otherwise no way to guarantee that node1 will start before the others try
+
# to join it.
+
testScript = ''
+
for node in node1, node2, node3:
+
node.start()
+
node.wait_for_unit("cockroachdb")
+
node1.succeed(
+
"cockroach sql --host=192.168.1.1 --insecure -e 'SHOW ALL CLUSTER SETTINGS' 2>&1",
+
"cockroach workload init bank 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
+
"cockroach workload run bank --duration=1m 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
+
)
+
'';
+
}
+43 -49
nixos/tests/corerad.nix
···
-
import ./make-test-python.nix ({
+
{
name = "corerad";
nodes = {
-
router =
-
{ config, pkgs, ... }:
-
{
-
config = {
-
# This machine simulates a router with IPv6 forwarding and a static IPv6 address.
-
boot.kernel.sysctl = {
-
"net.ipv6.conf.all.forwarding" = true;
-
};
-
networking.interfaces.eth1 = {
-
ipv6.addresses = [
-
{
-
address = "fd00:dead:beef:dead::1";
-
prefixLength = 64;
-
}
-
];
-
};
-
services.corerad = {
-
enable = true;
-
# Serve router advertisements to the client machine with prefix information matching
-
# any IPv6 /64 prefixes configured on this interface.
-
#
-
# This configuration is identical to the example in the CoreRAD NixOS module.
-
settings = {
-
interfaces = [
-
{
-
name = "eth0";
-
monitor = true;
-
}
-
{
-
name = "eth1";
-
advertise = true;
-
prefix = [ { prefix = "::/64"; } ];
-
}
-
];
-
debug = {
-
address = "localhost:9430";
-
prometheus = true;
-
};
-
};
+
router = {
+
# This machine simulates a router with IPv6 forwarding and a static IPv6 address.
+
boot.kernel.sysctl = {
+
"net.ipv6.conf.all.forwarding" = true;
+
};
+
networking.interfaces.eth1 = {
+
ipv6.addresses = [
+
{
+
address = "fd00:dead:beef:dead::1";
+
prefixLength = 64;
+
}
+
];
+
};
+
services.corerad = {
+
enable = true;
+
# Serve router advertisements to the client machine with prefix information matching
+
# any IPv6 /64 prefixes configured on this interface.
+
#
+
# This configuration is identical to the example in the CoreRAD NixOS module.
+
settings = {
+
interfaces = [
+
{
+
name = "eth0";
+
monitor = true;
+
}
+
{
+
name = "eth1";
+
advertise = true;
+
prefix = [ { prefix = "::/64"; } ];
+
}
+
];
+
debug = {
+
address = "localhost:9430";
+
prometheus = true;
};
};
};
+
};
client =
-
{ config, pkgs, ... }:
+
{ pkgs, ... }:
{
# Use IPv6 SLAAC from router advertisements, and install rdisc6 so we can
# trigger one immediately.
-
config = {
-
boot.kernel.sysctl = {
-
"net.ipv6.conf.all.autoconf" = true;
-
};
-
environment.systemPackages = with pkgs; [
-
ndisc6
-
];
+
boot.kernel.sysctl = {
+
"net.ipv6.conf.all.autoconf" = true;
};
+
environment.systemPackages = with pkgs; [
+
ndisc6
+
];
};
};
···
"corerad_build_info" in out
), "Build info metric was not found in Prometheus output"
'';
-
})
+
}
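
Besides dropping the make-test-python.nix wrapper, the corerad conversion removes a redundant config = { ... }; layer: a module that declares no options can state its configuration directly. The two forms are equivalent:

  # explicit (old)
  { config, pkgs, ... }:
  {
    config = {
      services.corerad.enable = true;
    };
  }
  # shorthand (new)
  {
    services.corerad.enable = true;
  }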
+15 -20
nixos/tests/cri-o.nix
···
-
# This test runs CRI-O and verifies via critest
-
import ./make-test-python.nix (
-
{ pkgs, ... }:
-
{
-
name = "cri-o";
-
meta.maintainers = with pkgs.lib; teams.podman.members;
+
{ lib, ... }:
+
{
+
name = "cri-o";
+
meta.maintainers = lib.teams.podman.members;
-
nodes = {
-
crio = {
-
virtualisation.cri-o.enable = true;
-
};
-
};
+
nodes.crio = {
+
virtualisation.cri-o.enable = true;
+
};
-
testScript = ''
-
start_all()
-
crio.wait_for_unit("crio.service")
-
crio.succeed(
-
"critest --ginkgo.focus='Runtime info' --runtime-endpoint unix:///var/run/crio/crio.sock"
-
)
-
'';
-
}
-
)
+
testScript = ''
+
start_all()
+
crio.wait_for_unit("crio.service")
+
crio.succeed(
+
"critest --ginkgo.focus='Runtime info' --runtime-endpoint unix:///var/run/crio/crio.sock"
+
)
+
'';
+
}
+4 -10
nixos/tests/custom-ca.nix
···
# The test checks that certificates issued by a custom
# trusted CA are accepted but those from an unknown CA are rejected.
-
{
-
system ? builtins.currentSystem,
-
config ? { },
-
pkgs ? import ../.. { inherit system config; },
-
}:
-
-
with import ../lib/testing-python.nix { inherit system pkgs; };
+
{ runTest, pkgs }:
let
inherit (pkgs) lib;
···
};
};
-
curlTest = makeTest {
+
curlTest = runTest {
name = "custom-ca-curl";
meta.maintainers = with lib.maintainers; [ rnhmjoj ];
nodes.machine = { ... }: webserverConfig;
···
mkBrowserTest =
browser: testParams:
-
makeTest {
+
runTest {
name = "custom-ca-${browser}";
meta.maintainers = with lib.maintainers; [ rnhmjoj ];
···
{
curl = curlTest;
}
-
// pkgs.lib.mapAttrs mkBrowserTest {
+
// lib.mapAttrs mkBrowserTest {
firefox = {
error = "Security Risk";
};
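
custom-ca.nix therefore evaluates to an attribute set of independent tests: the curl test merged with one browser test per entry via lib.mapAttrs. The shape, with a hypothetical second browser:

  {
    curl = curlTest;
  }
  // lib.mapAttrs mkBrowserTest {
    firefox = { error = "Security Risk"; };
    chromium = { error = "not secure"; }; # hypothetical entry
  }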
+1 -1
nixos/tests/dhparams.nix
···
-
import ./make-test-python.nix {
+
{
name = "dhparams";
nodes.machine =
+33 -37
nixos/tests/dnscrypt-proxy2.nix
···
-
import ./make-test-python.nix (
-
{ pkgs, ... }:
-
let
-
localProxyPort = 43;
-
in
-
{
-
name = "dnscrypt-proxy2";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [ joachifm ];
-
};
+
{ lib, ... }:
+
let
+
localProxyPort = 43;
+
in
+
{
+
name = "dnscrypt-proxy2";
+
meta.maintainers = with lib.maintainers; [ joachifm ];
-
nodes = {
-
# A client running the recommended setup: DNSCrypt proxy as a forwarder
-
# for a caching DNS client.
-
client =
-
{ ... }:
-
{
-
security.apparmor.enable = true;
+
nodes = {
+
# A client running the recommended setup: DNSCrypt proxy as a forwarder
+
# for a caching DNS client.
+
client =
+
{ ... }:
+
{
+
security.apparmor.enable = true;
-
services.dnscrypt-proxy2.enable = true;
-
services.dnscrypt-proxy2.settings = {
-
listen_addresses = [ "127.0.0.1:${toString localProxyPort}" ];
-
sources.public-resolvers = {
-
urls = [ "https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md" ];
-
cache_file = "public-resolvers.md";
-
minisign_key = "RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3";
-
refresh_delay = 72;
-
};
+
services.dnscrypt-proxy2.enable = true;
+
services.dnscrypt-proxy2.settings = {
+
listen_addresses = [ "127.0.0.1:${toString localProxyPort}" ];
+
sources.public-resolvers = {
+
urls = [ "https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md" ];
+
cache_file = "public-resolvers.md";
+
minisign_key = "RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3";
+
refresh_delay = 72;
};
+
};
-
services.dnsmasq.enable = true;
-
services.dnsmasq.settings.server = [ "127.0.0.1#${toString localProxyPort}" ];
-
};
-
};
+
services.dnsmasq.enable = true;
+
services.dnsmasq.settings.server = [ "127.0.0.1#${toString localProxyPort}" ];
+
};
+
};
-
testScript = ''
-
client.wait_for_unit("dnsmasq")
-
client.wait_for_unit("dnscrypt-proxy2")
-
client.wait_until_succeeds("ss --numeric --udp --listening | grep -q ${toString localProxyPort}")
-
'';
-
}
-
)
+
testScript = ''
+
client.wait_for_unit("dnsmasq")
+
client.wait_for_unit("dnscrypt-proxy2")
+
client.wait_until_succeeds("ss --numeric --udp --listening | grep -q ${toString localProxyPort}")
+
'';
+
}
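
A syntax detail in the dnsmasq line above: dnsmasq separates an upstream's address from its port with '#', so the stub resolver on port 53 forwards to the DNSCrypt listener on localProxyPort. In isolation:

  # '#43' is dnsmasq's address#port syntax, not a comment
  services.dnsmasq.settings.server = [ "127.0.0.1#43" ];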
+522 -524
nixos/tests/docker-tools.nix
···
# This test creates a simple GNU image with dockerTools and checks that it executes
-
import ./make-test-python.nix (
-
{ pkgs, ... }:
-
let
-
# nixpkgs#214434: dockerTools.buildImage fails to unpack base images
-
# containing duplicate layers when those duplicate tarballs
-
# appear under the manifest's 'Layers'. Docker can generate images
-
# like this even though dockerTools does not.
-
repeatedLayerTestImage =
-
let
-
# Rootfs diffs for layers 1 and 2 are identical (and empty)
-
layer1 = pkgs.dockerTools.buildImage { name = "empty"; };
-
layer2 = layer1.overrideAttrs (_: {
-
fromImage = layer1;
-
});
-
repeatedRootfsDiffs =
-
pkgs.runCommand "image-with-links.tar"
-
{
-
nativeBuildInputs = [ pkgs.jq ];
-
}
-
''
-
mkdir contents
-
tar -xf "${layer2}" -C contents
-
cd contents
-
first_rootfs=$(jq -r '.[0].Layers[0]' manifest.json)
-
second_rootfs=$(jq -r '.[0].Layers[1]' manifest.json)
-
target_rootfs=$(sha256sum "$first_rootfs" | cut -d' ' -f 1).tar
+
{ pkgs, ... }:
+
let
+
# nixpkgs#214434: dockerTools.buildImage fails to unpack base images
+
# containing duplicate layers when those duplicate tarballs
+
# appear under the manifest's 'Layers'. Docker can generate images
+
# like this even though dockerTools does not.
+
repeatedLayerTestImage =
+
let
+
# Rootfs diffs for layers 1 and 2 are identical (and empty)
+
layer1 = pkgs.dockerTools.buildImage { name = "empty"; };
+
layer2 = layer1.overrideAttrs (_: {
+
fromImage = layer1;
+
});
+
repeatedRootfsDiffs =
+
pkgs.runCommand "image-with-links.tar"
+
{
+
nativeBuildInputs = [ pkgs.jq ];
+
}
+
''
+
mkdir contents
+
tar -xf "${layer2}" -C contents
+
cd contents
+
first_rootfs=$(jq -r '.[0].Layers[0]' manifest.json)
+
second_rootfs=$(jq -r '.[0].Layers[1]' manifest.json)
+
target_rootfs=$(sha256sum "$first_rootfs" | cut -d' ' -f 1).tar
-
# Replace duplicated rootfs diffs with symlinks to one tarball
-
chmod -R ug+w .
-
mv "$first_rootfs" "$target_rootfs"
-
rm "$second_rootfs"
-
ln -s "../$target_rootfs" "$first_rootfs"
-
ln -s "../$target_rootfs" "$second_rootfs"
+
# Replace duplicated rootfs diffs with symlinks to one tarball
+
chmod -R ug+w .
+
mv "$first_rootfs" "$target_rootfs"
+
rm "$second_rootfs"
+
ln -s "../$target_rootfs" "$first_rootfs"
+
ln -s "../$target_rootfs" "$second_rootfs"
-
# Update manifest's layers to use the symlinks' target
-
cat manifest.json | \
-
jq ".[0].Layers[0] = \"$target_rootfs\"" |
-
jq ".[0].Layers[1] = \"$target_rootfs\"" > manifest.json.new
-
mv manifest.json.new manifest.json
+
# Update manifest's layers to use the symlinks' target
+
cat manifest.json | \
+
jq ".[0].Layers[0] = \"$target_rootfs\"" |
+
jq ".[0].Layers[1] = \"$target_rootfs\"" > manifest.json.new
+
mv manifest.json.new manifest.json
-
tar --sort=name --hard-dereference -cf $out .
-
'';
-
in
-
pkgs.dockerTools.buildImage {
-
fromImage = repeatedRootfsDiffs;
-
name = "repeated-layer-test";
-
tag = "latest";
-
copyToRoot = pkgs.bash;
-
# A runAsRoot script is required to force previous layers to be unpacked
-
runAsRoot = ''
-
echo 'runAsRoot has run.'
-
'';
-
};
-
-
chownTestImage = pkgs.dockerTools.streamLayeredImage {
-
name = "chown-test";
+
tar --sort=name --hard-dereference -cf $out .
+
'';
+
in
+
pkgs.dockerTools.buildImage {
+
fromImage = repeatedRootfsDiffs;
+
name = "repeated-layer-test";
tag = "latest";
-
enableFakechroot = true;
-
fakeRootCommands = ''
-
touch /testfile
-
chown 12345:12345 /testfile
+
copyToRoot = pkgs.bash;
+
# A runAsRoot script is required to force previous layers to be unpacked
+
runAsRoot = ''
+
echo 'runAsRoot has run.'
'';
-
config.Cmd = [
+
};
+
+
chownTestImage = pkgs.dockerTools.streamLayeredImage {
+
name = "chown-test";
+
tag = "latest";
+
enableFakechroot = true;
+
fakeRootCommands = ''
+
touch /testfile
+
chown 12345:12345 /testfile
+
'';
+
config.Cmd = [
+
"${pkgs.coreutils}/bin/stat"
+
"-c"
+
"%u:%g"
+
"/testfile"
+
];
+
};
+
+
nonRootTestImage = pkgs.dockerTools.streamLayeredImage {
+
name = "non-root-test";
+
tag = "latest";
+
uid = 1000;
+
gid = 1000;
+
uname = "user";
+
gname = "user";
+
config = {
+
User = "user";
+
Cmd = [
"${pkgs.coreutils}/bin/stat"
"-c"
"%u:%g"
-
"/testfile"
+
"${pkgs.coreutils}/bin/stat"
];
};
+
};
+
in
+
{
+
name = "docker-tools";
+
meta = with pkgs.lib.maintainers; {
+
maintainers = [
+
lnl7
+
roberth
+
];
+
};
-
nonRootTestImage = pkgs.dockerTools.streamLayeredImage {
-
name = "non-root-test";
-
tag = "latest";
-
uid = 1000;
-
gid = 1000;
-
uname = "user";
-
gname = "user";
-
config = {
-
User = "user";
-
Cmd = [
-
"${pkgs.coreutils}/bin/stat"
-
"-c"
-
"%u:%g"
-
"${pkgs.coreutils}/bin/stat"
-
];
+
nodes = {
+
docker =
+
{ ... }:
+
{
+
virtualisation = {
+
diskSize = 3072;
+
docker.enable = true;
+
};
};
-
};
-
in
-
{
-
name = "docker-tools";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [
-
lnl7
-
roberth
-
];
-
};
+
};
-
nodes = {
-
docker =
-
{ ... }:
-
{
-
virtualisation = {
-
diskSize = 3072;
-
docker.enable = true;
-
};
-
};
-
};
+
testScript = with pkgs.dockerTools; ''
+
unix_time_second1 = "1970-01-01T00:00:01Z"
-
testScript = with pkgs.dockerTools; ''
-
unix_time_second1 = "1970-01-01T00:00:01Z"
+
docker.wait_for_unit("sockets.target")
-
docker.wait_for_unit("sockets.target")
+
with subtest("includeStorePath"):
+
with subtest("assumption"):
+
docker.succeed("${examples.helloOnRoot} | docker load")
+
docker.succeed("docker run --rm hello | grep -i hello")
+
docker.succeed("docker image rm hello:latest")
-
with subtest("includeStorePath"):
-
with subtest("assumption"):
-
docker.succeed("${examples.helloOnRoot} | docker load")
-
docker.succeed("docker run --rm hello | grep -i hello")
-
docker.succeed("docker image rm hello:latest")
+
with subtest("includeStorePath = false; breaks example"):
+
docker.succeed("${examples.helloOnRootNoStore} | docker load")
+
docker.fail("docker run --rm hello | grep -i hello")
+
docker.succeed("docker image rm hello:latest")
+
with subtest("includeStorePath = false; breaks example (fakechroot)"):
+
docker.succeed("${examples.helloOnRootNoStoreFakechroot} | docker load")
+
docker.fail("docker run --rm hello | grep -i hello")
+
docker.succeed("docker image rm hello:latest")
-
with subtest("includeStorePath = false; breaks example"):
-
docker.succeed("${examples.helloOnRootNoStore} | docker load")
-
docker.fail("docker run --rm hello | grep -i hello")
-
docker.succeed("docker image rm hello:latest")
-
with subtest("includeStorePath = false; breaks example (fakechroot)"):
-
docker.succeed("${examples.helloOnRootNoStoreFakechroot} | docker load")
-
docker.fail("docker run --rm hello | grep -i hello")
-
docker.succeed("docker image rm hello:latest")
+
with subtest("Ensure ZERO paths are added to the store"):
+
docker.fail("${examples.helloOnRootNoStore} | ${pkgs.crane}/bin/crane export - - | tar t | grep 'nix/store/'")
+
with subtest("Ensure ZERO paths are added to the store (fakechroot)"):
+
docker.fail("${examples.helloOnRootNoStoreFakechroot} | ${pkgs.crane}/bin/crane export - - | tar t | grep 'nix/store/'")
-
with subtest("Ensure ZERO paths are added to the store"):
-
docker.fail("${examples.helloOnRootNoStore} | ${pkgs.crane}/bin/crane export - - | tar t | grep 'nix/store/'")
-
with subtest("Ensure ZERO paths are added to the store (fakechroot)"):
-
docker.fail("${examples.helloOnRootNoStoreFakechroot} | ${pkgs.crane}/bin/crane export - - | tar t | grep 'nix/store/'")
+
with subtest("includeStorePath = false; works with mounted store"):
+
docker.succeed("${examples.helloOnRootNoStore} | docker load")
+
docker.succeed("docker run --rm --volume ${builtins.storeDir}:${builtins.storeDir}:ro hello | grep -i hello")
+
docker.succeed("docker image rm hello:latest")
+
with subtest("includeStorePath = false; works with mounted store (fakechroot)"):
+
docker.succeed("${examples.helloOnRootNoStoreFakechroot} | docker load")
+
docker.succeed("docker run --rm --volume ${builtins.storeDir}:${builtins.storeDir}:ro hello | grep -i hello")
+
docker.succeed("docker image rm hello:latest")
-
with subtest("includeStorePath = false; works with mounted store"):
-
docker.succeed("${examples.helloOnRootNoStore} | docker load")
-
docker.succeed("docker run --rm --volume ${builtins.storeDir}:${builtins.storeDir}:ro hello | grep -i hello")
-
docker.succeed("docker image rm hello:latest")
-
with subtest("includeStorePath = false; works with mounted store (fakechroot)"):
-
docker.succeed("${examples.helloOnRootNoStoreFakechroot} | docker load")
-
docker.succeed("docker run --rm --volume ${builtins.storeDir}:${builtins.storeDir}:ro hello | grep -i hello")
-
docker.succeed("docker image rm hello:latest")
+
with subtest("Ensure Docker images use a stable date by default"):
+
docker.succeed(
+
"docker load --input='${examples.bash}'"
+
)
+
assert unix_time_second1 in docker.succeed(
+
"docker inspect ${examples.bash.imageName} "
+
+ "| ${pkgs.jq}/bin/jq -r .[].Created",
+
)
-
with subtest("Ensure Docker images use a stable date by default"):
-
docker.succeed(
-
"docker load --input='${examples.bash}'"
-
)
-
assert unix_time_second1 in docker.succeed(
-
"docker inspect ${examples.bash.imageName} "
-
+ "| ${pkgs.jq}/bin/jq -r .[].Created",
-
)
+
docker.succeed("docker run --rm ${examples.bash.imageName} bash --version")
+
# Check imageTag attribute matches image
+
docker.succeed("docker images --format '{{.Tag}}' | grep -F '${examples.bash.imageTag}'")
+
docker.succeed("docker rmi ${examples.bash.imageName}")
-
docker.succeed("docker run --rm ${examples.bash.imageName} bash --version")
-
# Check imageTag attribute matches image
-
docker.succeed("docker images --format '{{.Tag}}' | grep -F '${examples.bash.imageTag}'")
-
docker.succeed("docker rmi ${examples.bash.imageName}")
+
# The remaining combinations
+
with subtest("Ensure imageTag attribute matches image"):
+
docker.succeed(
+
"docker load --input='${examples.bashNoTag}'"
+
)
+
docker.succeed(
+
"docker images --format '{{.Tag}}' | grep -F '${examples.bashNoTag.imageTag}'"
+
)
+
docker.succeed("docker rmi ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag}")
-
# The remaining combinations
-
with subtest("Ensure imageTag attribute matches image"):
-
docker.succeed(
-
"docker load --input='${examples.bashNoTag}'"
-
)
-
docker.succeed(
-
"docker images --format '{{.Tag}}' | grep -F '${examples.bashNoTag.imageTag}'"
-
)
-
docker.succeed("docker rmi ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag}")
+
docker.succeed(
+
"docker load --input='${examples.bashNoTagLayered}'"
+
)
+
docker.succeed(
+
"docker images --format '{{.Tag}}' | grep -F '${examples.bashNoTagLayered.imageTag}'"
+
)
+
docker.succeed("docker rmi ${examples.bashNoTagLayered.imageName}:${examples.bashNoTagLayered.imageTag}")
-
docker.succeed(
-
"docker load --input='${examples.bashNoTagLayered}'"
-
)
-
docker.succeed(
-
"docker images --format '{{.Tag}}' | grep -F '${examples.bashNoTagLayered.imageTag}'"
-
)
-
docker.succeed("docker rmi ${examples.bashNoTagLayered.imageName}:${examples.bashNoTagLayered.imageTag}")
+
docker.succeed(
+
"${examples.bashNoTagStreamLayered} | docker load"
+
)
+
docker.succeed(
+
"docker images --format '{{.Tag}}' | grep -F '${examples.bashNoTagStreamLayered.imageTag}'"
+
)
+
docker.succeed(
+
"docker rmi ${examples.bashNoTagStreamLayered.imageName}:${examples.bashNoTagStreamLayered.imageTag}"
+
)
-
docker.succeed(
-
"${examples.bashNoTagStreamLayered} | docker load"
-
)
-
docker.succeed(
-
"docker images --format '{{.Tag}}' | grep -F '${examples.bashNoTagStreamLayered.imageTag}'"
-
)
-
docker.succeed(
-
"docker rmi ${examples.bashNoTagStreamLayered.imageName}:${examples.bashNoTagStreamLayered.imageTag}"
-
)
+
docker.succeed(
+
"docker load --input='${examples.nixLayered}'"
+
)
+
docker.succeed("docker images --format '{{.Tag}}' | grep -F '${examples.nixLayered.imageTag}'")
+
docker.succeed("docker rmi ${examples.nixLayered.imageName}")
-
docker.succeed(
-
"docker load --input='${examples.nixLayered}'"
-
)
-
docker.succeed("docker images --format '{{.Tag}}' | grep -F '${examples.nixLayered.imageTag}'")
-
docker.succeed("docker rmi ${examples.nixLayered.imageName}")
+
with subtest("Check that images with alternative compression schemas load"):
+
docker.succeed(
+
"docker load --input='${examples.bashZstdCompressed}'",
+
"docker rmi ${examples.bashZstdCompressed.imageName}",
+
)
+
docker.succeed(
+
"docker load --input='${examples.bashUncompressed}'",
+
"docker rmi ${examples.bashUncompressed.imageName}",
+
)
+
docker.succeed(
+
"docker load --input='${examples.bashLayeredUncompressed}'",
+
"docker rmi ${examples.bashLayeredUncompressed.imageName}",
+
)
+
docker.succeed(
+
"docker load --input='${examples.bashLayeredZstdCompressed}'",
+
"docker rmi ${examples.bashLayeredZstdCompressed.imageName}",
+
)
-
with subtest("Check that images with alternative compression schemas load"):
-
docker.succeed(
-
"docker load --input='${examples.bashZstdCompressed}'",
-
"docker rmi ${examples.bashZstdCompressed.imageName}",
-
)
-
docker.succeed(
-
"docker load --input='${examples.bashUncompressed}'",
-
"docker rmi ${examples.bashUncompressed.imageName}",
-
)
-
docker.succeed(
-
"docker load --input='${examples.bashLayeredUncompressed}'",
-
"docker rmi ${examples.bashLayeredUncompressed.imageName}",
-
)
-
docker.succeed(
-
"docker load --input='${examples.bashLayeredZstdCompressed}'",
-
"docker rmi ${examples.bashLayeredZstdCompressed.imageName}",
-
)
+
with subtest(
+
"Check if the nix store is correctly initialized by listing "
+
"dependencies of the installed Nix binary"
+
):
+
docker.succeed(
+
"docker load --input='${examples.nix}'",
+
"docker run --rm ${examples.nix.imageName} nix-store -qR ${pkgs.nix}",
+
"docker rmi ${examples.nix.imageName}",
+
)
-
with subtest(
-
"Check if the nix store is correctly initialized by listing "
-
"dependencies of the installed Nix binary"
-
):
-
docker.succeed(
-
"docker load --input='${examples.nix}'",
-
"docker run --rm ${examples.nix.imageName} nix-store -qR ${pkgs.nix}",
-
"docker rmi ${examples.nix.imageName}",
-
)
+
with subtest(
+
"Ensure (layered) nix store has correct permissions "
+
"and that the container starts when its process does not have uid 0"
+
):
+
docker.succeed(
+
"docker load --input='${examples.bashLayeredWithUser}'",
+
"docker run -u somebody --rm ${examples.bashLayeredWithUser.imageName} ${pkgs.bash}/bin/bash -c 'test 755 == $(stat --format=%a /nix) && test 755 == $(stat --format=%a /nix/store)'",
+
"docker rmi ${examples.bashLayeredWithUser.imageName}",
+
)
-
with subtest(
-
"Ensure (layered) nix store has correct permissions "
-
"and that the container starts when its process does not have uid 0"
-
):
-
docker.succeed(
-
"docker load --input='${examples.bashLayeredWithUser}'",
-
"docker run -u somebody --rm ${examples.bashLayeredWithUser.imageName} ${pkgs.bash}/bin/bash -c 'test 755 == $(stat --format=%a /nix) && test 755 == $(stat --format=%a /nix/store)'",
-
"docker rmi ${examples.bashLayeredWithUser.imageName}",
-
)
+
with subtest("The nix binary symlinks are intact"):
+
docker.succeed(
+
"docker load --input='${examples.nix}'",
+
"docker run --rm ${examples.nix.imageName} ${pkgs.bash}/bin/bash -c 'test nix == $(readlink ${pkgs.nix}/bin/nix-daemon)'",
+
"docker rmi ${examples.nix.imageName}",
+
)
-
with subtest("The nix binary symlinks are intact"):
-
docker.succeed(
-
"docker load --input='${examples.nix}'",
-
"docker run --rm ${examples.nix.imageName} ${pkgs.bash}/bin/bash -c 'test nix == $(readlink ${pkgs.nix}/bin/nix-daemon)'",
-
"docker rmi ${examples.nix.imageName}",
-
)
+
with subtest("The nix binary symlinks are intact when the image is layered"):
+
docker.succeed(
+
"docker load --input='${examples.nixLayered}'",
+
"docker run --rm ${examples.nixLayered.imageName} ${pkgs.bash}/bin/bash -c 'test nix == $(readlink ${pkgs.nix}/bin/nix-daemon)'",
+
"docker rmi ${examples.nixLayered.imageName}",
+
)
-
with subtest("The nix binary symlinks are intact when the image is layered"):
-
docker.succeed(
-
"docker load --input='${examples.nixLayered}'",
-
"docker run --rm ${examples.nixLayered.imageName} ${pkgs.bash}/bin/bash -c 'test nix == $(readlink ${pkgs.nix}/bin/nix-daemon)'",
-
"docker rmi ${examples.nixLayered.imageName}",
-
)
+
with subtest("The pullImage tool works"):
+
docker.succeed(
+
"docker load --input='${examples.testNixFromDockerHub}'",
+
"docker run --rm nix:2.2.1 nix-store --version",
+
"docker rmi nix:2.2.1",
+
)
-
with subtest("The pullImage tool works"):
-
docker.succeed(
-
"docker load --input='${examples.testNixFromDockerHub}'",
-
"docker run --rm nix:2.2.1 nix-store --version",
-
"docker rmi nix:2.2.1",
-
)
-
-
with subtest("runAsRoot and entry point work"):
-
docker.succeed(
-
"docker load --input='${examples.nginx}'",
-
"docker run --name nginx -d -p 8000:80 ${examples.nginx.imageName}",
-
)
-
docker.wait_until_succeeds("curl -f http://localhost:8000/")
-
docker.succeed(
-
"docker rm --force nginx",
-
"docker rmi '${examples.nginx.imageName}'",
-
)
+
with subtest("runAsRoot and entry point work"):
+
docker.succeed(
+
"docker load --input='${examples.nginx}'",
+
"docker run --name nginx -d -p 8000:80 ${examples.nginx.imageName}",
+
)
+
docker.wait_until_succeeds("curl -f http://localhost:8000/")
+
docker.succeed(
+
"docker rm --force nginx",
+
"docker rmi '${examples.nginx.imageName}'",
+
)
-
with subtest("A pulled image can be used as base image"):
-
docker.succeed(
-
"docker load --input='${examples.onTopOfPulledImage}'",
-
"docker run --rm ontopofpulledimage hello",
-
"docker rmi ontopofpulledimage",
-
)
+
with subtest("A pulled image can be used as base image"):
+
docker.succeed(
+
"docker load --input='${examples.onTopOfPulledImage}'",
+
"docker run --rm ontopofpulledimage hello",
+
"docker rmi ontopofpulledimage",
+
)
-
with subtest("Regression test for issue #34779"):
-
docker.succeed(
-
"docker load --input='${examples.runAsRootExtraCommands}'",
-
"docker run --rm runasrootextracommands cat extraCommands",
-
"docker run --rm runasrootextracommands cat runAsRoot",
-
"docker rmi '${examples.runAsRootExtraCommands.imageName}'",
-
)
+
with subtest("Regression test for issue #34779"):
+
docker.succeed(
+
"docker load --input='${examples.runAsRootExtraCommands}'",
+
"docker run --rm runasrootextracommands cat extraCommands",
+
"docker run --rm runasrootextracommands cat runAsRoot",
+
"docker rmi '${examples.runAsRootExtraCommands.imageName}'",
+
)
-
with subtest("Ensure Docker images can use an unstable date"):
-
docker.succeed(
-
"docker load --input='${examples.unstableDate}'"
-
)
-
assert unix_time_second1 not in docker.succeed(
-
"docker inspect ${examples.unstableDate.imageName} "
-
+ "| ${pkgs.jq}/bin/jq -r .[].Created"
-
)
+
with subtest("Ensure Docker images can use an unstable date"):
+
docker.succeed(
+
"docker load --input='${examples.unstableDate}'"
+
)
+
assert unix_time_second1 not in docker.succeed(
+
"docker inspect ${examples.unstableDate.imageName} "
+
+ "| ${pkgs.jq}/bin/jq -r .[].Created"
+
)
-
with subtest("Ensure Layered Docker images can use an unstable date"):
-
docker.succeed(
-
"docker load --input='${examples.unstableDateLayered}'"
-
)
-
assert unix_time_second1 not in docker.succeed(
-
"docker inspect ${examples.unstableDateLayered.imageName} "
-
+ "| ${pkgs.jq}/bin/jq -r .[].Created"
-
)
+
with subtest("Ensure Layered Docker images can use an unstable date"):
+
docker.succeed(
+
"docker load --input='${examples.unstableDateLayered}'"
+
)
+
assert unix_time_second1 not in docker.succeed(
+
"docker inspect ${examples.unstableDateLayered.imageName} "
+
+ "| ${pkgs.jq}/bin/jq -r .[].Created"
+
)
-
with subtest("Ensure Layered Docker images work"):
-
docker.succeed(
-
"docker load --input='${examples.layered-image}'",
-
"docker run --rm ${examples.layered-image.imageName}",
-
"docker run --rm ${examples.layered-image.imageName} cat extraCommands",
-
)
+
with subtest("Ensure Layered Docker images work"):
+
docker.succeed(
+
"docker load --input='${examples.layered-image}'",
+
"docker run --rm ${examples.layered-image.imageName}",
+
"docker run --rm ${examples.layered-image.imageName} cat extraCommands",
+
)
-
with subtest("Ensure images built on top of layered Docker images work"):
-
docker.succeed(
-
"docker load --input='${examples.layered-on-top}'",
-
"docker run --rm ${examples.layered-on-top.imageName}",
-
)
+
with subtest("Ensure images built on top of layered Docker images work"):
+
docker.succeed(
+
"docker load --input='${examples.layered-on-top}'",
+
"docker run --rm ${examples.layered-on-top.imageName}",
+
)
-
with subtest("Ensure layered images built on top of layered Docker images work"):
-
docker.succeed(
-
"docker load --input='${examples.layered-on-top-layered}'",
-
"docker run --rm ${examples.layered-on-top-layered.imageName}",
-
)
+
with subtest("Ensure layered images built on top of layered Docker images work"):
+
docker.succeed(
+
"docker load --input='${examples.layered-on-top-layered}'",
+
"docker run --rm ${examples.layered-on-top-layered.imageName}",
+
)
-
def set_of_layers(image_name):
-
return set(
-
docker.succeed(
-
f"docker inspect {image_name} "
-
+ "| ${pkgs.jq}/bin/jq -r '.[] | .RootFS.Layers | .[]'"
-
).split()
-
)
+
def set_of_layers(image_name):
+
return set(
+
docker.succeed(
+
f"docker inspect {image_name} "
+
+ "| ${pkgs.jq}/bin/jq -r '.[] | .RootFS.Layers | .[]'"
+
).split()
+
)
-
with subtest("Ensure layers are shared between images"):
-
docker.succeed(
-
"docker load --input='${examples.another-layered-image}'"
-
)
-
layers1 = set_of_layers("${examples.layered-image.imageName}")
-
layers2 = set_of_layers("${examples.another-layered-image.imageName}")
-
assert bool(layers1 & layers2)
+
with subtest("Ensure layers are shared between images"):
+
docker.succeed(
+
"docker load --input='${examples.another-layered-image}'"
+
)
+
layers1 = set_of_layers("${examples.layered-image.imageName}")
+
layers2 = set_of_layers("${examples.another-layered-image.imageName}")
+
assert bool(layers1 & layers2)
-
with subtest("Ensure order of layers is correct"):
-
docker.succeed(
-
"docker load --input='${examples.layersOrder}'"
-
)
+
with subtest("Ensure order of layers is correct"):
+
docker.succeed(
+
"docker load --input='${examples.layersOrder}'"
+
)
-
for index in 1, 2, 3:
-
assert f"layer{index}" in docker.succeed(
-
f"docker run --rm ${examples.layersOrder.imageName} cat /tmp/layer{index}"
-
)
+
for index in 1, 2, 3:
+
assert f"layer{index}" in docker.succeed(
+
f"docker run --rm ${examples.layersOrder.imageName} cat /tmp/layer{index}"
+
)
-
with subtest("Ensure layers unpacked in correct order before runAsRoot runs"):
-
assert "abc" in docker.succeed(
-
"docker load --input='${examples.layersUnpackOrder}'",
-
"docker run --rm ${examples.layersUnpackOrder.imageName} cat /layer-order"
-
)
+
with subtest("Ensure layers unpacked in correct order before runAsRoot runs"):
+
assert "abc" in docker.succeed(
+
"docker load --input='${examples.layersUnpackOrder}'",
+
"docker run --rm ${examples.layersUnpackOrder.imageName} cat /layer-order"
+
)
-
with subtest("Ensure repeated base layers handled by buildImage"):
-
docker.succeed(
-
"docker load --input='${repeatedLayerTestImage}'",
-
"docker run --rm ${repeatedLayerTestImage.imageName} /bin/bash -c 'exit 0'"
-
)
+
with subtest("Ensure repeated base layers handled by buildImage"):
+
docker.succeed(
+
"docker load --input='${repeatedLayerTestImage}'",
+
"docker run --rm ${repeatedLayerTestImage.imageName} /bin/bash -c 'exit 0'"
+
)
-
with subtest("Ensure environment variables are correctly inherited"):
-
docker.succeed(
-
"docker load --input='${examples.environmentVariables}'"
-
)
-
out = docker.succeed("docker run --rm ${examples.environmentVariables.imageName} env")
-
env = out.splitlines()
-
assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
-
assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
-
assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
+
with subtest("Ensure environment variables are correctly inherited"):
+
docker.succeed(
+
"docker load --input='${examples.environmentVariables}'"
+
)
+
out = docker.succeed("docker run --rm ${examples.environmentVariables.imageName} env")
+
env = out.splitlines()
+
assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
+
assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
+
assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
-
with subtest("Ensure environment variables of layered images are correctly inherited"):
-
docker.succeed(
-
"docker load --input='${examples.environmentVariablesLayered}'"
-
)
-
out = docker.succeed("docker run --rm ${examples.environmentVariablesLayered.imageName} env")
-
env = out.splitlines()
-
assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
-
assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
-
assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
+
with subtest("Ensure environment variables of layered images are correctly inherited"):
+
docker.succeed(
+
"docker load --input='${examples.environmentVariablesLayered}'"
+
)
+
out = docker.succeed("docker run --rm ${examples.environmentVariablesLayered.imageName} env")
+
env = out.splitlines()
+
assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
+
assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
+
assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
-
with subtest(
-
"Ensure inherited environment variables of layered images are correctly resolved"
-
):
-
# Read environment variables as stored in image config
-
config = docker.succeed(
-
"tar -xOf ${examples.environmentVariablesLayered} manifest.json | ${pkgs.jq}/bin/jq -r .[].Config"
-
).strip()
-
out = docker.succeed(
-
f"tar -xOf ${examples.environmentVariablesLayered} {config} | ${pkgs.jq}/bin/jq -r '.config.Env | .[]'"
-
)
-
env = out.splitlines()
-
assert (
-
sum(entry.startswith("LAST_LAYER") for entry in env) == 1
-
), "envvars overridden by child should be unique"
+
with subtest(
+
"Ensure inherited environment variables of layered images are correctly resolved"
+
):
+
# Read environment variables as stored in image config
+
config = docker.succeed(
+
"tar -xOf ${examples.environmentVariablesLayered} manifest.json | ${pkgs.jq}/bin/jq -r .[].Config"
+
).strip()
+
out = docker.succeed(
+
f"tar -xOf ${examples.environmentVariablesLayered} {config} | ${pkgs.jq}/bin/jq -r '.config.Env | .[]'"
+
)
+
env = out.splitlines()
+
assert (
+
sum(entry.startswith("LAST_LAYER") for entry in env) == 1
+
), "envvars overridden by child should be unique"
-
with subtest("Ensure image with only 2 layers can be loaded"):
-
docker.succeed(
-
"docker load --input='${examples.two-layered-image}'"
-
)
+
with subtest("Ensure image with only 2 layers can be loaded"):
+
docker.succeed(
+
"docker load --input='${examples.two-layered-image}'"
+
)
-
with subtest(
-
"Ensure the bulk layer doesn't miss store paths (regression test for #78744)"
-
):
-
docker.succeed(
-
"docker load --input='${pkgs.dockerTools.examples.bulk-layer}'",
-
# Ensure the two output paths (ls and hello) are in the layer
-
"docker run bulk-layer ls /bin/hello",
-
)
+
with subtest(
+
"Ensure the bulk layer doesn't miss store paths (regression test for #78744)"
+
):
+
docker.succeed(
+
"docker load --input='${pkgs.dockerTools.examples.bulk-layer}'",
+
# Ensure the two output paths (ls and hello) are in the layer
+
"docker run bulk-layer ls /bin/hello",
+
)
-
with subtest(
-
"Ensure the bulk layer with a base image respects the number of maxLayers"
-
):
-
docker.succeed(
-
"docker load --input='${pkgs.dockerTools.examples.layered-bulk-layer}'",
-
# Ensure the image runs correctly
-
"docker run layered-bulk-layer ls /bin/hello",
-
)
+
with subtest(
+
"Ensure the bulk layer with a base image respects the number of maxLayers"
+
):
+
docker.succeed(
+
"docker load --input='${pkgs.dockerTools.examples.layered-bulk-layer}'",
+
# Ensure the image runs correctly
+
"docker run layered-bulk-layer ls /bin/hello",
+
)
-
# Ensure the image has the correct number of layers
-
assert len(set_of_layers("layered-bulk-layer")) == 4
+
# Ensure the image has the correct number of layers
+
assert len(set_of_layers("layered-bulk-layer")) == 4
-
with subtest("Ensure only minimal paths are added to the store"):
-
# TODO: make an example that has no store paths, for example by making
-
# busybox non-self-referential.
+
with subtest("Ensure only minimal paths are added to the store"):
+
# TODO: make an example that has no store paths, for example by making
+
# busybox non-self-referential.
-
# This check tests that buildLayeredImage can build images that don't need a store.
-
docker.succeed(
-
"docker load --input='${pkgs.dockerTools.examples.no-store-paths}'"
-
)
+
# This check tests that buildLayeredImage can build images that don't need a store.
+
docker.succeed(
+
"docker load --input='${pkgs.dockerTools.examples.no-store-paths}'"
+
)
-
docker.succeed("docker run --rm no-store-paths ls / >/dev/console")
+
docker.succeed("docker run --rm no-store-paths ls / >/dev/console")
-
# If busybox isn't self-referential, we need this line
-
# docker.fail("docker run --rm no-store-paths ls /nix/store >/dev/console")
-
# However, it currently is self-referential, so we check that it is the
-
# only store path.
-
docker.succeed("diff <(docker run --rm no-store-paths ls /nix/store) <(basename ${pkgs.pkgsStatic.busybox}) >/dev/console")
+
# If busybox isn't self-referential, we need this line
+
# docker.fail("docker run --rm no-store-paths ls /nix/store >/dev/console")
+
# However, it currently is self-referential, so we check that it is the
+
# only store path.
+
docker.succeed("diff <(docker run --rm no-store-paths ls /nix/store) <(basename ${pkgs.pkgsStatic.busybox}) >/dev/console")
-
with subtest("Ensure buildLayeredImage does not change store path contents."):
-
docker.succeed(
-
"docker load --input='${pkgs.dockerTools.examples.filesInStore}'",
-
"docker run --rm file-in-store nix-store --verify --check-contents",
-
"docker run --rm file-in-store |& grep 'some data'",
-
)
+
with subtest("Ensure buildLayeredImage does not change store path contents."):
+
docker.succeed(
+
"docker load --input='${pkgs.dockerTools.examples.filesInStore}'",
+
"docker run --rm file-in-store nix-store --verify --check-contents",
+
"docker run --rm file-in-store |& grep 'some data'",
+
)
-
with subtest("Ensure cross compiled image can be loaded and has correct arch."):
-
docker.succeed(
-
"docker load --input='${pkgs.dockerTools.examples.cross}'",
-
)
-
assert (
-
docker.succeed(
-
"docker inspect ${pkgs.dockerTools.examples.cross.imageName} "
-
+ "| ${pkgs.jq}/bin/jq -r .[].Architecture"
-
).strip()
-
== "${if pkgs.stdenv.hostPlatform.system == "aarch64-linux" then "amd64" else "arm64"}"
-
)
+
with subtest("Ensure cross compiled image can be loaded and has correct arch."):
+
docker.succeed(
+
"docker load --input='${pkgs.dockerTools.examples.cross}'",
+
)
+
assert (
+
docker.succeed(
+
"docker inspect ${pkgs.dockerTools.examples.cross.imageName} "
+
+ "| ${pkgs.jq}/bin/jq -r .[].Architecture"
+
).strip()
+
== "${if pkgs.stdenv.hostPlatform.system == "aarch64-linux" then "amd64" else "arm64"}"
+
)
-
with subtest("buildLayeredImage doesn't dereference /nix/store symlink layers"):
-
docker.succeed(
-
"docker load --input='${examples.layeredStoreSymlink}'",
-
"docker run --rm ${examples.layeredStoreSymlink.imageName} bash -c 'test -L ${examples.layeredStoreSymlink.passthru.symlink}'",
-
"docker rmi ${examples.layeredStoreSymlink.imageName}",
-
)
+
with subtest("buildLayeredImage doesn't dereference /nix/store symlink layers"):
+
docker.succeed(
+
"docker load --input='${examples.layeredStoreSymlink}'",
+
"docker run --rm ${examples.layeredStoreSymlink.imageName} bash -c 'test -L ${examples.layeredStoreSymlink.passthru.symlink}'",
+
"docker rmi ${examples.layeredStoreSymlink.imageName}",
+
)
-
with subtest("buildImage supports registry/ prefix in image name"):
-
docker.succeed(
-
"docker load --input='${examples.prefixedImage}'"
-
)
-
docker.succeed(
-
"docker images --format '{{.Repository}}' | grep -F '${examples.prefixedImage.imageName}'"
-
)
+
with subtest("buildImage supports registry/ prefix in image name"):
+
docker.succeed(
+
"docker load --input='${examples.prefixedImage}'"
+
)
+
docker.succeed(
+
"docker images --format '{{.Repository}}' | grep -F '${examples.prefixedImage.imageName}'"
+
)
-
with subtest("buildLayeredImage supports registry/ prefix in image name"):
-
docker.succeed(
-
"docker load --input='${examples.prefixedLayeredImage}'"
-
)
-
docker.succeed(
-
"docker images --format '{{.Repository}}' | grep -F '${examples.prefixedLayeredImage.imageName}'"
-
)
+
with subtest("buildLayeredImage supports registry/ prefix in image name"):
+
docker.succeed(
+
"docker load --input='${examples.prefixedLayeredImage}'"
+
)
+
docker.succeed(
+
"docker images --format '{{.Repository}}' | grep -F '${examples.prefixedLayeredImage.imageName}'"
+
)
-
with subtest("buildLayeredImage supports running chown with fakeRootCommands"):
-
docker.succeed(
-
"docker load --input='${examples.layeredImageWithFakeRootCommands}'"
-
)
-
docker.succeed(
-
"docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/alice | grep -E ^1000$'"
-
)
+
with subtest("buildLayeredImage supports running chown with fakeRootCommands"):
+
docker.succeed(
+
"docker load --input='${examples.layeredImageWithFakeRootCommands}'"
+
)
+
docker.succeed(
+
"docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/alice | grep -E ^1000$'"
+
)
-
with subtest("Ensure docker load on merged images loads all of the constituent images"):
-
docker.succeed(
-
"docker load --input='${examples.mergedBashAndRedis}'"
-
)
-
docker.succeed(
-
"docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.bash.imageName}-${examples.bash.imageTag}'"
-
)
-
docker.succeed(
-
"docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.redis.imageName}-${examples.redis.imageTag}'"
-
)
-
docker.succeed("docker run --rm ${examples.bash.imageName} bash --version")
-
docker.succeed("docker run --rm ${examples.redis.imageName} redis-cli --version")
-
docker.succeed("docker rmi ${examples.bash.imageName}")
-
docker.succeed("docker rmi ${examples.redis.imageName}")
+
with subtest("Ensure docker load on merged images loads all of the constituent images"):
+
docker.succeed(
+
"docker load --input='${examples.mergedBashAndRedis}'"
+
)
+
docker.succeed(
+
"docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.bash.imageName}-${examples.bash.imageTag}'"
+
)
+
docker.succeed(
+
"docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.redis.imageName}-${examples.redis.imageTag}'"
+
)
+
docker.succeed("docker run --rm ${examples.bash.imageName} bash --version")
+
docker.succeed("docker run --rm ${examples.redis.imageName} redis-cli --version")
+
docker.succeed("docker rmi ${examples.bash.imageName}")
+
docker.succeed("docker rmi ${examples.redis.imageName}")
-
with subtest(
-
"Ensure docker load on merged images loads all of the constituent images (missing tags)"
-
):
-
docker.succeed(
-
"docker load --input='${examples.mergedBashNoTagAndRedis}'"
-
)
-
docker.succeed(
-
"docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.bashNoTag.imageName}-${examples.bashNoTag.imageTag}'"
-
)
-
docker.succeed(
-
"docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.redis.imageName}-${examples.redis.imageTag}'"
-
)
-
# we need to explicitly specify the generated tag here
-
docker.succeed(
-
"docker run --rm ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag} bash --version"
-
)
-
docker.succeed("docker run --rm ${examples.redis.imageName} redis-cli --version")
-
docker.succeed("docker rmi ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag}")
-
docker.succeed("docker rmi ${examples.redis.imageName}")
+
with subtest(
+
"Ensure docker load on merged images loads all of the constituent images (missing tags)"
+
):
+
docker.succeed(
+
"docker load --input='${examples.mergedBashNoTagAndRedis}'"
+
)
+
docker.succeed(
+
"docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.bashNoTag.imageName}-${examples.bashNoTag.imageTag}'"
+
)
+
docker.succeed(
+
"docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.redis.imageName}-${examples.redis.imageTag}'"
+
)
+
# we need to explicitly specify the generated tag here
+
docker.succeed(
+
"docker run --rm ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag} bash --version"
+
)
+
docker.succeed("docker run --rm ${examples.redis.imageName} redis-cli --version")
+
docker.succeed("docker rmi ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag}")
+
docker.succeed("docker rmi ${examples.redis.imageName}")
-
with subtest("mergeImages preserves owners of the original images"):
-
docker.succeed(
-
"docker load --input='${examples.mergedBashFakeRoot}'"
-
)
-
docker.succeed(
-
"docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/alice | grep -E ^1000$'"
-
)
+
with subtest("mergeImages preserves owners of the original images"):
+
docker.succeed(
+
"docker load --input='${examples.mergedBashFakeRoot}'"
+
)
+
docker.succeed(
+
"docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/alice | grep -E ^1000$'"
+
)
-
with subtest("The image contains store paths referenced by the fakeRootCommands output"):
-
docker.succeed(
-
"docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} /hello/bin/layeredImageWithFakeRootCommands-hello"
-
)
+
with subtest("The image contains store paths referenced by the fakeRootCommands output"):
+
docker.succeed(
+
"docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} /hello/bin/layeredImageWithFakeRootCommands-hello"
+
)
-
with subtest("mergeImage correctly deals with varying compression schemas in inputs"):
-
docker.succeed("docker load --input='${examples.mergeVaryingCompressor}'")
+
with subtest("mergeImage correctly deals with varying compression schemas in inputs"):
+
docker.succeed("docker load --input='${examples.mergeVaryingCompressor}'")
-
for sub_image, tag in [
-
("${examples.redis.imageName}", "${examples.redis.imageTag}"),
-
("${examples.bashUncompressed.imageName}", "${examples.bashUncompressed.imageTag}"),
-
("${examples.bashZstdCompressed.imageName}", "${examples.bashZstdCompressed.imageTag}"),
-
]:
-
docker.succeed(f"docker images --format '{{{{.Repository}}}}-{{{{.Tag}}}}' | grep -F '{sub_image}-{tag}'")
-
docker.succeed(f"docker rmi {sub_image}")
+
for sub_image, tag in [
+
("${examples.redis.imageName}", "${examples.redis.imageTag}"),
+
("${examples.bashUncompressed.imageName}", "${examples.bashUncompressed.imageTag}"),
+
("${examples.bashZstdCompressed.imageName}", "${examples.bashZstdCompressed.imageTag}"),
+
]:
+
docker.succeed(f"docker images --format '{{{{.Repository}}}}-{{{{.Tag}}}}' | grep -F '{sub_image}-{tag}'")
+
docker.succeed(f"docker rmi {sub_image}")
-
with subtest("exportImage produces a valid tarball"):
-
docker.succeed(
-
"tar -tf ${examples.exportBash} | grep '\./bin/bash' > /dev/null"
-
)
+
with subtest("exportImage produces a valid tarball"):
+
docker.succeed(
+
"tar -tf ${examples.exportBash} | grep '\./bin/bash' > /dev/null"
+
)
-
with subtest("layered image fakeRootCommands with fakechroot works"):
-
docker.succeed("${examples.imageViaFakeChroot} | docker load")
-
docker.succeed("docker run --rm image-via-fake-chroot | grep -i hello")
-
docker.succeed("docker image rm image-via-fake-chroot:latest")
+
with subtest("layered image fakeRootCommands with fakechroot works"):
+
docker.succeed("${examples.imageViaFakeChroot} | docker load")
+
docker.succeed("docker run --rm image-via-fake-chroot | grep -i hello")
+
docker.succeed("docker image rm image-via-fake-chroot:latest")
-
with subtest("Ensure bare paths in contents are loaded correctly"):
-
docker.succeed(
-
"docker load --input='${examples.build-image-with-path}'",
-
"docker run --rm build-image-with-path bash -c '[[ -e /hello.txt ]]'",
-
"docker rmi build-image-with-path",
-
)
-
docker.succeed(
-
"${examples.layered-image-with-path} | docker load",
-
"docker run --rm layered-image-with-path bash -c '[[ -e /hello.txt ]]'",
-
"docker rmi layered-image-with-path",
-
)
+
with subtest("Ensure bare paths in contents are loaded correctly"):
+
docker.succeed(
+
"docker load --input='${examples.build-image-with-path}'",
+
"docker run --rm build-image-with-path bash -c '[[ -e /hello.txt ]]'",
+
"docker rmi build-image-with-path",
+
)
+
docker.succeed(
+
"${examples.layered-image-with-path} | docker load",
+
"docker run --rm layered-image-with-path bash -c '[[ -e /hello.txt ]]'",
+
"docker rmi layered-image-with-path",
+
)
-
with subtest("Ensure correct architecture is present in manifests."):
-
docker.succeed("""
-
docker load --input='${examples.build-image-with-architecture}'
-
docker inspect build-image-with-architecture \
-
| ${pkgs.jq}/bin/jq -er '.[] | select(.Architecture=="arm64").Architecture'
-
docker rmi build-image-with-architecture
-
""")
-
docker.succeed("""
-
${examples.layered-image-with-architecture} | docker load
-
docker inspect layered-image-with-architecture \
-
| ${pkgs.jq}/bin/jq -er '.[] | select(.Architecture=="arm64").Architecture'
-
docker rmi layered-image-with-architecture
-
""")
+
with subtest("Ensure correct architecture is present in manifests."):
+
docker.succeed("""
+
docker load --input='${examples.build-image-with-architecture}'
+
docker inspect build-image-with-architecture \
+
| ${pkgs.jq}/bin/jq -er '.[] | select(.Architecture=="arm64").Architecture'
+
docker rmi build-image-with-architecture
+
""")
+
docker.succeed("""
+
${examples.layered-image-with-architecture} | docker load
+
docker inspect layered-image-with-architecture \
+
| ${pkgs.jq}/bin/jq -er '.[] | select(.Architecture=="arm64").Architecture'
+
docker rmi layered-image-with-architecture
+
""")
-
with subtest("etc"):
-
docker.succeed("${examples.etc} | docker load")
-
docker.succeed("docker run --rm etc | grep localhost")
-
docker.succeed("docker image rm etc:latest")
+
with subtest("etc"):
+
docker.succeed("${examples.etc} | docker load")
+
docker.succeed("docker run --rm etc | grep localhost")
+
docker.succeed("docker image rm etc:latest")
-
with subtest("image-with-certs"):
-
docker.succeed("<${examples.image-with-certs} docker load")
-
docker.succeed("docker run --rm image-with-certs:latest test -r /etc/ssl/certs/ca-bundle.crt")
-
docker.succeed("docker run --rm image-with-certs:latest test -r /etc/ssl/certs/ca-certificates.crt")
-
docker.succeed("docker run --rm image-with-certs:latest test -r /etc/pki/tls/certs/ca-bundle.crt")
-
docker.succeed("docker image rm image-with-certs:latest")
+
with subtest("image-with-certs"):
+
docker.succeed("<${examples.image-with-certs} docker load")
+
docker.succeed("docker run --rm image-with-certs:latest test -r /etc/ssl/certs/ca-bundle.crt")
+
docker.succeed("docker run --rm image-with-certs:latest test -r /etc/ssl/certs/ca-certificates.crt")
+
docker.succeed("docker run --rm image-with-certs:latest test -r /etc/pki/tls/certs/ca-bundle.crt")
+
docker.succeed("docker image rm image-with-certs:latest")
-
with subtest("streamLayeredImage: chown is persistent in fakeRootCommands"):
-
docker.succeed(
-
"${chownTestImage} | docker load",
-
"docker run --rm ${chownTestImage.imageName} | diff /dev/stdin <(echo 12345:12345)"
-
)
+
with subtest("streamLayeredImage: chown is persistent in fakeRootCommands"):
+
docker.succeed(
+
"${chownTestImage} | docker load",
+
"docker run --rm ${chownTestImage.imageName} | diff /dev/stdin <(echo 12345:12345)"
+
)
-
with subtest("streamLayeredImage: with non-root user"):
-
docker.succeed(
-
"${nonRootTestImage} | docker load",
-
"docker run --rm ${chownTestImage.imageName} | diff /dev/stdin <(echo 12345:12345)"
-
)
-
'';
-
}
-
)
+
with subtest("streamLayeredImage: with non-root user"):
+
docker.succeed(
+
"${nonRootTestImage} | docker load",
+
"docker run --rm ${chownTestImage.imageName} | diff /dev/stdin <(echo 12345:12345)"
+
)
+
'';
+
}
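One detail worth noting in the script above: images built with `buildImage` are tarballs in the store and are loaded with `docker load --input='…'`, while `streamLayeredImage` produces an executable that writes the image to stdout, so those images are piped into `docker load` instead. A minimal sketch of the two shapes (image names are hypothetical, not from this test):

```nix
{ pkgs, ... }:
let
  # buildImage yields an image tarball in the store;
  # load it with: docker load --input='<tarball>'
  tarball = pkgs.dockerTools.buildImage {
    name = "hello-tarball"; # hypothetical
    tag = "latest";
    copyToRoot = [ pkgs.hello ];
  };

  # streamLayeredImage yields a script that streams the image to stdout;
  # load it with: <streamed> | docker load
  streamed = pkgs.dockerTools.streamLayeredImage {
    name = "hello-streamed"; # hypothetical
    tag = "latest";
    contents = [ pkgs.hello ];
  };
in
{
  inherit tarball streamed;
}
```

Streaming avoids materialising the full image tarball in the Nix store, which is why several of the subtests above pipe into `docker load` rather than loading from a file.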
+1 -1
nixos/tests/dovecot.nix
···
-
import ./make-test-python.nix {
+
{
name = "dovecot";
nodes.machine =
+1 -1
nixos/tests/early-mount-options.nix
···
# Test for https://github.com/NixOS/nixpkgs/pull/193469
-
import ./make-test-python.nix {
+
{
name = "early-mount-options";
nodes.machine = {
+31 -35
nixos/tests/earlyoom.nix
···
-
import ./make-test-python.nix (
-
{ lib, ... }:
-
{
-
name = "earlyoom";
-
meta = {
-
maintainers = with lib.maintainers; [
-
ncfavier
-
oxalica
-
];
-
};
+
{ lib, ... }:
+
{
+
name = "earlyoom";
+
meta.maintainers = with lib.maintainers; [
+
ncfavier
+
oxalica
+
];
-
nodes.machine =
-
{ pkgs, ... }:
-
{
-
# Limit VM resource usage.
-
virtualisation.memorySize = 1024;
+
nodes.machine =
+
{ pkgs, ... }:
+
{
+
# Limit VM resource usage.
+
virtualisation.memorySize = 1024;
-
services.earlyoom = {
-
enable = true;
-
# Use SIGKILL, or `tail` will catch SIGTERM and exit successfully.
-
freeMemKillThreshold = 90;
-
};
+
services.earlyoom = {
+
enable = true;
+
# Use SIGKILL, or `tail` will catch SIGTERM and exit successfully.
+
freeMemKillThreshold = 90;
+
};
-
systemd.services.testbloat = {
-
description = "Create a lot of memory pressure";
-
serviceConfig = {
-
ExecStart = "${pkgs.coreutils}/bin/tail /dev/zero";
-
};
+
systemd.services.testbloat = {
+
description = "Create a lot of memory pressure";
+
serviceConfig = {
+
ExecStart = "${pkgs.coreutils}/bin/tail /dev/zero";
};
};
+
};
-
testScript = ''
-
machine.wait_for_unit("earlyoom.service")
+
testScript = ''
+
machine.wait_for_unit("earlyoom.service")
-
with subtest("earlyoom should kill the bad service"):
-
machine.fail("systemctl start --wait testbloat.service")
-
assert machine.get_unit_info("testbloat.service")["Result"] == "signal"
-
output = machine.succeed('journalctl -u earlyoom.service -b0')
-
assert 'low memory! at or below SIGKILL limits' in output
-
'';
-
}
-
)
+
with subtest("earlyoom should kill the bad service"):
+
machine.fail("systemctl start --wait testbloat.service")
+
assert machine.get_unit_info("testbloat.service")["Result"] == "signal"
+
output = machine.succeed('journalctl -u earlyoom.service -b0')
+
assert 'low memory! at or below SIGKILL limits' in output
+
'';
+
}
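Besides dropping the wrapper, the earlyoom hunk also flattens `meta = { maintainers = …; }` into `meta.maintainers = …`. The two spellings are interchangeable in Nix; the dotted path is shorthand for a nested attribute set, as this self-contained snippet (it prints `true` under `nix eval --expr`) illustrates:

```nix
let
  nested = { meta = { maintainers = [ ]; }; };
  dotted = { meta.maintainers = [ ]; };
in
# The dotted attribute path denotes the same nested set.
nested == dotted
```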
+151 -156
nixos/tests/etcd/etcd-cluster.nix
···
# This test runs a simple etcd cluster
-
import ../make-test-python.nix (
-
{ pkgs, ... }:
-
let
+
{ lib, pkgs, ... }:
+
let
+
runWithOpenSSL =
+
file: cmd:
+
pkgs.runCommand file {
+
buildInputs = [ pkgs.openssl ];
+
} cmd;
-
runWithOpenSSL =
-
file: cmd:
-
pkgs.runCommand file {
-
buildInputs = [ pkgs.openssl ];
-
} cmd;
+
ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
+
ca_pem = runWithOpenSSL "ca.pem" ''
+
openssl req \
+
-x509 -new -nodes -key ${ca_key} \
+
-days 10000 -out $out -subj "/CN=etcd-ca"
+
'';
+
etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
+
etcd_csr = runWithOpenSSL "etcd.csr" ''
+
openssl req \
+
-new -key ${etcd_key} \
+
-out $out -subj "/CN=etcd" \
+
-config ${openssl_cnf}
+
'';
+
etcd_cert = runWithOpenSSL "etcd.pem" ''
+
openssl x509 \
+
-req -in ${etcd_csr} \
+
-CA ${ca_pem} -CAkey ${ca_key} \
+
-CAcreateserial -out $out \
+
-days 365 -extensions v3_req \
+
-extfile ${openssl_cnf}
+
'';
-
ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
-
ca_pem = runWithOpenSSL "ca.pem" ''
-
openssl req \
-
-x509 -new -nodes -key ${ca_key} \
-
-days 10000 -out $out -subj "/CN=etcd-ca"
-
'';
-
etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
-
etcd_csr = runWithOpenSSL "etcd.csr" ''
-
openssl req \
-
-new -key ${etcd_key} \
-
-out $out -subj "/CN=etcd" \
-
-config ${openssl_cnf}
-
'';
-
etcd_cert = runWithOpenSSL "etcd.pem" ''
-
openssl x509 \
-
-req -in ${etcd_csr} \
-
-CA ${ca_pem} -CAkey ${ca_key} \
-
-CAcreateserial -out $out \
-
-days 365 -extensions v3_req \
-
-extfile ${openssl_cnf}
-
'';
+
etcd_client_key = runWithOpenSSL "etcd-client-key.pem" "openssl genrsa -out $out 2048";
-
etcd_client_key = runWithOpenSSL "etcd-client-key.pem" "openssl genrsa -out $out 2048";
+
etcd_client_csr = runWithOpenSSL "etcd-client-key.pem" ''
+
openssl req \
+
-new -key ${etcd_client_key} \
+
-out $out -subj "/CN=etcd-client" \
+
-config ${client_openssl_cnf}
+
'';
-
etcd_client_csr = runWithOpenSSL "etcd-client-key.pem" ''
-
openssl req \
-
-new -key ${etcd_client_key} \
-
-out $out -subj "/CN=etcd-client" \
-
-config ${client_openssl_cnf}
-
'';
-
-
etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
-
openssl x509 \
-
-req -in ${etcd_client_csr} \
-
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-
-out $out -days 365 -extensions v3_req \
-
-extfile ${client_openssl_cnf}
-
'';
+
etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
+
openssl x509 \
+
-req -in ${etcd_client_csr} \
+
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
+
-out $out -days 365 -extensions v3_req \
+
-extfile ${client_openssl_cnf}
+
'';
-
openssl_cnf = pkgs.writeText "openssl.cnf" ''
-
ions = v3_req
-
distinguished_name = req_distinguished_name
-
[req_distinguished_name]
-
[ v3_req ]
-
basicConstraints = CA:FALSE
-
keyUsage = digitalSignature, keyEncipherment
-
extendedKeyUsage = serverAuth, clientAuth
-
subjectAltName = @alt_names
-
[alt_names]
-
DNS.1 = node1
-
DNS.2 = node2
-
DNS.3 = node3
-
IP.1 = 127.0.0.1
-
'';
+
openssl_cnf = pkgs.writeText "openssl.cnf" ''
+
ions = v3_req
+
distinguished_name = req_distinguished_name
+
[req_distinguished_name]
+
[ v3_req ]
+
basicConstraints = CA:FALSE
+
keyUsage = digitalSignature, keyEncipherment
+
extendedKeyUsage = serverAuth, clientAuth
+
subjectAltName = @alt_names
+
[alt_names]
+
DNS.1 = node1
+
DNS.2 = node2
+
DNS.3 = node3
+
IP.1 = 127.0.0.1
+
'';
-
client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
-
ions = v3_req
-
distinguished_name = req_distinguished_name
-
[req_distinguished_name]
-
[ v3_req ]
-
basicConstraints = CA:FALSE
-
keyUsage = digitalSignature, keyEncipherment
-
extendedKeyUsage = clientAuth
-
'';
+
client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
+
ions = v3_req
+
distinguished_name = req_distinguished_name
+
[req_distinguished_name]
+
[ v3_req ]
+
basicConstraints = CA:FALSE
+
keyUsage = digitalSignature, keyEncipherment
+
extendedKeyUsage = clientAuth
+
'';
-
nodeConfig = {
-
services = {
-
etcd = {
-
enable = true;
-
keyFile = etcd_key;
-
certFile = etcd_cert;
-
trustedCaFile = ca_pem;
-
clientCertAuth = true;
-
listenClientUrls = [ "https://127.0.0.1:2379" ];
-
listenPeerUrls = [ "https://0.0.0.0:2380" ];
-
};
+
nodeConfig = {
+
services = {
+
etcd = {
+
enable = true;
+
keyFile = etcd_key;
+
certFile = etcd_cert;
+
trustedCaFile = ca_pem;
+
clientCertAuth = true;
+
listenClientUrls = [ "https://127.0.0.1:2379" ];
+
listenPeerUrls = [ "https://0.0.0.0:2380" ];
};
+
};
-
environment.variables = {
-
ETCD_CERT_FILE = "${etcd_client_cert}";
-
ETCD_KEY_FILE = "${etcd_client_key}";
-
ETCD_CA_FILE = "${ca_pem}";
-
ETCDCTL_ENDPOINTS = "https://127.0.0.1:2379";
-
ETCDCTL_CACERT = "${ca_pem}";
-
ETCDCTL_CERT = "${etcd_cert}";
-
ETCDCTL_KEY = "${etcd_key}";
-
};
+
environment.variables = {
+
ETCD_CERT_FILE = "${etcd_client_cert}";
+
ETCD_KEY_FILE = "${etcd_client_key}";
+
ETCD_CA_FILE = "${ca_pem}";
+
ETCDCTL_ENDPOINTS = "https://127.0.0.1:2379";
+
ETCDCTL_CACERT = "${ca_pem}";
+
ETCDCTL_CERT = "${etcd_cert}";
+
ETCDCTL_KEY = "${etcd_key}";
+
};
-
networking.firewall.allowedTCPPorts = [ 2380 ];
-
};
-
in
-
{
-
name = "etcd-cluster";
+
networking.firewall.allowedTCPPorts = [ 2380 ];
+
};
+
in
+
{
+
name = "etcd-cluster";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [ offline ];
-
};
+
meta.maintainers = with lib.maintainers; [ offline ];
-
nodes = {
-
node1 =
-
{ ... }:
-
{
-
require = [ nodeConfig ];
-
services.etcd = {
-
initialCluster = [
-
"node1=https://node1:2380"
-
"node2=https://node2:2380"
-
];
-
initialAdvertisePeerUrls = [ "https://node1:2380" ];
-
};
+
nodes = {
+
node1 =
+
{ ... }:
+
{
+
require = [ nodeConfig ];
+
services.etcd = {
+
initialCluster = [
+
"node1=https://node1:2380"
+
"node2=https://node2:2380"
+
];
+
initialAdvertisePeerUrls = [ "https://node1:2380" ];
};
+
};
-
node2 =
-
{ ... }:
-
{
-
require = [ nodeConfig ];
-
services.etcd = {
-
initialCluster = [
-
"node1=https://node1:2380"
-
"node2=https://node2:2380"
-
];
-
initialAdvertisePeerUrls = [ "https://node2:2380" ];
-
};
+
node2 =
+
{ ... }:
+
{
+
require = [ nodeConfig ];
+
services.etcd = {
+
initialCluster = [
+
"node1=https://node1:2380"
+
"node2=https://node2:2380"
+
];
+
initialAdvertisePeerUrls = [ "https://node2:2380" ];
};
+
};
-
node3 =
-
{ ... }:
-
{
-
require = [ nodeConfig ];
-
services.etcd = {
-
initialCluster = [
-
"node1=https://node1:2380"
-
"node2=https://node2:2380"
-
"node3=https://node3:2380"
-
];
-
initialAdvertisePeerUrls = [ "https://node3:2380" ];
-
initialClusterState = "existing";
-
};
+
node3 =
+
{ ... }:
+
{
+
require = [ nodeConfig ];
+
services.etcd = {
+
initialCluster = [
+
"node1=https://node1:2380"
+
"node2=https://node2:2380"
+
"node3=https://node3:2380"
+
];
+
initialAdvertisePeerUrls = [ "https://node3:2380" ];
+
initialClusterState = "existing";
};
-
};
+
};
+
};
-
testScript = ''
-
with subtest("should start etcd cluster"):
-
node1.start()
-
node2.start()
-
node1.wait_for_unit("etcd.service")
-
node2.wait_for_unit("etcd.service")
-
node2.wait_until_succeeds("etcdctl endpoint status")
-
node1.succeed("etcdctl put /foo/bar 'Hello world'")
-
node2.succeed("etcdctl get /foo/bar | grep 'Hello world'")
+
testScript = ''
+
with subtest("should start etcd cluster"):
+
node1.start()
+
node2.start()
+
node1.wait_for_unit("etcd.service")
+
node2.wait_for_unit("etcd.service")
+
node2.wait_until_succeeds("etcdctl endpoint status")
+
node1.succeed("etcdctl put /foo/bar 'Hello world'")
+
node2.succeed("etcdctl get /foo/bar | grep 'Hello world'")
-
with subtest("should add another member"):
-
node1.wait_until_succeeds("etcdctl member add node3 --peer-urls=https://node3:2380")
-
node3.start()
-
node3.wait_for_unit("etcd.service")
-
node3.wait_until_succeeds("etcdctl member list | grep 'node3'")
-
node3.succeed("etcdctl endpoint status")
+
with subtest("should add another member"):
+
node1.wait_until_succeeds("etcdctl member add node3 --peer-urls=https://node3:2380")
+
node3.start()
+
node3.wait_for_unit("etcd.service")
+
node3.wait_until_succeeds("etcdctl member list | grep 'node3'")
+
node3.succeed("etcdctl endpoint status")
-
with subtest("should survive member crash"):
-
node3.crash()
-
node1.succeed("etcdctl endpoint status")
-
node1.succeed("etcdctl put /foo/bar 'Hello degraded world'")
-
node1.succeed("etcdctl get /foo/bar | grep 'Hello degraded world'")
-
'';
-
}
-
)
+
with subtest("should survive member crash"):
+
node3.crash()
+
node1.succeed("etcdctl endpoint status")
+
node1.succeed("etcdctl put /foo/bar 'Hello degraded world'")
+
node1.succeed("etcdctl get /foo/bar | grep 'Hello degraded world'")
+
'';
+
}
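The cluster test shares one `nodeConfig` module across all three nodes via `require = [ nodeConfig ]`; `require` is an older spelling of what module code today usually writes as `imports`, and either way the shared settings are merged into each node with the per-node options layered on top by the module system. A reduced sketch of the pattern (not from this PR):

```nix
let
  # One module, merged into every node that imports it.
  sharedNode = {
    services.etcd.enable = true;
    networking.firewall.allowedTCPPorts = [ 2380 ];
  };
in
{
  nodes.node1 = {
    imports = [ sharedNode ];
    services.etcd.initialAdvertisePeerUrls = [ "https://node1:2380" ];
  };
  nodes.node2 = {
    imports = [ sharedNode ];
    services.etcd.initialAdvertisePeerUrls = [ "https://node2:2380" ];
  };
}
```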
+18 -26
nixos/tests/etcd/etcd.nix
···
# This test runs a simple etcd node
-
import ../make-test-python.nix (
-
{ pkgs, ... }:
-
{
-
name = "etcd";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [ offline ];
-
};
+
{ lib, ... }:
+
{
+
name = "etcd";
+
meta.maintainers = with lib.maintainers; [ offline ];
-
nodes = {
-
node =
-
{ ... }:
-
{
-
services.etcd.enable = true;
-
};
-
};
+
nodes.node = {
+
services.etcd.enable = true;
+
};
-
testScript = ''
-
with subtest("should start etcd node"):
-
node.start()
-
node.wait_for_unit("etcd.service")
-
# Add additional wait for actual readiness
-
node.wait_until_succeeds("etcdctl endpoint health")
+
testScript = ''
+
with subtest("should start etcd node"):
+
node.start()
+
node.wait_for_unit("etcd.service")
+
# Add additional wait for actual readiness
+
node.wait_until_succeeds("etcdctl endpoint health")
-
with subtest("should write and read some values to etcd"):
-
node.succeed("etcdctl put /foo/bar 'Hello world'")
-
node.succeed("etcdctl get /foo/bar | grep 'Hello world'")
-
'';
-
}
-
)
+
with subtest("should write and read some values to etcd"):
+
node.succeed("etcdctl put /foo/bar 'Hello world'")
+
node.succeed("etcdctl get /foo/bar | grep 'Hello world'")
+
'';
+
}
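The "additional wait for actual readiness" comment above reflects a pattern used throughout these tests: `wait_for_unit` only confirms that systemd reports the unit active, so the script polls the service's real interface with `wait_until_succeeds` before exercising it. A sketch of the shape, with a hypothetical `mydb` service and CLI:

```nix
{
  testScript = ''
    node.wait_for_unit("mydb.service")         # unit reports active
    node.wait_until_succeeds("mydb-cli ping")  # hypothetical CLI; poll until it answers
    node.succeed("mydb-cli put foo bar")       # only then exercise the service
  '';
}
```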
+136 -138
nixos/tests/fcitx5/default.nix
···
-
import ../make-test-python.nix (
-
{ lib, ... }:
-
rec {
-
name = "fcitx5";
-
meta.maintainers = with lib.maintainers; [ nevivurn ];
+
{ lib, ... }:
+
{
+
name = "fcitx5";
+
meta.maintainers = with lib.maintainers; [ nevivurn ];
-
nodes.machine =
-
{ pkgs, ... }:
-
{
-
imports = [
-
../common/user-account.nix
-
];
+
nodes.machine =
+
{ pkgs, ... }:
+
{
+
imports = [
+
../common/user-account.nix
+
];
-
environment.systemPackages = [
-
# To avoid clashing with xfce4-terminal
-
pkgs.alacritty
-
];
+
environment.systemPackages = [
+
# To avoid clashing with xfce4-terminal
+
pkgs.alacritty
+
];
-
services.displayManager.autoLogin = {
-
enable = true;
-
user = "alice";
-
};
+
services.displayManager.autoLogin = {
+
enable = true;
+
user = "alice";
+
};
-
services.xserver = {
-
enable = true;
-
displayManager.lightdm.enable = true;
-
desktopManager.xfce.enable = true;
-
};
+
services.xserver = {
+
enable = true;
+
displayManager.lightdm.enable = true;
+
desktopManager.xfce.enable = true;
+
};
-
i18n.inputMethod = {
-
enable = true;
-
type = "fcitx5";
-
fcitx5.addons = [
-
pkgs.fcitx5-chinese-addons
-
pkgs.fcitx5-hangul
-
pkgs.fcitx5-m17n
-
pkgs.fcitx5-mozc
-
];
-
fcitx5.settings = {
-
globalOptions = {
-
"Hotkey"."EnumerateSkipFirst" = "False";
-
"Hotkey/TriggerKeys"."0" = "Control+space";
-
"Hotkey/EnumerateForwardKeys"."0" = "Alt+Shift_L";
-
"Hotkey/EnumerateBackwardKeys"."0" = "Alt+Shift_R";
+
i18n.inputMethod = {
+
enable = true;
+
type = "fcitx5";
+
fcitx5.addons = [
+
pkgs.fcitx5-chinese-addons
+
pkgs.fcitx5-hangul
+
pkgs.fcitx5-m17n
+
pkgs.fcitx5-mozc
+
];
+
fcitx5.settings = {
+
globalOptions = {
+
"Hotkey"."EnumerateSkipFirst" = "False";
+
"Hotkey/TriggerKeys"."0" = "Control+space";
+
"Hotkey/EnumerateForwardKeys"."0" = "Alt+Shift_L";
+
"Hotkey/EnumerateBackwardKeys"."0" = "Alt+Shift_R";
+
};
+
inputMethod = {
+
"GroupOrder" = {
+
"0" = "NixOS_test";
+
};
+
"Groups/0" = {
+
"Default Layout" = "us";
+
"DefaultIM" = "wbx";
+
"Name" = "NixOS_test";
};
-
inputMethod = {
-
"GroupOrder" = {
-
"0" = "NixOS_test";
-
};
-
"Groups/0" = {
-
"Default Layout" = "us";
-
"DefaultIM" = "wbx";
-
"Name" = "NixOS_test";
-
};
-
"Groups/0/Items/0" = {
-
"Name" = "keyboard-us";
-
};
-
"Groups/0/Items/1" = {
-
"Layout" = "us";
-
"Name" = "wbx";
-
};
-
"Groups/0/Items/2" = {
-
"Layout" = "us";
-
"Name" = "hangul";
-
};
-
"Groups/0/Items/3" = {
-
"Layout" = "us";
-
"Name" = "m17n_sa_harvard-kyoto";
-
};
-
"Groups/0/Items/4" = {
-
"Layout" = "us";
-
"Name" = "mozc";
-
};
+
"Groups/0/Items/0" = {
+
"Name" = "keyboard-us";
+
};
+
"Groups/0/Items/1" = {
+
"Layout" = "us";
+
"Name" = "wbx";
+
};
+
"Groups/0/Items/2" = {
+
"Layout" = "us";
+
"Name" = "hangul";
+
};
+
"Groups/0/Items/3" = {
+
"Layout" = "us";
+
"Name" = "m17n_sa_harvard-kyoto";
+
};
+
"Groups/0/Items/4" = {
+
"Layout" = "us";
+
"Name" = "mozc";
};
};
};
};
+
};
-
testScript =
-
{ nodes, ... }:
-
let
-
user = nodes.machine.users.users.alice;
-
xauth = "${user.home}/.Xauthority";
-
in
-
''
-
start_all()
+
testScript =
+
{ nodes, ... }:
+
let
+
user = nodes.machine.users.users.alice;
+
xauth = "${user.home}/.Xauthority";
+
in
+
''
+
start_all()
-
machine.wait_for_x()
-
machine.wait_for_file("${xauth}")
-
machine.succeed("xauth merge ${xauth}")
-
machine.sleep(5)
+
machine.wait_for_x()
+
machine.wait_for_file("${xauth}")
+
machine.succeed("xauth merge ${xauth}")
+
machine.sleep(5)
-
machine.wait_until_succeeds("pgrep fcitx5")
-
machine.succeed("su - ${user.name} -c 'kill $(pgrep fcitx5)'")
-
machine.sleep(1)
+
machine.wait_until_succeeds("pgrep fcitx5")
+
machine.succeed("su - ${user.name} -c 'kill $(pgrep fcitx5)'")
+
machine.sleep(1)
-
machine.succeed("su - ${user.name} -c 'alacritty >&2 &'")
-
machine.wait_for_window("alice@machine")
+
machine.succeed("su - ${user.name} -c 'alacritty >&2 &'")
+
machine.wait_for_window("alice@machine")
-
machine.succeed("su - ${user.name} -c 'fcitx5 >&2 &'")
-
machine.sleep(10)
+
machine.succeed("su - ${user.name} -c 'fcitx5 >&2 &'")
+
machine.sleep(10)
-
### Type on terminal
-
machine.send_chars("echo ")
-
machine.sleep(1)
+
### Type on terminal
+
machine.send_chars("echo ")
+
machine.sleep(1)
-
### Start fcitx Unicode input
-
machine.send_key("ctrl-alt-shift-u")
-
machine.sleep(1)
+
### Start fcitx Unicode input
+
machine.send_key("ctrl-alt-shift-u")
+
machine.sleep(1)
-
### Search for smiling face
-
machine.send_chars("smil")
-
machine.sleep(1)
+
### Search for smiling face
+
machine.send_chars("smil")
+
machine.sleep(1)
-
### Navigate to the second one
-
machine.send_key("tab")
-
machine.sleep(1)
+
### Navigate to the second one
+
machine.send_key("tab")
+
machine.sleep(1)
-
### Choose it
-
machine.send_key("\n")
-
machine.sleep(1)
+
### Choose it
+
machine.send_key("\n")
+
machine.sleep(1)
-
### Start fcitx language input
-
machine.send_key("ctrl-spc")
-
machine.sleep(1)
+
### Start fcitx language input
+
machine.send_key("ctrl-spc")
+
machine.sleep(1)
-
### Default wubi, enter 一下
-
machine.send_chars("gggh ")
-
machine.sleep(1)
+
### Default wubi, enter 一下
+
machine.send_chars("gggh ")
+
machine.sleep(1)
-
### Switch to Hangul
-
machine.send_key("alt-shift")
-
machine.sleep(1)
+
### Switch to Hangul
+
machine.send_key("alt-shift")
+
machine.sleep(1)
-
### Enter 한
-
machine.send_chars("gks")
-
machine.sleep(1)
+
### Enter 한
+
machine.send_chars("gks")
+
machine.sleep(1)
-
### Switch to Harvard Kyoto
-
machine.send_key("alt-shift")
-
machine.sleep(1)
+
### Switch to Harvard Kyoto
+
machine.send_key("alt-shift")
+
machine.sleep(1)
-
### Enter क
-
machine.send_chars("ka")
-
machine.sleep(1)
+
### Enter क
+
machine.send_chars("ka")
+
machine.sleep(1)
-
### Switch to Mozc
-
machine.send_key("alt-shift")
-
machine.sleep(1)
+
### Switch to Mozc
+
machine.send_key("alt-shift")
+
machine.sleep(1)
-
### Enter か
-
machine.send_chars("ka\n")
-
machine.sleep(1)
+
### Enter か
+
machine.send_chars("ka\n")
+
machine.sleep(1)
-
### Turn off Fcitx
-
machine.send_key("ctrl-spc")
-
machine.sleep(1)
+
### Turn off Fcitx
+
machine.send_key("ctrl-spc")
+
machine.sleep(1)
-
### Redirect typed characters to a file
-
machine.send_chars(" > fcitx_test.out\n")
-
machine.sleep(1)
-
machine.screenshot("terminal_chars")
+
### Redirect typed characters to a file
+
machine.send_chars(" > fcitx_test.out\n")
+
machine.sleep(1)
+
machine.screenshot("terminal_chars")
-
### Verify that file contents are as expected
-
file_content = machine.succeed("cat ${user.home}/fcitx_test.out")
-
assert file_content == "☺一下한कか\n", f'output does not match input:\n{file_content}'
-
'';
-
}
-
)
+
### Verify that file contents are as expected
+
file_content = machine.succeed("cat ${user.home}/fcitx_test.out")
+
assert file_content == "☺一下한कか\n", f'output does not match input:\n{file_content}'
+
'';
+
}
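The fcitx5 conversion above is the most common shape in this batch: the import ./make-test-python.nix ( ... ) wrapper is dropped and the file becomes a bare NixOS test module, which runTest evaluates directly. A minimal sketch of the two forms, using a hypothetical example.nix (the module body itself is unchanged, only re-indented):

  # Before: the file passes its module to the legacy runner itself.
  import ./make-test-python.nix (
    { lib, ... }:
    {
      name = "example";
      # ...
    }
  )

  # After: the file is the module; all-tests.nix calls `runTest ./example.nix`.
  { lib, ... }:
  {
    name = "example";
    # ...
  }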
+4 -11
nixos/tests/ferretdb.nix
···
-
{
-
system ? builtins.currentSystem,
-
pkgs ? import ../.. { inherit system; },
-
...
-
}:
+
{ runTest, pkgs }:
let
-
lib = pkgs.lib;
+
inherit (pkgs) lib;
testScript = ''
machine.start()
machine.wait_for_unit("ferretdb.service")
···
machine.succeed("mongosh --eval 'use myNewDatabase;' --eval 'db.myCollection.insertOne( { x: 1 } );'")
'';
in
-
with import ../lib/testing-python.nix { inherit system; };
{
-
-
postgresql = makeTest {
+
postgresql = runTest {
inherit testScript;
name = "ferretdb-postgresql";
meta.maintainers = with lib.maintainers; [ julienmalka ];
···
environment.systemPackages = with pkgs; [ mongosh ];
};
};
-
-
sqlite = makeTest {
+
sqlite = runTest {
inherit testScript;
name = "ferretdb-sqlite";
meta.maintainers = with lib.maintainers; [ julienmalka ];
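ferretdb.nix defines two test variants, so it cannot become a single module. Instead its header becomes { runTest, pkgs }: and each makeTest call is replaced by runTest, keeping the file a function that returns an attribute set of tests; gemstash.nix at the end of this batch follows the same shape. The call site then has to supply those arguments itself. A sketch of the expected wiring; the all-tests.nix line for this file is outside this hunk, so the exact form is an assumption:

  # Hypothetical call site: a multi-test file is imported as a function
  # and handed runTest and pkgs, instead of being given to runTest directly.
  ferretdb = import ./ferretdb.nix { inherit runTest pkgs; };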
+95 -97
nixos/tests/firewall.nix
···
# Test the firewall module.
-
import ./make-test-python.nix (
-
{ pkgs, nftables, ... }:
-
{
-
name = "firewall" + pkgs.lib.optionalString nftables "-nftables";
-
meta = with pkgs.lib.maintainers; {
-
maintainers = [
-
rvfg
-
garyguo
-
];
-
};
+
{ lib, nftables, ... }:
+
{
+
name = "firewall" + lib.optionalString nftables "-nftables";
+
meta = with lib.maintainers; {
+
maintainers = [
+
rvfg
+
garyguo
+
];
+
};
-
nodes = {
-
walled =
-
{ ... }:
-
{
-
networking.firewall = {
-
enable = true;
-
logRefusedPackets = true;
-
# Syntax smoke test, not actually verified otherwise
-
allowedTCPPorts = [
-
25
-
993
-
8005
-
];
+
nodes = {
+
walled =
+
{ ... }:
+
{
+
networking.firewall = {
+
enable = true;
+
logRefusedPackets = true;
+
# Syntax smoke test, not actually verified otherwise
+
allowedTCPPorts = [
+
25
+
993
+
8005
+
];
+
allowedTCPPortRanges = [
+
{
+
from = 980;
+
to = 1000;
+
}
+
{
+
from = 990;
+
to = 1010;
+
}
+
{
+
from = 8000;
+
to = 8010;
+
}
+
];
+
interfaces.eth0 = {
+
allowedTCPPorts = [ 10003 ];
allowedTCPPortRanges = [
{
-
from = 980;
-
to = 1000;
-
}
-
{
-
from = 990;
-
to = 1010;
+
from = 10000;
+
to = 10005;
}
+
];
+
};
+
interfaces.eth3 = {
+
allowedUDPPorts = [ 10003 ];
+
allowedUDPPortRanges = [
{
-
from = 8000;
-
to = 8010;
+
from = 10000;
+
to = 10005;
}
];
-
interfaces.eth0 = {
-
allowedTCPPorts = [ 10003 ];
-
allowedTCPPortRanges = [
-
{
-
from = 10000;
-
to = 10005;
-
}
-
];
-
};
-
interfaces.eth3 = {
-
allowedUDPPorts = [ 10003 ];
-
allowedUDPPortRanges = [
-
{
-
from = 10000;
-
to = 10005;
-
}
-
];
-
};
-
};
-
networking.nftables.enable = nftables;
-
services.httpd.enable = true;
-
services.httpd.adminAddr = "foo@example.org";
-
-
specialisation.different-config.configuration = {
-
networking.firewall.rejectPackets = true;
};
};
+
networking.nftables.enable = nftables;
+
services.httpd.enable = true;
+
services.httpd.adminAddr = "foo@example.org";
-
attacker =
-
{ ... }:
-
{
-
services.httpd.enable = true;
-
services.httpd.adminAddr = "foo@example.org";
-
networking.firewall.enable = false;
+
specialisation.different-config.configuration = {
+
networking.firewall.rejectPackets = true;
};
-
};
+
};
-
testScript =
-
{ nodes, ... }:
-
let
-
unit = if nftables then "nftables" else "firewall";
-
in
-
''
-
start_all()
+
attacker =
+
{ ... }:
+
{
+
services.httpd.enable = true;
+
services.httpd.adminAddr = "foo@example.org";
+
networking.firewall.enable = false;
+
};
+
};
-
walled.wait_for_unit("${unit}")
-
walled.wait_for_unit("httpd")
-
attacker.wait_for_unit("network.target")
+
testScript =
+
{ nodes, ... }:
+
let
+
unit = if nftables then "nftables" else "firewall";
+
in
+
''
+
start_all()
-
# Local connections should still work.
-
walled.succeed("curl -v http://localhost/ >&2")
+
walled.wait_for_unit("${unit}")
+
walled.wait_for_unit("httpd")
+
attacker.wait_for_unit("network.target")
-
# Connections to the firewalled machine should fail, but ping should succeed.
-
attacker.fail("curl --fail --connect-timeout 2 http://walled/ >&2")
-
attacker.succeed("ping -c 1 walled >&2")
+
# Local connections should still work.
+
walled.succeed("curl -v http://localhost/ >&2")
-
# Outgoing connections/pings should still work.
-
walled.succeed("curl -v http://attacker/ >&2")
-
walled.succeed("ping -c 1 attacker >&2")
+
# Connections to the firewalled machine should fail, but ping should succeed.
+
attacker.fail("curl --fail --connect-timeout 2 http://walled/ >&2")
+
attacker.succeed("ping -c 1 walled >&2")
-
# Open tcp port 80 at runtime
-
walled.succeed("nixos-firewall-tool open tcp 80")
-
attacker.succeed("curl -v http://walled/ >&2")
+
# Outgoing connections/pings should still work.
+
walled.succeed("curl -v http://attacker/ >&2")
+
walled.succeed("ping -c 1 attacker >&2")
-
# Reset the firewall
-
walled.succeed("nixos-firewall-tool reset")
-
attacker.fail("curl --fail --connect-timeout 2 http://walled/ >&2")
+
# Open tcp port 80 at runtime
+
walled.succeed("nixos-firewall-tool open tcp 80")
+
attacker.succeed("curl -v http://walled/ >&2")
-
# If we stop the firewall, then connections should succeed.
-
walled.stop_job("${unit}")
-
attacker.succeed("curl -v http://walled/ >&2")
+
# Reset the firewall
+
walled.succeed("nixos-firewall-tool reset")
+
attacker.fail("curl --fail --connect-timeout 2 http://walled/ >&2")
-
# Check whether activation of a new configuration reloads the firewall.
-
walled.succeed(
-
"/run/booted-system/specialisation/different-config/bin/switch-to-configuration test 2>&1 | grep -qF ${unit}.service"
-
)
-
'';
-
}
-
)
+
# If we stop the firewall, then connections should succeed.
+
walled.stop_job("${unit}")
+
attacker.succeed("curl -v http://walled/ >&2")
+
+
# Check whether activation of a new configuration reloads the firewall.
+
walled.succeed(
+
"/run/booted-system/specialisation/different-config/bin/switch-to-configuration test 2>&1 | grep -qF ${unit}.service"
+
)
+
'';
+
}
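firewall.nix keeps its custom nftables module argument, so unlike the plain conversions it still needs that argument supplied per instantiation. Under runTest, custom arguments are injected through the module system rather than through a function call. A sketch, assuming the call site sets the argument via _module.args (that side of the change is not shown in this hunk):

  # Hypothetical wiring for the two firewall variants; the test's own
  # `name` already disambiguates them via lib.optionalString.
  firewall = runTest {
    imports = [ ./firewall.nix ];
    _module.args.nftables = false;
  };
  firewall-nftables = runTest {
    imports = [ ./firewall.nix ];
    _module.args.nftables = true;
  };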
+46 -56
nixos/tests/flannel.nix
···
-
import ./make-test-python.nix (
-
{ lib, ... }:
-
{
-
name = "flannel";
+
{ lib, ... }:
+
{
+
name = "flannel";
-
meta = with lib.maintainers; {
-
maintainers = [ offline ];
-
};
+
meta.maintainers = with lib.maintainers; [ offline ];
-
nodes =
-
let
-
flannelConfig =
-
{ pkgs, ... }:
-
{
-
services.flannel = {
-
enable = true;
-
backend = {
-
Type = "udp";
-
Port = 8285;
-
};
-
network = "10.1.0.0/16";
-
iface = "eth1";
-
etcd.endpoints = [ "http://etcd:2379" ];
-
};
-
-
networking.firewall.allowedUDPPorts = [ 8285 ];
+
nodes =
+
let
+
flannelConfig = {
+
services.flannel = {
+
enable = true;
+
backend = {
+
Type = "udp";
+
Port = 8285;
};
-
in
-
{
-
etcd =
-
{ ... }:
-
{
-
services = {
-
etcd = {
-
enable = true;
-
listenClientUrls = [ "http://0.0.0.0:2379" ]; # requires ip-address for binding
-
listenPeerUrls = [ "http://0.0.0.0:2380" ]; # requires ip-address for binding
-
advertiseClientUrls = [ "http://etcd:2379" ];
-
initialAdvertisePeerUrls = [ "http://etcd:2379" ];
-
initialCluster = [ "etcd=http://etcd:2379" ];
-
};
-
};
+
network = "10.1.0.0/16";
+
iface = "eth1";
+
etcd.endpoints = [ "http://etcd:2379" ];
+
};
-
networking.firewall.allowedTCPPorts = [ 2379 ];
-
};
+
networking.firewall.allowedUDPPorts = [ 8285 ];
+
};
+
in
+
{
+
etcd = {
+
services.etcd = {
+
enable = true;
+
listenClientUrls = [ "http://0.0.0.0:2379" ]; # requires ip-address for binding
+
listenPeerUrls = [ "http://0.0.0.0:2380" ]; # requires ip-address for binding
+
advertiseClientUrls = [ "http://etcd:2379" ];
+
initialAdvertisePeerUrls = [ "http://etcd:2379" ];
+
initialCluster = [ "etcd=http://etcd:2379" ];
+
};
-
node1 = flannelConfig;
-
node2 = flannelConfig;
+
networking.firewall.allowedTCPPorts = [ 2379 ];
};
-
testScript = ''
-
start_all()
+
node1 = flannelConfig;
+
node2 = flannelConfig;
+
};
+
+
testScript = ''
+
start_all()
-
node1.wait_for_unit("flannel.service")
-
node2.wait_for_unit("flannel.service")
+
node1.wait_for_unit("flannel.service")
+
node2.wait_for_unit("flannel.service")
-
node1.wait_until_succeeds("ip l show dev flannel0")
-
ip1 = node1.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
-
node2.wait_until_succeeds("ip l show dev flannel0")
-
ip2 = node2.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
+
node1.wait_until_succeeds("ip l show dev flannel0")
+
ip1 = node1.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
+
node2.wait_until_succeeds("ip l show dev flannel0")
+
ip2 = node2.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
-
node1.wait_until_succeeds(f"ping -c 1 {ip2}")
-
node2.wait_until_succeeds(f"ping -c 1 {ip1}")
-
'';
-
}
-
)
+
node1.wait_until_succeeds(f"ping -c 1 {ip2}")
+
node2.wait_until_succeeds(f"ping -c 1 {ip1}")
+
'';
+
}
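flannel.nix also shows the second recurring simplification: node configurations that never used their function arguments lose the { pkgs, ... }: and { ... }: wrappers and become plain attribute sets, which the module system accepts interchangeably; several of the freshrss files below do the same. A two-line illustration with hypothetical nodes:

  # Both are valid NixOS modules; the function form is only needed when
  # the module actually reads an argument such as pkgs.
  node1 = { pkgs, ... }: { environment.systemPackages = [ pkgs.iperf ]; };
  node2 = { services.flannel.enable = true; };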
+25 -27
nixos/tests/freshrss/caddy-sqlite.nix
···
-
import ../make-test-python.nix (
-
{ lib, pkgs, ... }:
-
{
-
name = "freshrss-caddy-sqlite";
-
meta.maintainers = with lib.maintainers; [
-
etu
-
stunkymonkey
-
];
+
{ lib, ... }:
+
{
+
name = "freshrss-caddy-sqlite";
+
meta.maintainers = with lib.maintainers; [
+
etu
+
stunkymonkey
+
];
-
nodes.machine =
-
{ pkgs, ... }:
-
{
-
services.freshrss = {
-
enable = true;
-
baseUrl = "http://localhost";
-
passwordFile = pkgs.writeText "password" "secret";
-
dataDir = "/srv/freshrss";
-
webserver = "caddy";
-
virtualHost = "freshrss:80";
-
};
+
nodes.machine =
+
{ pkgs, ... }:
+
{
+
services.freshrss = {
+
enable = true;
+
baseUrl = "http://localhost";
+
passwordFile = pkgs.writeText "password" "secret";
+
dataDir = "/srv/freshrss";
+
webserver = "caddy";
+
virtualHost = "freshrss:80";
};
+
};
-
testScript = ''
-
machine.wait_for_unit("multi-user.target")
-
machine.wait_for_open_port(80)
-
response = machine.succeed("curl -vvv -s -H 'Host: freshrss' http://localhost:80/i/")
-
assert '<title>Login · FreshRSS</title>' in response, "Login page didn't load successfully"
-
'';
-
}
-
)
+
testScript = ''
+
machine.wait_for_unit("multi-user.target")
+
machine.wait_for_open_port(80)
+
response = machine.succeed("curl -vvv -s -H 'Host: freshrss' http://localhost:80/i/")
+
assert '<title>Login · FreshRSS</title>' in response, "Login page didn't load successfully"
+
'';
+
}
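In caddy-sqlite.nix the now-unused pkgs is also dropped from the top-level header, while nodes.machine keeps its own { pkgs, ... }: argument: each module's header only needs to declare the arguments that module itself uses, because the module system passes them to every module independently. A sketch of that scoping (hello is a stand-in package):

  { lib, ... }:      # top level declares only what it uses (lib here)
  {
    nodes.machine =
      { pkgs, ... }: # the node module requests pkgs for itself
      {
        environment.systemPackages = [ pkgs.hello ];
      };
  }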
+7 -7
nixos/tests/freshrss/default.nix
···
-
{ system, pkgs, ... }:
+
{ runTest }:
{
-
extensions = import ./extensions.nix { inherit system pkgs; };
-
http-auth = import ./http-auth.nix { inherit system pkgs; };
-
none-auth = import ./none-auth.nix { inherit system pkgs; };
-
pgsql = import ./pgsql.nix { inherit system pkgs; };
-
nginx-sqlite = import ./nginx-sqlite.nix { inherit system pkgs; };
-
caddy-sqlite = import ./caddy-sqlite.nix { inherit system pkgs; };
+
extensions = runTest ./extensions.nix;
+
http-auth = runTest ./http-auth.nix;
+
none-auth = runTest ./none-auth.nix;
+
pgsql = runTest ./pgsql.nix;
+
nginx-sqlite = runTest ./nginx-sqlite.nix;
+
caddy-sqlite = runTest ./caddy-sqlite.nix;
}
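freshrss/default.nix is the call-site half of the pattern: instead of threading system and pkgs into every sub-file, the aggregator now takes only runTest and applies it to each test module path. The aggregator itself is presumably invoked from all-tests.nix along these lines (an assumption; that line is not in this hunk):

  # Hypothetical: freshrss.pgsql, freshrss.caddy-sqlite, etc. then become
  # individually buildable tests.
  freshrss = import ./freshrss { inherit runTest; };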
+18 -21
nixos/tests/freshrss/extensions.nix
···
-
import ../make-test-python.nix (
-
{ lib, pkgs, ... }:
-
{
-
name = "freshrss-extensions";
+
{
+
name = "freshrss-extensions";
-
nodes.machine =
-
{ pkgs, ... }:
-
{
-
services.freshrss = {
-
enable = true;
-
baseUrl = "http://localhost";
-
authType = "none";
-
extensions = [ pkgs.freshrss-extensions.youtube ];
-
};
+
nodes.machine =
+
{ pkgs, ... }:
+
{
+
services.freshrss = {
+
enable = true;
+
baseUrl = "http://localhost";
+
authType = "none";
+
extensions = [ pkgs.freshrss-extensions.youtube ];
};
+
};
-
testScript = ''
-
machine.wait_for_unit("multi-user.target")
-
machine.wait_for_open_port(80)
-
response = machine.succeed("curl -vvv -s http://localhost:80/i/?c=extension")
-
assert '<span class="ext_name disabled">YouTube Video Feed</span>' in response, "Extension not present in extensions page."
-
'';
-
}
-
)
+
testScript = ''
+
machine.wait_for_unit("multi-user.target")
+
machine.wait_for_open_port(80)
+
response = machine.succeed("curl -vvv -s http://localhost:80/i/?c=extension")
+
assert '<span class="ext_name disabled">YouTube Video Feed</span>' in response, "Extension not present in extensions page."
+
'';
+
}
+19 -23
nixos/tests/freshrss/http-auth.nix
···
-
import ../make-test-python.nix (
-
{ lib, pkgs, ... }:
-
{
-
name = "freshrss-http-auth";
-
meta.maintainers = with lib.maintainers; [ mattchrist ];
+
{ lib, ... }:
+
{
+
name = "freshrss-http-auth";
+
meta.maintainers = with lib.maintainers; [ mattchrist ];
-
nodes.machine =
-
{ pkgs, ... }:
-
{
-
services.freshrss = {
-
enable = true;
-
baseUrl = "http://localhost";
-
dataDir = "/srv/freshrss";
-
authType = "http_auth";
-
};
-
};
+
nodes.machine = {
+
services.freshrss = {
+
enable = true;
+
baseUrl = "http://localhost";
+
dataDir = "/srv/freshrss";
+
authType = "http_auth";
+
};
+
};
-
testScript = ''
-
machine.wait_for_unit("multi-user.target")
-
machine.wait_for_open_port(80)
-
response = machine.succeed("curl -vvv -s -H 'Host: freshrss' -H 'Remote-User: testuser' http://localhost:80/i/")
-
assert 'Account: testuser' in response, "http_auth method didn't work."
-
'';
-
}
-
)
+
testScript = ''
+
machine.wait_for_unit("multi-user.target")
+
machine.wait_for_open_port(80)
+
response = machine.succeed("curl -vvv -s -H 'Host: freshrss' -H 'Remote-User: testuser' http://localhost:80/i/")
+
assert 'Account: testuser' in response, "http_auth method didn't work."
+
'';
+
}
+23 -25
nixos/tests/freshrss/nginx-sqlite.nix
···
-
import ../make-test-python.nix (
-
{ lib, pkgs, ... }:
-
{
-
name = "freshrss-nginx-sqlite";
-
meta.maintainers = with lib.maintainers; [
-
etu
-
stunkymonkey
-
];
+
{ lib, ... }:
+
{
+
name = "freshrss-nginx-sqlite";
+
meta.maintainers = with lib.maintainers; [
+
etu
+
stunkymonkey
+
];
-
nodes.machine =
-
{ pkgs, ... }:
-
{
-
services.freshrss = {
-
enable = true;
-
baseUrl = "http://localhost";
-
passwordFile = pkgs.writeText "password" "secret";
-
dataDir = "/srv/freshrss";
-
};
+
nodes.machine =
+
{ pkgs, ... }:
+
{
+
services.freshrss = {
+
enable = true;
+
baseUrl = "http://localhost";
+
passwordFile = pkgs.writeText "password" "secret";
+
dataDir = "/srv/freshrss";
};
+
};
-
testScript = ''
-
machine.wait_for_unit("multi-user.target")
-
machine.wait_for_open_port(80)
-
response = machine.succeed("curl -vvv -s -H 'Host: freshrss' http://localhost:80/i/")
-
assert '<title>Login · FreshRSS</title>' in response, "Login page didn't load successfully"
-
'';
-
}
-
)
+
testScript = ''
+
machine.wait_for_unit("multi-user.target")
+
machine.wait_for_open_port(80)
+
response = machine.succeed("curl -vvv -s -H 'Host: freshrss' http://localhost:80/i/")
+
assert '<title>Login · FreshRSS</title>' in response, "Login page didn't load successfully"
+
'';
+
}
+18 -22
nixos/tests/freshrss/none-auth.nix
···
-
import ../make-test-python.nix (
-
{ lib, pkgs, ... }:
-
{
-
name = "freshrss-none-auth";
-
meta.maintainers = with lib.maintainers; [ mattchrist ];
+
{ lib, ... }:
+
{
+
name = "freshrss-none-auth";
+
meta.maintainers = with lib.maintainers; [ mattchrist ];
-
nodes.machine =
-
{ pkgs, ... }:
-
{
-
services.freshrss = {
-
enable = true;
-
baseUrl = "http://localhost";
-
authType = "none";
-
};
-
};
+
nodes.machine = {
+
services.freshrss = {
+
enable = true;
+
baseUrl = "http://localhost";
+
authType = "none";
+
};
+
};
-
testScript = ''
-
machine.wait_for_unit("multi-user.target")
-
machine.wait_for_open_port(80)
-
response = machine.succeed("curl -vvv -s http://localhost:80/i/")
-
assert '<title> · FreshRSS</title>' in response, "FreshRSS stream page didn't load successfully"
-
'';
-
}
-
)
+
testScript = ''
+
machine.wait_for_unit("multi-user.target")
+
machine.wait_for_open_port(80)
+
response = machine.succeed("curl -vvv -s http://localhost:80/i/")
+
assert '<title> · FreshRSS</title>' in response, "FreshRSS stream page didn't load successfully"
+
'';
+
}
+46 -48
nixos/tests/freshrss/pgsql.nix
···
-
import ../make-test-python.nix (
-
{ lib, pkgs, ... }:
-
{
-
name = "freshrss-pgsql";
-
meta.maintainers = with lib.maintainers; [
-
etu
-
stunkymonkey
-
];
+
{ lib, ... }:
+
{
+
name = "freshrss-pgsql";
+
meta.maintainers = with lib.maintainers; [
+
etu
+
stunkymonkey
+
];
-
nodes.machine =
-
{ pkgs, ... }:
-
{
-
services.freshrss = {
-
enable = true;
-
baseUrl = "http://localhost";
-
passwordFile = pkgs.writeText "password" "secret";
-
dataDir = "/srv/freshrss";
-
database = {
-
type = "pgsql";
-
port = 5432;
-
user = "freshrss";
-
passFile = pkgs.writeText "db-password" "db-secret";
-
};
+
nodes.machine =
+
{ pkgs, ... }:
+
{
+
services.freshrss = {
+
enable = true;
+
baseUrl = "http://localhost";
+
passwordFile = pkgs.writeText "password" "secret";
+
dataDir = "/srv/freshrss";
+
database = {
+
type = "pgsql";
+
port = 5432;
+
user = "freshrss";
+
passFile = pkgs.writeText "db-password" "db-secret";
};
+
};
-
services.postgresql = {
-
enable = true;
-
ensureDatabases = [ "freshrss" ];
-
ensureUsers = [
-
{
-
name = "freshrss";
-
ensureDBOwnership = true;
-
}
-
];
-
initialScript = pkgs.writeText "postgresql-password" ''
-
CREATE ROLE freshrss WITH LOGIN PASSWORD 'db-secret' CREATEDB;
-
'';
-
};
+
services.postgresql = {
+
enable = true;
+
ensureDatabases = [ "freshrss" ];
+
ensureUsers = [
+
{
+
name = "freshrss";
+
ensureDBOwnership = true;
+
}
+
];
+
initialScript = pkgs.writeText "postgresql-password" ''
+
CREATE ROLE freshrss WITH LOGIN PASSWORD 'db-secret' CREATEDB;
+
'';
+
};
-
systemd.services."freshrss-config" = {
-
requires = [ "postgresql.target" ];
-
after = [ "postgresql.target" ];
-
};
+
systemd.services."freshrss-config" = {
+
requires = [ "postgresql.target" ];
+
after = [ "postgresql.target" ];
};
+
};
-
testScript = ''
-
machine.wait_for_unit("multi-user.target")
-
machine.wait_for_open_port(5432)
-
machine.wait_for_open_port(80)
-
response = machine.succeed("curl -vvv -s -H 'Host: freshrss' http://localhost:80/i/")
-
assert '<title>Login · FreshRSS</title>' in response, "Login page didn't load successfully"
-
'';
-
}
-
)
+
testScript = ''
+
machine.wait_for_unit("multi-user.target")
+
machine.wait_for_open_port(5432)
+
machine.wait_for_open_port(80)
+
response = machine.succeed("curl -vvv -s -H 'Host: freshrss' http://localhost:80/i/")
+
assert '<title>Login · FreshRSS</title>' in response, "Login page didn't load successfully"
+
'';
+
}
+12 -19
nixos/tests/fsck.nix
···
-
{
-
system ? builtins.currentSystem,
-
config ? { },
-
pkgs ? import ../.. { inherit system config; },
-
systemdStage1 ? false,
-
}:
+
{ systemdStage1, ... }:
-
import ./make-test-python.nix {
+
{
name = "fsck";
-
nodes.machine =
-
{ lib, ... }:
-
{
-
virtualisation.emptyDiskImages = [ 1 ];
+
nodes.machine = {
+
virtualisation.emptyDiskImages = [ 1 ];
-
virtualisation.fileSystems = {
-
"/mnt" = {
-
device = "/dev/vdb";
-
fsType = "ext4";
-
autoFormat = true;
-
};
+
virtualisation.fileSystems = {
+
"/mnt" = {
+
device = "/dev/vdb";
+
fsType = "ext4";
+
autoFormat = true;
};
+
};
-
boot.initrd.systemd.enable = systemdStage1;
-
};
+
boot.initrd.systemd.enable = systemdStage1;
+
};
testScript =
{ nodes, ... }:
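fsck.nix similarly trades its default-argument header for a required systemdStage1 module argument, mirroring the firewall pattern above; the trailing ... keeps the module open to the standard arguments (lib, pkgs, config) that the module system passes alongside it. A hypothetical instantiation of the systemd-stage-1 variant:

  fsck-systemd-stage-1 = runTest {
    imports = [ ./fsck.nix ];
    _module.args.systemdStage1 = true;
  };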
+16 -32
nixos/tests/gemstash.nix
···
-
{
-
system ? builtins.currentSystem,
-
config ? { },
-
pkgs ? import ../.. { inherit system config; },
-
}:
-
-
with import ../lib/testing-python.nix { inherit system pkgs; };
-
with pkgs.lib;
-
+
{ runTest, pkgs }:
let
-
common_meta = {
-
maintainers = [ maintainers.viraptor ];
-
};
+
inherit (pkgs) lib;
in
{
-
gemstash_works = makeTest {
+
gemstash_works = runTest {
name = "gemstash-works";
-
meta = common_meta;
+
meta.maintainers = with lib.maintainers; [ viraptor ];
-
nodes.machine =
-
{ config, pkgs, ... }:
-
{
-
services.gemstash = {
-
enable = true;
-
};
-
};
+
nodes.machine = {
+
services.gemstash.enable = true;
+
};
# gemstash responds to http requests
testScript = ''
···
'';
};
-
gemstash_custom_port = makeTest {
+
gemstash_custom_port = runTest {
name = "gemstash-custom-port";
-
meta = common_meta;
+
meta.maintainers = with lib.maintainers; [ viraptor ];
-
nodes.machine =
-
{ config, pkgs, ... }:
-
{
-
services.gemstash = {
-
enable = true;
-
openFirewall = true;
-
settings = {
-
bind = "tcp://0.0.0.0:12345";
-
};
+
nodes.machine = {
+
services.gemstash = {
+
enable = true;
+
openFirewall = true;
+
settings = {
+
bind = "tcp://0.0.0.0:12345";
};
};
+
};
# gemstash responds to http requests
testScript = ''