nixos/slurm: update test, add test for enableStools

* Add pure submit host to test 'enableStools'
* Disable client.enable on control machine

Changed files
+18 -5
nixos
tests
+18 -5
nixos/tests/slurm.nix
···
import ./make-test.nix ({ pkgs, ... }:
let mungekey = "mungeverryweakkeybuteasytointegratoinatest";
slurmconfig = {
-
client.enable = true;
controlMachine = "control";
nodeName = ''
control
···
# TODO the slurmd port and slurmctld port should be configuration options and
# automatically allowed by the firewall.
networking.firewall.enable = false;
-
services.slurm = slurmconfig;
+
services.slurm = {
+
client.enable = true;
+
} // slurmconfig;
};
in {
+
control =
{ config, pkgs, ...}:
{
···
server.enable = true;
} // slurmconfig;
};
+
+
submit =
+
{ config, pkgs, ...}:
+
{
+
networking.firewall.enable = false;
+
services.slurm = {
+
enableStools = true;
+
} // slurmconfig;
+
};
+
node1 = computeNode;
node2 = computeNode;
node3 = computeNode;
};
+
testScript =
''
startAll;
# Set up authentication across the cluster
-
foreach my $node (($control,$node1,$node2,$node3))
+
foreach my $node (($submit,$control,$node1,$node2,$node3))
{
$node->waitForUnit("default.target");
···
};
subtest "can_start_slurmd", sub {
-
foreach my $node (($control,$node1,$node2,$node3))
+
foreach my $node (($node1,$node2,$node3))
{
$node->succeed("systemctl restart slurmd.service");
$node->waitForUnit("slurmd");
···
subtest "run_distributed_command", sub {
# Run `hostname` on 3 nodes of the partition (i.e. on all 3 nodes).
# The output must contain the 3 different names
-
$control->succeed("srun -N 3 hostname | sort | uniq | wc -l | xargs test 3 -eq");
+
$submit->succeed("srun -N 3 hostname | sort | uniq | wc -l | xargs test 3 -eq");
};
'';
})