···
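# Pick the prebuilt image archives for the evaluation platform.
# `attrs.${system} or fallback` returns the fallback when the key is missing,
# so unsupported systems abort with the error below instead of failing later.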
throwSystem = throw "RKE2: Unsupported system: ${pkgs.stdenv.hostPlatform.system}";
coreImages =
  {
    aarch64-linux = rke2.images-core-linux-arm64-tar-zst;
    x86_64-linux = rke2.images-core-linux-amd64-tar-zst;
  }
  .${pkgs.stdenv.hostPlatform.system} or throwSystem;
canalImages =
  {
    aarch64-linux = rke2.images-canal-linux-arm64-tar-zst;
    x86_64-linux = rke2.images-canal-linux-amd64-tar-zst;
  }
  .${pkgs.stdenv.hostPlatform.system} or throwSystem;

helloImage = pkgs.dockerTools.buildImage {
  name = "test.local/hello";
  tag = "local";
  # zstd-compress the archive to match the *.tar.zst name it is linked to below
  compressor = "zstd";
  copyToRoot = pkgs.buildEnv {
    name = "rke2-hello-image-env";
    # socat provides the listener used by the daemonset's command
    paths = [ pkgs.socat ];
  };
};

# A daemonset that responds 'hello' on port 8000
networkTestDaemonset = pkgs.writeText "test.yml" ''
···
      image: test.local/hello:local
      command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo hello"]
'';
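
# A DaemonSet schedules one pod per eligible node, which is what lets the test
# script below compare the daemonset's numberReady count against len(machines).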

tokenFile = pkgs.writeText "token" "p@s$w0rd";
agentTokenFile = pkgs.writeText "agent-token" "agentP@s$w0rd";

# Let flannel use eth1 to enable inter-node communication in tests
canalConfig = pkgs.writeText "rke2-canal-config.yaml" ''
  apiVersion: helm.cattle.io/v1
  kind: HelmChartConfig
  metadata:
    name: rke2-canal
    namespace: kube-system
  spec:
    valuesContent: |-
      flannel:
        iface: "eth1"
'';
in
{
  name = "${rke2.name}-multi-node";
  meta.maintainers = rke2.meta.maintainers;

  nodes = {
    server =
      { config, ... }:
      {
        # Setup image archives to be imported by rke2
        systemd.tmpfiles.settings."10-rke2" = {
          "/var/lib/rancher/rke2/agent/images/rke2-images-core.tar.zst" = {
            # "L+" (re)creates a symlink; the argument is the link target
            "L+".argument = "${coreImages}";
          };
          "/var/lib/rancher/rke2/agent/images/rke2-images-canal.tar.zst" = {
            "L+".argument = "${canalImages}";
          };
          "/var/lib/rancher/rke2/agent/images/hello.tar.zst" = {
            "L+".argument = "${helloImage}";
          };
          # Copy the canal config so that rke2 can write the remaining default values to it
          "/var/lib/rancher/rke2/server/manifests/rke2-canal-config.yaml" = {
            "C".argument = "${canalConfig}";
          };
        };

        # Flannel VXLAN
        networking.firewall.allowedUDPPorts = [ 8472 ];
        networking.firewall.allowedTCPPorts = [
          # Kubernetes API server
          6443
          # RKE2 supervisor API, used by the agent to join (see serverAddr below)
          9345
          # Canal CNI health checks
          9099
        ];

        # RKE2 needs more resources than the default
        virtualisation.cores = 4;
        virtualisation.memorySize = 4096;
        virtualisation.diskSize = 8192;

        services.rke2 = {
          enable = true;
          role = "server";
          inherit tokenFile agentTokenFile;
          # Without nodeIP the apiserver starts with the wrong service IP family
          nodeIP = config.networking.primaryIPAddress;
          # Components the test does not need
          disable = [
            "rke2-snapshot-controller"
            "rke2-snapshot-controller-crd"
            "rke2-snapshot-validation-webhook"
          ];
        };
      };
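
    # The agent joins with the dedicated agent token, so the server token
    # never has to leave the server node.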

    agent =
      { config, nodes, ... }:
      {
        # Setup image archives to be imported by rke2
        systemd.tmpfiles.settings."10-rke2" = {
          "/var/lib/rancher/rke2/agent/images/rke2-images-core.tar.zst" = {
            "L+".argument = "${coreImages}";
          };
          "/var/lib/rancher/rke2/agent/images/rke2-images-canal.tar.zst" = {
            "L+".argument = "${canalImages}";
          };
          "/var/lib/rancher/rke2/agent/images/hello.tar.zst" = {
            "L+".argument = "${helloImage}";
          };
          "/var/lib/rancher/rke2/server/manifests/rke2-canal-config.yaml" = {
            "C".argument = "${canalConfig}";
          };
        };

        # Canal CNI health checks
        networking.firewall.allowedTCPPorts = [ 9099 ];
        # Flannel VXLAN
        networking.firewall.allowedUDPPorts = [ 8472 ];

        # The agent node can work with fewer resources
        virtualisation.memorySize = 2048;
        virtualisation.diskSize = 8192;

        services.rke2 = {
          enable = true;
          role = "agent";
          tokenFile = agentTokenFile;
          serverAddr = "https://${nodes.server.networking.primaryIPAddress}:9345";
          nodeIP = config.networking.primaryIPAddress;
        };
      };
  };
···
kubectl = "${pkgs.kubectl}/bin/kubectl --kubeconfig=/etc/rancher/rke2/rke2.yaml";
jq = "${pkgs.jq}/bin/jq";
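
# Both bindings are interpolated into the Python test script below; every
# ${kubectl} and ${jq} expands to an absolute store path at evaluation time.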
in
''
start_all()
machines = [server, agent]

server.wait_for_unit("rke2-server")
agent.wait_for_unit("rke2-agent")

# Wait for the agent to be ready
server.wait_until_succeeds(r"""${kubectl} wait --for='jsonpath={.status.conditions[?(@.type=="Ready")].status}=True' nodes/agent""")
server.succeed("${kubectl} cluster-info")
server.wait_until_succeeds("${kubectl} get serviceaccount default")
# Now create a pod on each node via a daemonset and verify they can talk to each other.
server.succeed("${kubectl} apply -f ${networkTestDaemonset}")
server.wait_until_succeeds(
    f'[ "$(${kubectl} get ds test -o json | ${jq} .status.numberReady)" -eq {len(machines)} ]'
)

pods = server.succeed("${kubectl} get po -o json | ${jq} '.items[].metadata.name' -r").splitlines()
pod_ips = [
    server.succeed(f"${kubectl} get po {n} -o json | ${jq} '.status.podIP' -cr").strip() for n in pods
]

# Verify each node can ping each pod IP
for pod_ip in pod_ips:
    # The CNI sometimes needs a little time
    server.wait_until_succeeds(f"ping -c 1 {pod_ip}", timeout=5)
    agent.wait_until_succeeds(f"ping -c 1 {pod_ip}", timeout=5)

# Verify the server can exec into the pod
# resp = server.succeed(f"${kubectl} exec {pod} -- socat TCP:{pod_ip}:8000 -")
# assert resp.strip() == "hello", f"Unexpected response from hello daemonset: {resp.strip()}"
'';
}