1{ system ? builtins.currentSystem }:
2
3with import ../lib/testing.nix { inherit system; };
4with import ../lib/qemu-flags.nix;
5with pkgs.lib;
6
7let
8 redisPod = pkgs.writeText "redis-master-pod.json" (builtins.toJSON {
9 kind = "Pod";
10 apiVersion = "v1";
11 metadata.name = "redis";
12 metadata.labels.name = "redis";
13 spec.containers = [{
14 name = "redis";
15 image = "redis";
16 args = ["--bind" "0.0.0.0"];
17 imagePullPolicy = "Never";
18 ports = [{
19 name = "redis-server";
20 containerPort = 6379;
21 }];
22 }];
23 });
24
25 redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
26 kind = "Service";
27 apiVersion = "v1";
28 metadata.name = "redis";
29 spec = {
30 ports = [{port = 6379; targetPort = 6379;}];
31 selector = {name = "redis";};
32 };
33 });
34
35 redisImage = pkgs.dockerTools.buildImage {
36 name = "redis";
37 tag = "latest";
38 contents = pkgs.redis;
39 config.Entrypoint = "/bin/redis-server";
40 };
41
  # Perl test-script fragment shared by both tests: load the locally built
  # Redis image, create the pod and service, wait until the pod reports
  # Running, then verify that the service's cluster DNS name resolves (via
  # the DNS server at 10.10.0.1) and that the port accepts TCP connections.
  # In the last line, `\$\(...\)` and `\@` stop Perl from interpolating, so
  # the guest shell sees:
  #   nc -z $(dig @10.10.0.1 redis.default.svc.cluster.local +short) 6379
  testSimplePod = ''
    $kubernetes->execute("docker load < ${redisImage}");
    $kubernetes->waitUntilSucceeds("kubectl create -f ${redisPod}");
    $kubernetes->succeed("kubectl create -f ${redisService}");
    $kubernetes->waitUntilSucceeds("kubectl get pod redis | grep Running");
    $kubernetes->succeed("nc -z \$\(dig \@10.10.0.1 redis.default.svc.cluster.local +short\) 6379");
  '';
49in {
50 # This test runs kubernetes on a single node
51 trivial = makeTest {
52 name = "kubernetes-trivial";
53
54 nodes = {
55 kubernetes =
56 { config, pkgs, lib, nodes, ... }:
57 {
58 virtualisation.memorySize = 768;
59 virtualisation.diskSize = 2048;
60
61 programs.bash.enableCompletion = true;
62 environment.systemPackages = with pkgs; [ netcat bind ];
63
64 services.kubernetes.roles = ["master" "node"];
65 virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0";
66
67 networking.bridges.cbr0.interfaces = [];
68 networking.interfaces.cbr0 = {};
69 };
70 };
71
72 testScript = ''
73 startAll;
74
75 $kubernetes->waitUntilSucceeds("kubectl get nodes | grep kubernetes | grep Ready");
76
77 ${testSimplePod}
78 '';
79 };
80
  # This test runs a multi-node cluster: a three-member TLS-secured etcd
  # cluster, two HA kubernetes masters behind an haproxy TCP load balancer,
  # and two worker nodes.
  cluster = let
    # Run `cmd` in a derivation named `file` with openssl on PATH; the
    # command is expected to write its result to $out.
    runWithOpenSSL = file: cmd: pkgs.runCommand file {
      buildInputs = [ pkgs.openssl ];
    } cmd;
85
    # Self-signed CA (CN=etcd-ca) used to sign every server and client
    # certificate below.
    ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
    ca_pem = runWithOpenSSL "ca.pem" ''
      openssl req \
        -x509 -new -nodes -key ${ca_key} \
        -days 10000 -out $out -subj "/CN=etcd-ca"
    '';
    # etcd server key/CSR/certificate; the SANs (etcd1-3, 127.0.0.1) are
    # taken from openssl_cnf via -extensions v3_req.
    etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
    etcd_csr = runWithOpenSSL "etcd.csr" ''
      openssl req \
        -new -key ${etcd_key} \
        -out $out -subj "/CN=etcd" \
        -config ${openssl_cnf}
    '';
    etcd_cert = runWithOpenSSL "etcd.pem" ''
      openssl x509 \
        -req -in ${etcd_csr} \
        -CA ${ca_pem} -CAkey ${ca_key} \
        -CAcreateserial -out $out \
        -days 365 -extensions v3_req \
        -extfile ${openssl_cnf}
    '';
107
108 etcd_client_key = runWithOpenSSL "etcd-client-key.pem"
109 "openssl genrsa -out $out 2048";
110
111 etcd_client_csr = runWithOpenSSL "etcd-client-key.pem" ''
112 openssl req \
113 -new -key ${etcd_client_key} \
114 -out $out -subj "/CN=etcd-client" \
115 -config ${client_openssl_cnf}
116 '';
117
118 etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
119 openssl x509 \
120 -req -in ${etcd_client_csr} \
121 -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
122 -out $out -days 365 -extensions v3_req \
123 -extfile ${client_openssl_cnf}
124 '';
125
    # API server serving key/CSR/certificate; the SANs (kubernetes,
    # kubernetes.default(.svc(.cluster.local)), 10.10.10.1) come from
    # apiserver_cnf via -extensions v3_req.
    apiserver_key = runWithOpenSSL "apiserver-key.pem" "openssl genrsa -out $out 2048";

    apiserver_csr = runWithOpenSSL "apiserver.csr" ''
      openssl req \
        -new -key ${apiserver_key} \
        -out $out -subj "/CN=kube-apiserver" \
        -config ${apiserver_cnf}
    '';

    apiserver_cert = runWithOpenSSL "apiserver.pem" ''
      openssl x509 \
        -req -in ${apiserver_csr} \
        -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
        -out $out -days 365 -extensions v3_req \
        -extfile ${apiserver_cnf}
    '';
142
    # Worker key/CSR/certificate, used both as the kubelet serving cert and
    # as the workers' client cert toward the apiserver; the SANs
    # (kubeWorker1/2) come from worker_cnf via -extensions v3_req.
    worker_key = runWithOpenSSL "worker-key.pem" "openssl genrsa -out $out 2048";

    worker_csr = runWithOpenSSL "worker.csr" ''
      openssl req \
        -new -key ${worker_key} \
        -out $out -subj "/CN=kube-worker" \
        -config ${worker_cnf}
    '';

    worker_cert = runWithOpenSSL "worker.pem" ''
      openssl x509 \
        -req -in ${worker_csr} \
        -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
        -out $out -days 365 -extensions v3_req \
        -extfile ${worker_cnf}
    '';
159
    # openssl extension config for the etcd server cert: serverAuth EKU with
    # the three etcd hostnames and loopback as subjectAltNames.
    openssl_cnf = pkgs.writeText "openssl.cnf" ''
      [req]
      req_extensions = v3_req
      distinguished_name = req_distinguished_name
      [req_distinguished_name]
      [ v3_req ]
      basicConstraints = CA:FALSE
      keyUsage = digitalSignature, keyEncipherment
      extendedKeyUsage = serverAuth
      subjectAltName = @alt_names
      [alt_names]
      DNS.1 = etcd1
      DNS.2 = etcd2
      DNS.3 = etcd3
      IP.1 = 127.0.0.1
    '';

    # Extension config for the etcd client cert: clientAuth EKU, no SANs.
    client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
      [req]
      req_extensions = v3_req
      distinguished_name = req_distinguished_name
      [req_distinguished_name]
      [ v3_req ]
      basicConstraints = CA:FALSE
      keyUsage = digitalSignature, keyEncipherment
      extendedKeyUsage = clientAuth
    '';

    # Extension config for the apiserver cert: the in-cluster service names
    # plus 10.10.10.1 as subjectAltNames.
    apiserver_cnf = pkgs.writeText "apiserver-openssl.cnf" ''
      [req]
      req_extensions = v3_req
      distinguished_name = req_distinguished_name
      [req_distinguished_name]
      [ v3_req ]
      basicConstraints = CA:FALSE
      keyUsage = nonRepudiation, digitalSignature, keyEncipherment
      subjectAltName = @alt_names
      [alt_names]
      DNS.1 = kubernetes
      DNS.2 = kubernetes.default
      DNS.3 = kubernetes.default.svc
      DNS.4 = kubernetes.default.svc.cluster.local
      IP.1 = 10.10.10.1
    '';

    # Extension config for the worker cert: both worker hostnames as SANs.
    worker_cnf = pkgs.writeText "worker-openssl.cnf" ''
      [req]
      req_extensions = v3_req
      distinguished_name = req_distinguished_name
      [req_distinguished_name]
      [ v3_req ]
      basicConstraints = CA:FALSE
      keyUsage = nonRepudiation, digitalSignature, keyEncipherment
      subjectAltName = @alt_names
      [alt_names]
      DNS.1 = kubeWorker1
      DNS.2 = kubeWorker2
    '';
218
219 etcdNodeConfig = {
220 virtualisation.memorySize = 128;
221
222 services = {
223 etcd = {
224 enable = true;
225 keyFile = etcd_key;
226 certFile = etcd_cert;
227 trustedCaFile = ca_pem;
228 peerClientCertAuth = true;
229 listenClientUrls = ["https://0.0.0.0:2379"];
230 listenPeerUrls = ["https://0.0.0.0:2380"];
231 };
232 };
233
234 environment.variables = {
235 ETCDCTL_CERT_FILE = "${etcd_client_cert}";
236 ETCDCTL_KEY_FILE = "${etcd_client_key}";
237 ETCDCTL_CA_FILE = "${ca_pem}";
238 ETCDCTL_PEERS = "https://127.0.0.1:2379";
239 };
240
241 networking.firewall.allowedTCPPorts = [ 2379 2380 ];
242 };
243
    # Config shared by every kubernetes machine (masters and workers):
    # flannel overlay networking backed by the etcd cluster, docker wired
    # into the flannel-assigned subnet, and etcd client credentials for the
    # kubernetes services.
    kubeConfig = {
      virtualisation.diskSize = 2048;
      programs.bash.enableCompletion = true;

      services.flannel = {
        enable = true;
        network = "10.10.0.0/16";
        iface = "eth1";
        etcd = {
          endpoints = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
          keyFile = etcd_client_key;
          certFile = etcd_client_cert;
          caFile = ca_pem;
        };
      };

      # vxlan
      networking.firewall.allowedUDPPorts = [ 8472 ];

      # docker must start after flannel and reads the per-host subnet from
      # /run/flannel/subnet.env, which defines $FLANNEL_SUBNET used below.
      systemd.services.docker.after = ["flannel.service"];
      systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/subnet.env";
      virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --bip $FLANNEL_SUBNET";

      services.kubernetes.verbose = true;
      services.kubernetes.etcd = {
        servers = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
        keyFile = etcd_client_key;
        certFile = etcd_client_cert;
        caFile = ca_pem;
      };

      # Debugging tools (dig, tcpdump, ...) for interactive test runs.
      environment.systemPackages = [ pkgs.bind pkgs.tcpdump pkgs.utillinux ];
    };
277
    # Config for the two HA master nodes: apiserver on 443 plus
    # leader-elected scheduler and controller-manager, on top of kubeConfig.
    kubeMasterConfig = {pkgs, ...}: {
      require = [kubeConfig];

      # kube apiserver
      networking.firewall.allowedTCPPorts = [ 443 ];

      virtualisation.memorySize = 512;

      services.kubernetes = {
        roles = ["master"];
        # Both masters run scheduler and controller-manager; leader election
        # ensures only one instance of each is active at a time.
        scheduler.leaderElect = true;
        controllerManager.leaderElect = true;

        apiserver = {
          publicAddress = "0.0.0.0";
          # NOTE(review): hard-coded address — presumably within the test
          # driver's VM network; confirm it matches the guests' addressing.
          advertiseAddress = "192.168.1.8";
          tlsKeyFile = apiserver_key;
          tlsCertFile = apiserver_cert;
          clientCaFile = ca_pem;
          # Credentials the apiserver uses when connecting out to kubelets.
          kubeletClientCaFile = ca_pem;
          kubeletClientKeyFile = worker_key;
          kubeletClientCertFile = worker_cert;
        };
      };
    };
303
304 kubeWorkerConfig = { pkgs, ... }: {
305 require = [kubeConfig];
306
307 virtualisation.memorySize = 512;
308
309 # kubelet
310 networking.firewall.allowedTCPPorts = [ 10250 ];
311
312 services.kubernetes = {
313 roles = ["node"];
314 kubeconfig = {
315 server = "https://kubernetes:443";
316 caFile = ca_pem;
317 certFile = worker_cert;
318 keyFile = worker_key;
319 };
320 kubelet = {
321 tlsKeyFile = worker_key;
322 tlsCertFile = worker_cert;
323 };
324 };
325 };
326 in makeTest {
327 name = "kubernetes-cluster";
328
329 nodes = {
330 etcd1 = { config, pkgs, nodes, ... }: {
331 require = [etcdNodeConfig];
332 services.etcd = {
333 advertiseClientUrls = ["https://etcd1:2379"];
334 initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
335 initialAdvertisePeerUrls = ["https://etcd1:2380"];
336 };
337 };
338
339 etcd2 = { config, pkgs, ... }: {
340 require = [etcdNodeConfig];
341 services.etcd = {
342 advertiseClientUrls = ["https://etcd2:2379"];
343 initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
344 initialAdvertisePeerUrls = ["https://etcd2:2380"];
345 };
346 };
347
348 etcd3 = { config, pkgs, ... }: {
349 require = [etcdNodeConfig];
350 services.etcd = {
351 advertiseClientUrls = ["https://etcd3:2379"];
352 initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
353 initialAdvertisePeerUrls = ["https://etcd3:2380"];
354 };
355 };
356
357 kubeMaster1 = { config, pkgs, lib, nodes, ... }: {
358 require = [kubeMasterConfig];
359 };
360
361 kubeMaster2 = { config, pkgs, lib, nodes, ... }: {
362 require = [kubeMasterConfig];
363 };
364
365 # Kubernetes TCP load balancer
366 kubernetes = { config, pkgs, ... }: {
367 # kubernetes
368 networking.firewall.allowedTCPPorts = [ 443 ];
369
370 services.haproxy.enable = true;
371 services.haproxy.config = ''
372 global
373 log 127.0.0.1 local0 notice
374 user haproxy
375 group haproxy
376
377 defaults
378 log global
379 retries 2
380 timeout connect 3000
381 timeout server 5000
382 timeout client 5000
383
384 listen kubernetes
385 bind 0.0.0.0:443
386 mode tcp
387 option ssl-hello-chk
388 balance roundrobin
389 server kube-master-1 kubeMaster1:443 check
390 server kube-master-2 kubeMaster2:443 check
391 '';
392 };
393
394 kubeWorker1 = { config, pkgs, lib, nodes, ... }: {
395 require = [kubeWorkerConfig];
396 };
397
398 kubeWorker2 = { config, pkgs, lib, nodes, ... }: {
399 require = [kubeWorkerConfig];
400 };
401 };
402
403 testScript = ''
404 startAll;
405
406 ${testSimplePod}
407 '';
408 };
409}