# Test a minimal hbase cluster
{ pkgs, ... }:
import ../make-test-python.nix (
  {
    hadoop ? pkgs.hadoop,
    hbase ? pkgs.hbase,
    ...
  }:
  with pkgs.lib;
  {
    name = "hadoop-hbase";

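    # Shared settings: every node gets the same core-site.xml pointing HDFS
    # clients at the namenode, and defOpts enables each service and opens its
    # firewall ports.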
    nodes =
      let
        coreSite = {
          "fs.defaultFS" = "hdfs://namenode:8020";
        };
        defOpts = {
          enable = true;
          openFirewall = true;
        };
        zookeeperQuorum = "zookeeper";
      in
      {
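        # Standalone ZooKeeper instance acting as the HBase quorum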
        zookeeper =
          { ... }:
          {
            services.zookeeper.enable = true;
            networking.firewall.allowedTCPPorts = [ 2181 ];
          };
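        # HDFS namenode; formatOnInit formats the filesystem on first boot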
        namenode =
          { ... }:
          {
            services.hadoop = {
              hdfs = {
                namenode = defOpts // {
                  formatOnInit = true;
                };
              };
              inherit coreSite;
            };
          };
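        # HDFS datanode; the VM gets a larger disk so HBase has room to store data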
        datanode =
          { ... }:
          {
            virtualisation.diskSize = 8192;
            services.hadoop = {
              hdfs.datanode = defOpts;
              inherit coreSite;
            };
          };

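        # HBase master; initHDFS prepares the HBase directories in HDFS before the master starts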
        master =
          { ... }:
          {
            services.hadoop = {
              inherit coreSite;
              hbase = {
                inherit zookeeperQuorum;
                master = defOpts // {
                  initHDFS = true;
                };
              };
            };
          };
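        # Region server that stores and serves table data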
        regionserver =
          { ... }:
          {
            services.hadoop = {
              inherit coreSite;
              hbase = {
                inherit zookeeperQuorum;
                regionServer = defOpts;
              };
            };
          };
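        # Thrift gateway exposing the HBase API on port 9090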
        thrift =
          { ... }:
          {
            services.hadoop = {
              inherit coreSite;
              hbase = {
                inherit zookeeperQuorum;
                thrift = defOpts;
              };
            };
          };
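        # REST gateway exposing the HBase API on port 8080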
        rest =
          { ... }:
          {
            services.hadoop = {
              inherit coreSite;
              hbase = {
                inherit zookeeperQuorum;
                rest = defOpts;
              };
            };
          };
      };

    testScript = ''
      start_all()

      # wait for HDFS cluster
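      # 8020 = namenode RPC, 9870 = namenode web UI;
      # 9864/9866/9867 = datanode HTTP, data transfer and IPC ports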
      namenode.wait_for_unit("hdfs-namenode")
      namenode.wait_for_unit("network.target")
      namenode.wait_for_open_port(8020)
      namenode.wait_for_open_port(9870)
      datanode.wait_for_unit("hdfs-datanode")
      datanode.wait_for_unit("network.target")
      datanode.wait_for_open_port(9864)
      datanode.wait_for_open_port(9866)
      datanode.wait_for_open_port(9867)

      # wait for ZK
      zookeeper.wait_for_unit("zookeeper")
      zookeeper.wait_for_open_port(2181)

      # wait for HDFS cluster to be RW
      datanode.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")

      # wait for HBase to start up
      master.wait_for_unit("hbase-master")
      regionserver.wait_for_unit("hbase-regionserver")

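      # smoke test via the HBase shell: check cluster status, then create and describe a table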
      assert "1 active master, 0 backup masters, 1 servers" in master.succeed("echo status | HADOOP_USER_NAME=hbase hbase shell -n")
      regionserver.wait_until_succeeds("echo \"create 't1','f1'\" | HADOOP_USER_NAME=hbase hbase shell -n")
      assert "NAME => 'f1'" in regionserver.succeed("echo \"describe 't1'\" | HADOOP_USER_NAME=hbase hbase shell -n")

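      # the REST gateway should report the running HBase version; the Thrift gateway only needs to be listening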
      rest.wait_for_open_port(8080)
      assert "${hbase.version}" in regionserver.succeed("curl http://rest:8080/version/cluster")

      thrift.wait_for_open_port(9090)
    '';

    meta.maintainers = with maintainers; [ illustris ];
  }
)