# To run the test on the unfree ELK use the following command:
2# cd path/to/nixpkgs
3# NIXPKGS_ALLOW_UNFREE=1 nix-build -A nixosTests.elk.unfree.ELK-6
4
# Standard NixOS-test entry point: callable with a target system, an
# optional nixpkgs config, and the package set itself.
{ system ? builtins.currentSystem,
  config ? {},
  pkgs ? import ../.. { inherit system config; },
}:

let
  inherit (pkgs) lib;

  # All health/search checks below talk to the single-node elasticsearch
  # over plain HTTP on its default port.
  esUrl = "http://localhost:9200";
14
  # Builds one NixOS VM test for a given combination of ELK packages.
  # `elk` is an attrset of packages (elasticsearch, logstash, kibana,
  # metricbeat, and optionally journalbeat or filebeat); the optional
  # attributes also toggle the corresponding services and subtests below.
  mkElkTest = name : elk :
    import ./make-test-python.nix ({
      inherit name;
      meta = with pkgs.lib.maintainers; {
        maintainers = [ eelco offline basvandijk ];
      };
      nodes = {
        # A single machine runs the entire stack: elasticsearch, logstash,
        # kibana, curator, and the (optional) beats shippers.
        one =
          { pkgs, lib, ... }: {
            # Not giving the machine at least 2060MB results in elasticsearch failing with the following error:
            #
            # OpenJDK 64-Bit Server VM warning:
            # INFO: os::commit_memory(0x0000000085330000, 2060255232, 0)
            # failed; error='Cannot allocate memory' (errno=12)
            #
            # There is insufficient memory for the Java Runtime Environment to continue.
            # Native memory allocation (mmap) failed to map 2060255232 bytes for committing reserved memory.
            #
            # When setting this to 2500 I got "Kernel panic - not syncing: Out of
            # memory: compulsory panic_on_oom is enabled" so let's give it even a
            # bit more room:
            virtualisation.memorySize = 3000;

            # For querying JSON objects returned from elasticsearch and kibana.
            environment.systemPackages = [ pkgs.jq ];

            services = {

              # Only enabled when the package set provides a `journalbeat`
              # attribute (the ELK-6 variants); ships journal entries to the
              # local elasticsearch instance.
              journalbeat = {
                enable = elk ? journalbeat;
                package = elk.journalbeat;
                extraConfig = pkgs.lib.mkOptionDefault (''
                  logging:
                    to_syslog: true
                    level: warning
                  metrics.enabled: false
                  output.elasticsearch:
                    hosts: [ "127.0.0.1:9200" ]
                  journalbeat.inputs:
                  - paths: []
                    seek: cursor
                '');
              };

              # Only enabled when the package set provides a `filebeat`
              # attribute (ELK-7); reads both the journal and a plain log
              # file under /var/lib/filebeat/test.
              filebeat = {
                enable = elk ? filebeat;
                package = elk.filebeat;
                inputs.journald.id = "everything";

                inputs.log = {
                  enabled = true;
                  paths = [
                    "/var/lib/filebeat/test"
                  ];
                };

                settings = {
                  logging.level = "info";
                };
              };

              # Always enabled: pushes system metrics straight to the local
              # elasticsearch so the test can assert they were indexed.
              metricbeat = {
                enable = true;
                package = elk.metricbeat;
                modules.system = {
                  metricsets = ["cpu" "load" "memory" "network" "process" "process_summary" "uptime" "socket_summary"];
                  enabled = true;
                  period = "5s";
                  processes = [".*"];
                  cpu.metrics = ["percentages" "normalized_percentages"];
                  core.metrics = ["percentages"];
                };
                settings = {
                  output.elasticsearch = {
                    hosts = ["127.0.0.1:9200"];
                  };
                };
              };

              # Emits "flowers" and "dragons" messages every second, dropping
              # the "dragons" ones in the filter stage, so the test script can
              # assert on both presence and absence of messages.
              logstash = {
                enable = true;
                package = elk.logstash;
                inputConfig = ''
                  exec { command => "echo -n flowers" interval => 1 type => "test" }
                  exec { command => "echo -n dragons" interval => 1 type => "test" }
                '';
                filterConfig = ''
                  if [message] =~ /dragons/ {
                    drop {}
                  }
                '';
                # Messages go both to a local file (checked directly) and to
                # elasticsearch (checked via the _search API).
                outputConfig = ''
                  file {
                    path => "/tmp/logstash.out"
                    codec => line { format => "%{message}" }
                  }
                  elasticsearch {
                    hosts => [ "${esUrl}" ]
                  }
                '';
              };

              elasticsearch = {
                enable = true;
                package = elk.elasticsearch;
              };

              kibana = {
                enable = true;
                package = elk.kibana;
              };

              # Deletes logstash-prefixed indices older than one second, so the
              # "Elasticsearch-curator works" subtest can watch them disappear.
              elasticsearch-curator = {
                enable = true;
                actionYAML = ''
                  ---
                  actions:
                    1:
                      action: delete_indices
                      description: >-
                        Delete indices older than 1 second (based on index name), for logstash-
                        prefixed indices. Ignore the error if the filter does not result in an
                        actionable list of indices (ignore_empty_list) and exit cleanly.
                      options:
                        allow_ilm_indices: true
                        ignore_empty_list: True
                        disable_action: False
                      filters:
                      - filtertype: pattern
                        kind: prefix
                        value: logstash-
                      - filtertype: age
                        source: name
                        direction: older
                        timestring: '%Y.%m.%d'
                        unit: seconds
                        unit_count: 1
                '';
              };
            };
          };
      };
157
158 passthru.elkPackages = elk;
159 testScript =
160 let
161 valueObject = lib.optionalString (lib.versionAtLeast elk.elasticsearch.version "7") ".value";
162 in ''
163 import json
164
165
166 def expect_hits(message):
167 dictionary = {"query": {"match": {"message": message}}}
168 return (
169 "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
170 + "-H 'Content-Type: application/json' "
171 + "-d '{}' ".format(json.dumps(dictionary))
172 + " | tee /dev/console"
173 + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
174 )
175
176
177 def expect_no_hits(message):
178 dictionary = {"query": {"match": {"message": message}}}
179 return (
180 "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
181 + "-H 'Content-Type: application/json' "
182 + "-d '{}' ".format(json.dumps(dictionary))
183 + " | tee /dev/console"
184 + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} == 0 end'"
185 )
186
187
188 def has_metricbeat():
189 dictionary = {"query": {"match": {"event.dataset": {"query": "system.cpu"}}}}
190 return (
191 "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
192 + "-H 'Content-Type: application/json' "
193 + "-d '{}' ".format(json.dumps(dictionary))
194 + " | tee /dev/console"
195 + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
196 )
197
198
199 start_all()
200
201 one.wait_for_unit("elasticsearch.service")
202 one.wait_for_open_port(9200)
203
204 # Continue as long as the status is not "red". The status is probably
205 # "yellow" instead of "green" because we are using a single elasticsearch
206 # node which elasticsearch considers risky.
207 #
208 # TODO: extend this test with multiple elasticsearch nodes
209 # and see if the status turns "green".
210 one.wait_until_succeeds(
211 "curl --silent --show-error --fail-with-body '${esUrl}/_cluster/health'"
212 + " | jq -es 'if . == [] then null else .[] | .status != \"red\" end'"
213 )
214
215 with subtest("Perform some simple logstash tests"):
216 one.wait_for_unit("logstash.service")
217 one.wait_until_succeeds("cat /tmp/logstash.out | grep flowers")
218 one.wait_until_succeeds("cat /tmp/logstash.out | grep -v dragons")
219
220 with subtest("Kibana is healthy"):
221 one.wait_for_unit("kibana.service")
222 one.wait_until_succeeds(
223 "curl --silent --show-error --fail-with-body 'http://localhost:5601/api/status'"
224 + " | jq -es 'if . == [] then null else .[] | .status.overall.state == \"green\" end'"
225 )
226
227 with subtest("Metricbeat is running"):
228 one.wait_for_unit("metricbeat.service")
229
230 with subtest("Metricbeat metrics arrive in elasticsearch"):
231 one.wait_until_succeeds(has_metricbeat())
232
233 with subtest("Logstash messages arive in elasticsearch"):
234 one.wait_until_succeeds(expect_hits("flowers"))
235 one.wait_until_succeeds(expect_no_hits("dragons"))
236
237 '' + lib.optionalString (elk ? journalbeat) ''
238 with subtest(
239 "A message logged to the journal is ingested by elasticsearch via journalbeat"
240 ):
241 one.wait_for_unit("journalbeat.service")
242 one.execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat")
243 one.wait_until_succeeds(
244 expect_hits("Supercalifragilisticexpialidocious")
245 )
246 '' + lib.optionalString (elk ? filebeat) ''
247 with subtest(
248 "A message logged to the journal is ingested by elasticsearch via filebeat"
249 ):
250 one.wait_for_unit("filebeat.service")
251 one.execute("echo 'Superdupercalifragilisticexpialidocious' | systemd-cat")
252 one.wait_until_succeeds(
253 expect_hits("Superdupercalifragilisticexpialidocious")
254 )
255 one.execute(
256 "echo 'SuperdupercalifragilisticexpialidociousIndeed' >> /var/lib/filebeat/test"
257 )
258 one.wait_until_succeeds(
259 expect_hits("SuperdupercalifragilisticexpialidociousIndeed")
260 )
261 '' + ''
262 with subtest("Elasticsearch-curator works"):
263 one.systemctl("stop logstash")
264 one.systemctl("start elasticsearch-curator")
265 one.wait_until_succeeds(
266 '! curl --silent --show-error --fail-with-body "${esUrl}/_cat/indices" | grep logstash | grep ^'
267 )
268 '';
269 }) { inherit pkgs system; };
in {
  # Free (OSS-licensed) ELK-6 variant; the only one built by default.
  ELK-6 = mkElkTest "elk-6-oss" {
    # NOTE(review): this `name` attribute is not consumed by mkElkTest
    # (the test name is passed as a separate argument); it only ends up in
    # passthru.elkPackages — confirm before removing.
    name = "elk-6-oss";
    elasticsearch = pkgs.elasticsearch6-oss;
    logstash = pkgs.logstash6-oss;
    kibana = pkgs.kibana6-oss;
    journalbeat = pkgs.journalbeat6;
    metricbeat = pkgs.metricbeat6;
  };
  # We currently only package upstream binaries.
  # Feel free to package an SSPL licensed source-based package!
  # ELK-7 = mkElkTest "elk-7-oss" {
  #   name = "elk-7";
  #   elasticsearch = pkgs.elasticsearch7-oss;
  #   logstash = pkgs.logstash7-oss;
  #   kibana = pkgs.kibana7-oss;
  #   filebeat = pkgs.filebeat7;
  #   metricbeat = pkgs.metricbeat7;
  # };
  # Unfree variants: require NIXPKGS_ALLOW_UNFREE=1 to evaluate (see the
  # header comment); wrapped in dontRecurseIntoAttrs so they are skipped
  # when recursing over the test attrset by default.
  unfree = lib.dontRecurseIntoAttrs {
    ELK-6 = mkElkTest "elk-6" {
      elasticsearch = pkgs.elasticsearch6;
      logstash = pkgs.logstash6;
      kibana = pkgs.kibana6;
      journalbeat = pkgs.journalbeat6;
      metricbeat = pkgs.metricbeat6;
    };
    ELK-7 = mkElkTest "elk-7" {
      elasticsearch = pkgs.elasticsearch7;
      logstash = pkgs.logstash7;
      kibana = pkgs.kibana7;
      filebeat = pkgs.filebeat7;
      metricbeat = pkgs.metricbeat7;
    };
  };
}