# To run the test on the unfree ELK use the following command:
# cd path/to/nixpkgs
# NIXPKGS_ALLOW_UNFREE=1 nix-build -A nixosTests.elk.unfree.ELK-7
4
5{ system ? builtins.currentSystem,
6 config ? {},
7 pkgs ? import ../.. { inherit system config; },
8}:
9
10let
11 inherit (pkgs) lib;
12
  # Elasticsearch HTTP endpoint on the test node; interpolated into the
  # curl commands of the embedded test script.
  esUrl = "http://localhost:9200";
14
15 mkElkTest = name : elk :
16 import ./make-test-python.nix ({
17 inherit name;
18 meta = with pkgs.lib.maintainers; {
19 maintainers = [ eelco offline basvandijk ];
20 };
21 nodes = {
22 one =
23 { pkgs, lib, ... }: {
24 # Not giving the machine at least 2060MB results in elasticsearch failing with the following error:
25 #
26 # OpenJDK 64-Bit Server VM warning:
27 # INFO: os::commit_memory(0x0000000085330000, 2060255232, 0)
28 # failed; error='Cannot allocate memory' (errno=12)
29 #
30 # There is insufficient memory for the Java Runtime Environment to continue.
31 # Native memory allocation (mmap) failed to map 2060255232 bytes for committing reserved memory.
32 #
33 # When setting this to 2500 I got "Kernel panic - not syncing: Out of
34 # memory: compulsory panic_on_oom is enabled" so lets give it even a
35 # bit more room:
36 virtualisation.memorySize = 3000;
37
38 # For querying JSON objects returned from elasticsearch and kibana.
39 environment.systemPackages = [ pkgs.jq ];
40
41 services = {
42
43 journalbeat = {
44 enable = elk ? journalbeat;
45 package = elk.journalbeat;
46 extraConfig = pkgs.lib.mkOptionDefault (''
47 logging:
48 to_syslog: true
49 level: warning
50 metrics.enabled: false
51 output.elasticsearch:
52 hosts: [ "127.0.0.1:9200" ]
53 journalbeat.inputs:
54 - paths: []
55 seek: cursor
56 '');
57 };
58
59 filebeat = {
60 enable = elk ? filebeat;
61 package = elk.filebeat;
62 inputs.journald.id = "everything";
63
64 inputs.log = {
65 enabled = true;
66 paths = [
67 "/var/lib/filebeat/test"
68 ];
69 };
70
71 settings = {
72 logging.level = "info";
73 };
74 };
75
76 metricbeat = {
77 enable = true;
78 package = elk.metricbeat;
79 modules.system = {
80 metricsets = ["cpu" "load" "memory" "network" "process" "process_summary" "uptime" "socket_summary"];
81 enabled = true;
82 period = "5s";
83 processes = [".*"];
84 cpu.metrics = ["percentages" "normalized_percentages"];
85 core.metrics = ["percentages"];
86 };
87 settings = {
88 output.elasticsearch = {
89 hosts = ["127.0.0.1:9200"];
90 };
91 };
92 };
93
94 logstash = {
95 enable = true;
96 package = elk.logstash;
97 inputConfig = ''
98 exec { command => "echo -n flowers" interval => 1 type => "test" }
99 exec { command => "echo -n dragons" interval => 1 type => "test" }
100 '';
101 filterConfig = ''
102 if [message] =~ /dragons/ {
103 drop {}
104 }
105 '';
106 outputConfig = ''
107 file {
108 path => "/tmp/logstash.out"
109 codec => line { format => "%{message}" }
110 }
111 elasticsearch {
112 hosts => [ "${esUrl}" ]
113 }
114 '';
115 };
116
117 elasticsearch = {
118 enable = true;
119 package = elk.elasticsearch;
120 };
121
122 elasticsearch-curator = {
123 enable = true;
124 actionYAML = ''
125 ---
126 actions:
127 1:
128 action: delete_indices
129 description: >-
130 Delete indices older than 1 second (based on index name), for logstash-
131 prefixed indices. Ignore the error if the filter does not result in an
132 actionable list of indices (ignore_empty_list) and exit cleanly.
133 options:
134 allow_ilm_indices: true
135 ignore_empty_list: True
136 disable_action: False
137 filters:
138 - filtertype: pattern
139 kind: prefix
140 value: logstash-
141 - filtertype: age
142 source: name
143 direction: older
144 timestring: '%Y.%m.%d'
145 unit: seconds
146 unit_count: 1
147 '';
148 };
149 };
150 };
151 };
152
153 passthru.elkPackages = elk;
154 testScript =
155 let
156 valueObject = lib.optionalString (lib.versionAtLeast elk.elasticsearch.version "7") ".value";
157 in ''
158 import json
159
160
161 def expect_hits(message):
162 dictionary = {"query": {"match": {"message": message}}}
163 return (
164 "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
165 + "-H 'Content-Type: application/json' "
166 + "-d '{}' ".format(json.dumps(dictionary))
167 + " | tee /dev/console"
168 + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
169 )
170
171
172 def expect_no_hits(message):
173 dictionary = {"query": {"match": {"message": message}}}
174 return (
175 "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
176 + "-H 'Content-Type: application/json' "
177 + "-d '{}' ".format(json.dumps(dictionary))
178 + " | tee /dev/console"
179 + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} == 0 end'"
180 )
181
182
183 def has_metricbeat():
184 dictionary = {"query": {"match": {"event.dataset": {"query": "system.cpu"}}}}
185 return (
186 "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
187 + "-H 'Content-Type: application/json' "
188 + "-d '{}' ".format(json.dumps(dictionary))
189 + " | tee /dev/console"
190 + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
191 )
192
193
194 start_all()
195
196 one.wait_for_unit("elasticsearch.service")
197 one.wait_for_open_port(9200)
198
199 # Continue as long as the status is not "red". The status is probably
200 # "yellow" instead of "green" because we are using a single elasticsearch
201 # node which elasticsearch considers risky.
202 #
203 # TODO: extend this test with multiple elasticsearch nodes
204 # and see if the status turns "green".
205 one.wait_until_succeeds(
206 "curl --silent --show-error --fail-with-body '${esUrl}/_cluster/health'"
207 + " | jq -es 'if . == [] then null else .[] | .status != \"red\" end'"
208 )
209
210 with subtest("Perform some simple logstash tests"):
211 one.wait_for_unit("logstash.service")
212 one.wait_until_succeeds("cat /tmp/logstash.out | grep flowers")
213 one.wait_until_succeeds("cat /tmp/logstash.out | grep -v dragons")
214
215 with subtest("Metricbeat is running"):
216 one.wait_for_unit("metricbeat.service")
217
218 with subtest("Metricbeat metrics arrive in elasticsearch"):
219 one.wait_until_succeeds(has_metricbeat())
220
221 with subtest("Logstash messages arive in elasticsearch"):
222 one.wait_until_succeeds(expect_hits("flowers"))
223 one.wait_until_succeeds(expect_no_hits("dragons"))
224
225 '' + lib.optionalString (elk ? journalbeat) ''
226 with subtest(
227 "A message logged to the journal is ingested by elasticsearch via journalbeat"
228 ):
229 one.wait_for_unit("journalbeat.service")
230 one.execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat")
231 one.wait_until_succeeds(
232 expect_hits("Supercalifragilisticexpialidocious")
233 )
234 '' + lib.optionalString (elk ? filebeat) ''
235 with subtest(
236 "A message logged to the journal is ingested by elasticsearch via filebeat"
237 ):
238 one.wait_for_unit("filebeat.service")
239 one.execute("echo 'Superdupercalifragilisticexpialidocious' | systemd-cat")
240 one.wait_until_succeeds(
241 expect_hits("Superdupercalifragilisticexpialidocious")
242 )
243 one.execute(
244 "echo 'SuperdupercalifragilisticexpialidociousIndeed' >> /var/lib/filebeat/test"
245 )
246 one.wait_until_succeeds(
247 expect_hits("SuperdupercalifragilisticexpialidociousIndeed")
248 )
249 '' + ''
250 with subtest("Elasticsearch-curator works"):
251 one.systemctl("stop logstash")
252 one.systemctl("start elasticsearch-curator")
253 one.wait_until_succeeds(
254 '! curl --silent --show-error --fail-with-body "${esUrl}/_cat/indices" | grep logstash | grep ^'
255 )
256 '';
257 }) { inherit pkgs system; };
in {
  # We currently only package upstream binaries.
  # Feel free to package an SSPL licensed source-based package!
  # ELK-7 = mkElkTest "elk-7-oss" {
  #   name = "elk-7";
  #   elasticsearch = pkgs.elasticsearch7-oss;
  #   logstash = pkgs.logstash7-oss;
  #   filebeat = pkgs.filebeat7;
  #   metricbeat = pkgs.metricbeat7;
  # };
  # The packages below are unfree (Elastic license), so the attribute set is
  # marked with dontRecurseIntoAttrs to keep automatic evaluation from
  # descending into it; build the tests explicitly with
  # NIXPKGS_ALLOW_UNFREE=1 as described in the header comment.
  unfree = lib.dontRecurseIntoAttrs {
    ELK-7 = mkElkTest "elk-7" {
      elasticsearch = pkgs.elasticsearch7;
      logstash = pkgs.logstash7;
      filebeat = pkgs.filebeat7;
      metricbeat = pkgs.metricbeat7;
    };
  };
}