# To run the test on the unfree ELK use the following command:
# cd path/to/nixpkgs
# NIXPKGS_ALLOW_UNFREE=1 nix-build -A nixosTests.elk.unfree.ELK-7

{
  system ? builtins.currentSystem,
  config ? { },
  pkgs ? import ../.. { inherit system config; },
}:

let
  inherit (pkgs) lib;

  esUrl = "http://localhost:9200";

  mkElkTest =
    name: elk:
    import ./make-test-python.nix ({
      inherit name;
      meta = with pkgs.lib.maintainers; {
        maintainers = [
          offline
          basvandijk
        ];
      };
      nodes = {
        one =
          { pkgs, lib, ... }:
          {
            # Not giving the machine at least 2060MB results in elasticsearch failing with the following error:
            #
            #   OpenJDK 64-Bit Server VM warning:
            #   INFO: os::commit_memory(0x0000000085330000, 2060255232, 0)
            #   failed; error='Cannot allocate memory' (errno=12)
            #
            #   There is insufficient memory for the Java Runtime Environment to continue.
            #   Native memory allocation (mmap) failed to map 2060255232 bytes for committing reserved memory.
            #
            # When setting this to 2500 I got "Kernel panic - not syncing: Out of
            # memory: compulsory panic_on_oom is enabled" so let's give it a
            # bit more room:
            virtualisation.memorySize = 3000;

            # For querying JSON objects returned from elasticsearch and kibana.
            environment.systemPackages = [ pkgs.jq ];

            services = {

              journalbeat = {
                enable = elk ? journalbeat;
                package = elk.journalbeat;
                extraConfig = pkgs.lib.mkOptionDefault (''
                  logging:
                    to_syslog: true
                    level: warning
                    metrics.enabled: false
                  output.elasticsearch:
                    hosts: [ "127.0.0.1:9200" ]
                  journalbeat.inputs:
                    - paths: []
                      seek: cursor
                '');
              };

              filebeat = {
                enable = elk ? filebeat;
                package = elk.filebeat;
                inputs.journald.id = "everything";

                inputs.log = {
                  enabled = true;
                  paths = [
                    "/var/lib/filebeat/test"
                  ];
                };

                settings = {
                  logging.level = "info";
                };
              };

              metricbeat = {
                enable = true;
                package = elk.metricbeat;
                modules.system = {
                  metricsets = [
                    "cpu"
                    "load"
                    "memory"
                    "network"
                    "process"
                    "process_summary"
                    "uptime"
                    "socket_summary"
                  ];
                  enabled = true;
                  period = "5s";
                  processes = [ ".*" ];
                  cpu.metrics = [
                    "percentages"
                    "normalized_percentages"
                  ];
                  core.metrics = [ "percentages" ];
                };
                settings = {
                  output.elasticsearch = {
                    hosts = [ "127.0.0.1:9200" ];
                  };
                };
              };

              logstash = {
                enable = true;
                package = elk.logstash;
                inputConfig = ''
                  exec { command => "echo -n flowers" interval => 1 type => "test" }
                  exec { command => "echo -n dragons" interval => 1 type => "test" }
                '';
                filterConfig = ''
                  if [message] =~ /dragons/ {
                    drop {}
                  }
                '';
                outputConfig = ''
                  file {
                    path => "/tmp/logstash.out"
                    codec => line { format => "%{message}" }
                  }
                  elasticsearch {
                    hosts => [ "${esUrl}" ]
                  }
                '';
              };

              elasticsearch = {
                enable = true;
                package = elk.elasticsearch;
              };

              elasticsearch-curator = {
                enable = elk ? elasticsearch-curator;
                actionYAML = ''
                  ---
                  actions:
                    1:
                      action: delete_indices
                      description: >-
                        Delete indices older than 1 second (based on index name), for logstash-
                        prefixed indices. Ignore the error if the filter does not result in an
                        actionable list of indices (ignore_empty_list) and exit cleanly.
                      options:
                        allow_ilm_indices: true
                        ignore_empty_list: True
                        disable_action: False
                      filters:
                        - filtertype: pattern
                          kind: prefix
                          value: logstash-
                        - filtertype: age
                          source: name
                          direction: older
                          timestring: '%Y.%m.%d'
                          unit: seconds
                          unit_count: 1
                '';
              };
            };
          };
      };

      passthru.elkPackages = elk;
      testScript =
        let
          # Elasticsearch 7 changed hits.total in search responses from a plain
          # number to an object with a "value" field, so append ".value" to the
          # jq paths below when testing against version 7 or newer.
          valueObject = lib.optionalString (lib.versionAtLeast elk.elasticsearch.version "7") ".value";
        in
        ''
          import json


          def expect_hits(message):
              dictionary = {"query": {"match": {"message": message}}}
              return (
                  "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
                  + "-H 'Content-Type: application/json' "
                  + "-d '{}' ".format(json.dumps(dictionary))
                  + " | tee /dev/console"
                  + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
              )


          def expect_no_hits(message):
              dictionary = {"query": {"match": {"message": message}}}
              return (
                  "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
                  + "-H 'Content-Type: application/json' "
                  + "-d '{}' ".format(json.dumps(dictionary))
                  + " | tee /dev/console"
                  + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} == 0 end'"
              )


          def has_metricbeat():
              dictionary = {"query": {"match": {"event.dataset": {"query": "system.cpu"}}}}
              return (
                  "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
                  + "-H 'Content-Type: application/json' "
                  + "-d '{}' ".format(json.dumps(dictionary))
                  + " | tee /dev/console"
                  + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
              )


          start_all()

          one.wait_for_unit("elasticsearch.service")
          one.wait_for_open_port(9200)

          # Continue as long as the status is not "red". The status is probably
          # "yellow" instead of "green" because we are using a single elasticsearch
          # node, which elasticsearch considers risky.
          #
          # TODO: extend this test with multiple elasticsearch nodes
          # and see if the status turns "green".
          one.wait_until_succeeds(
              "curl --silent --show-error --fail-with-body '${esUrl}/_cluster/health'"
              + " | jq -es 'if . == [] then null else .[] | .status != \"red\" end'"
          )

          with subtest("Perform some simple logstash tests"):
              one.wait_for_unit("logstash.service")
              one.wait_until_succeeds("cat /tmp/logstash.out | grep flowers")
              one.wait_until_succeeds("cat /tmp/logstash.out | grep -v dragons")

          with subtest("Metricbeat is running"):
              one.wait_for_unit("metricbeat.service")

          with subtest("Metricbeat metrics arrive in elasticsearch"):
              one.wait_until_succeeds(has_metricbeat())

          with subtest("Logstash messages arrive in elasticsearch"):
              one.wait_until_succeeds(expect_hits("flowers"))
              one.wait_until_succeeds(expect_no_hits("dragons"))

        ''
        + lib.optionalString (elk ? journalbeat) ''
          with subtest(
              "A message logged to the journal is ingested by elasticsearch via journalbeat"
          ):
              one.wait_for_unit("journalbeat.service")
              one.execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat")
              one.wait_until_succeeds(
                  expect_hits("Supercalifragilisticexpialidocious")
              )
        ''
        + lib.optionalString (elk ? filebeat) ''
          with subtest(
              "A message logged to the journal is ingested by elasticsearch via filebeat"
          ):
              one.wait_for_unit("filebeat.service")
              one.execute("echo 'Superdupercalifragilisticexpialidocious' | systemd-cat")
              one.wait_until_succeeds(
                  expect_hits("Superdupercalifragilisticexpialidocious")
              )
              one.execute(
                  "echo 'SuperdupercalifragilisticexpialidociousIndeed' >> /var/lib/filebeat/test"
              )
              one.wait_until_succeeds(
                  expect_hits("SuperdupercalifragilisticexpialidociousIndeed")
              )
        ''
        + lib.optionalString (elk ? elasticsearch-curator) ''
          with subtest("Elasticsearch-curator works"):
              one.systemctl("stop logstash")
              one.systemctl("start elasticsearch-curator")
              one.wait_until_succeeds(
                  '! curl --silent --show-error --fail-with-body "${esUrl}/_cat/indices" | grep logstash | grep ^'
              )
        '';
    }) { inherit pkgs system; };
in
{
  # We currently only package upstream binaries.
  # Feel free to package an SSPL licensed source-based package!
  # ELK-7 = mkElkTest "elk-7-oss" {
  #   name = "elk-7";
  #   elasticsearch = pkgs.elasticsearch7-oss;
  #   logstash = pkgs.logstash7-oss;
  #   filebeat = pkgs.filebeat7;
  #   metricbeat = pkgs.metricbeat7;
  # };
  unfree = lib.dontRecurseIntoAttrs {
    ELK-7 = mkElkTest "elk-7" {
      elasticsearch = pkgs.elasticsearch7;
      logstash = pkgs.logstash7;
      filebeat = pkgs.filebeat7;
      metricbeat = pkgs.metricbeat7;
    };
  };
}