treewide: replace `<command> | systemd-cat` with `systemd-cat <command>`

The former swallows exit codes (without `pipefail`, a pipeline reports `systemd-cat`'s status, not the command's); the latter doesn't, since `systemd-cat <command>` runs the command itself and its exit status is preserved.
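
A minimal shell sketch of the difference (illustrative only, not part of the diff):

```console
# Without `pipefail`, the pipeline's exit status is systemd-cat's (0), so a
# failing command goes unnoticed by the test driver's succeed().
$ false | systemd-cat; echo $?
0
# With `systemd-cat <command>`, systemd-cat runs the command itself, so the
# command's own exit status is what the caller sees.
$ systemd-cat false; echo $?
1
```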

K900 9843bbbe 223c8a6e

+1 -1
nixos/tests/castopod.nix
···
castopod.succeed("curl -s http://localhost/cp-install | grep 'Create your Super Admin account' > /dev/null")
with subtest("Create superadmin and log in"):
-castopod.succeed("PYTHONUNBUFFERED=1 test-runner | systemd-cat -t test-runner")
+castopod.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner")
'';
})
+12 -12
nixos/tests/hadoop/hadoop.nix
···
nn2.succeed("systemctl stop hdfs-zkfc")
# Initialize zookeeper for failover controller
-nn1.succeed("sudo -u hdfs hdfs zkfc -formatZK 2>&1 | systemd-cat")
+nn1.succeed("sudo -u hdfs systemd-cat hdfs zkfc -formatZK")
# Format NN1 and start it
-nn1.succeed("sudo -u hdfs hadoop namenode -format 2>&1 | systemd-cat")
+nn1.succeed("sudo -u hdfs systemd-cat hadoop namenode -format")
nn1.succeed("systemctl start hdfs-namenode")
nn1.wait_for_open_port(9870)
nn1.wait_for_open_port(8022)
nn1.wait_for_open_port(8020)
# Bootstrap NN2 from NN1 and start it
-nn2.succeed("sudo -u hdfs hdfs namenode -bootstrapStandby 2>&1 | systemd-cat")
+nn2.succeed("sudo -u hdfs systemd-cat hdfs namenode -bootstrapStandby")
nn2.succeed("systemctl start hdfs-namenode")
nn2.wait_for_open_port(9870)
nn2.wait_for_open_port(8022)
nn2.wait_for_open_port(8020)
-nn1.succeed("netstat -tulpne | systemd-cat")
+nn1.succeed("systemd-cat netstat -tulpne")
# Start failover controllers
nn1.succeed("systemctl start hdfs-zkfc")
···
# DN should have started by now, but confirm anyway
dn1.wait_for_unit("hdfs-datanode")
# Print states of namenodes
-client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
# Wait for cluster to exit safemode
client.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
-client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
# test R/W
client.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
···
# Test NN failover
nn1.succeed("systemctl stop hdfs-namenode")
assert "active" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
-client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
nn1.succeed("systemctl start hdfs-namenode")
···
nn1.wait_for_open_port(8022)
nn1.wait_for_open_port(8020)
assert "standby" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
-client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
#### YARN tests ####
···
nm1.wait_for_open_port(8042)
nm1.wait_for_open_port(8040)
client.wait_until_succeeds("yarn node -list | grep Nodes:1")
-client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
-client.succeed("sudo -u yarn yarn node -list | systemd-cat")
+client.succeed("sudo -u yarn systemd-cat yarn rmadmin -getAllServiceState")
+client.succeed("sudo -u yarn systemd-cat yarn node -list")
# Test RM failover
rm1.succeed("systemctl stop yarn-resourcemanager")
assert "standby" not in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
-client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+client.succeed("sudo -u yarn systemd-cat yarn rmadmin -getAllServiceState")
rm1.succeed("systemctl start yarn-resourcemanager")
rm1.wait_for_unit("yarn-resourcemanager")
rm1.wait_for_open_port(8088)
assert "standby" in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
-client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+client.succeed("sudo -u yarn systemd-cat yarn rmadmin -getAllServiceState")
assert "Estimated value of Pi is" in client.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
assert "SUCCEEDED" in client.succeed("yarn application -list -appStates FINISHED")
+2 -2
nixos/tests/hadoop/hdfs.nix
···
namenode.wait_for_unit("hdfs-namenode")
namenode.wait_for_unit("network.target")
namenode.wait_for_open_port(8020)
-namenode.succeed("ss -tulpne | systemd-cat")
-namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
+namenode.succeed("systemd-cat ss -tulpne")
+namenode.succeed("systemd-cat cat /etc/hadoop*/hdfs-site.xml")
namenode.wait_for_open_port(9870)
datanode.wait_for_unit("hdfs-datanode")
+2 -2
nixos/tests/iscsi-multipath-root.nix
···
initiatorAuto.succeed("umount /mnt")
initiatorAuto.succeed("systemctl restart multipathd")
-initiatorAuto.succeed("multipath -ll | systemd-cat")
+initiatorAuto.succeed("systemd-cat multipath -ll")
# Install our RootDisk machine to 123456, the alias to the device that multipath is now managing
initiatorAuto.succeed("mount /dev/mapper/123456 /mnt")
···
initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.1.3 --login")
initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login")
initiatorRootDisk.succeed("systemctl restart multipathd")
-initiatorRootDisk.succeed("multipath -ll | systemd-cat")
+initiatorRootDisk.succeed("systemd-cat multipath -ll")
# Verify we can write and sync the root disk
initiatorRootDisk.succeed("mkdir /scratch")
+1 -1
nixos/tests/vaultwarden.nix
···
)
with subtest("use the web interface to sign up, log in, and save a password"):
-server.succeed("PYTHONUNBUFFERED=1 test-runner | systemd-cat -t test-runner")
+server.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner")
with subtest("log in with the cli"):
key = client.succeed(