variant: fcos
version: 1.5.0
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        %{ for _, ssh_key in ssh_keys ~}
        - ${ssh_key}
        %{ endfor ~}
storage:
  directories:
    # Config dir
    - path: /var/home/core/.config
      user:
        name: core
      group:
        name: core
    %{ for _, path in config_dirs ~}
    - path: /var/home/core/.config/${path}
      user:
        name: core
      group:
        name: core
    %{ endfor ~}
    # Systemd user dir
    - path: /var/home/core/.config/systemd
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user
      user:
        name: core
      group:
        name: core
    # For local binaries
    - path: /var/home/core/.local/bin
      user:
        name: core
      group:
        name: core
    # Backup snapshot mount points
    - path: /var/mnt/snapshots/backblaze
    - path: /var/mnt/snapshots/storj
  links:
    # Enable Podman socket for Traefik
    - path: /var/home/core/.config/systemd/user/timers.target.wants/podman.socket
      target: /usr/lib/systemd/user/podman.socket
      user:
        name: core
      group:
        name: core
    # Enable the Traefik entry point sockets (http, https, imaps, smtps, ldaps)
    - path: /var/home/core/.config/systemd/user/timers.target.wants/http.socket
      target: /var/home/core/.config/systemd/user/http.socket
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/timers.target.wants/https.socket
      target: /var/home/core/.config/systemd/user/https.socket
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/timers.target.wants/imaps.socket
      target: /var/home/core/.config/systemd/user/imaps.socket
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/timers.target.wants/smtps.socket
      target: /var/home/core/.config/systemd/user/smtps.socket
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/timers.target.wants/ldaps.socket
      target: /var/home/core/.config/systemd/user/ldaps.socket
      user:
        name: core
      group:
        name: core
    # Enable Podman auto-updates
    - path: /var/home/core/.config/systemd/user/timers.target.wants/podman-auto-update.timer
      target: /usr/lib/systemd/user/podman-auto-update.timer
      user:
        name: core
      group:
        name: core
    # Hometown timezone
    - path: /etc/localtime
      target: ../usr/share/zoneinfo/Europe/Belgrade
    # Enable systemd_exporter
    - path: /var/home/core/.config/systemd/user/default.target.wants/systemd-exporter.service
      target: /var/home/core/.config/systemd/user/systemd-exporter.service
      user:
        name: core
      group:
        name: core
    # Enable node_exporter
    - path: /var/home/core/.config/systemd/user/default.target.wants/node-exporter.service
      target: /var/home/core/.config/systemd/user/node-exporter.service
      user:
        name: core
      group:
        name: core
  files:
    # Socket units for the Traefik entry points (http, https, imaps, smtps, ldaps)
    - path: /var/home/core/.config/systemd/user/http.socket
      contents:
        inline: |
          [Socket]
          ListenStream=${ip}:8080
          FileDescriptorName=web
          Service=traefik.service

          [Install]
          WantedBy=sockets.target
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/https.socket
      contents:
        inline: |
          [Socket]
          ListenStream=${ip}:8443
          ListenDatagram=${ip}:8443
          FileDescriptorName=websecure
          Service=traefik.service

          [Install]
          WantedBy=sockets.target
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/imaps.socket
      contents:
        inline: |
          [Socket]
          ListenStream=${ip}:8993
          FileDescriptorName=imaps
          Service=traefik.service

          [Install]
          WantedBy=sockets.target
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/smtps.socket
      contents:
        inline: |
          [Socket]
          ListenStream=${ip}:8465
          FileDescriptorName=smtps
          Service=traefik.service

          [Install]
          WantedBy=sockets.target
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/ldaps.socket
      contents:
        inline: |
          [Socket]
          ListenStream=${ip}:8636
          FileDescriptorName=ldaps
          Service=traefik.service

          [Install]
          WantedBy=sockets.target
      user:
        name: core
      group:
        name: core
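    # NOTE: each socket above routes activation to traefik.service (Service=)
    # and labels the inherited file descriptor via FileDescriptorName=, which
    # systemd hands to the service in $LISTEN_FDNAMES; newer Traefik releases
    # (v3.2+) can bind an entry point to an inherited fd whose name matches
    # the entry point name, which appears to be the intent here. Once the
    # units are loaded, the listeners can be inspected with:
    #
    #   systemctl --user list-sockets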
    - path: /etc/containers/storage.conf
      contents:
        inline: |
          [storage]
          driver = "overlay"
          rootless_storage_path = "/var/mnt/docker/$USER"
    # App config files rendered from the config_files template variable
    %{ for name, content in config_files ~}
    - path: /var/home/core/.config/${name}
      contents:
        inline: |
          ${indent(10, content)}
      user:
        name: core
      group:
        name: core
    %{ endfor ~}
    # Enable linger so containers keep running even after the core user logs out
    - path: /var/lib/systemd/linger/core
    # Set machine hostname
    - path: /etc/hostname
      contents:
        inline: ${hostname}
    # Configure iSCSI target
    - path: /etc/iscsi/iscsid.conf
      overwrite: true
      contents:
        inline: |
          node.startup = automatic
          isns.address = ${truenas_ip}
          isns.port = 3260
    # Import Step CA root certificate
    - path: /etc/pki/ca-trust/source/anchors/step-online-ca.pem
      contents:
        inline: |
          ${indent(10, root_ca)}
    # Enable zram swap
    - path: /etc/systemd/zram-generator.conf
      contents:
        inline: |
          [zram0]
    - path: /etc/sysctl.d/90-bbr.conf
      contents:
        inline: |
          net.core.default_qdisc = cake
          net.ipv4.tcp_congestion_control = bbr
    - path: /etc/sysctl.d/90-inotify.conf
      contents:
        inline: |
          fs.inotify.max_user_instances = 1024
    - path: /etc/ssh/sshd_config.d/20-increase-max-auth-tries.conf
      mode: 0644
      contents:
        inline: |
          # I have a lot of SSH keys and Terraform has no "IdentitiesOnly yes"
          # equivalent, so it tries every key on the machine and hits the limit.
          MaxAuthTries 10
    # Firewall configuration
    - path: /etc/sysconfig/nftables.conf
      overwrite: true
      mode: 0644
      contents:
        inline: |
          include "/etc/nftables/main.nft"
    - path: /etc/nftables/main.nft
      overwrite: true
      mode: 0644
      contents:
        inline: |
          #!/usr/sbin/nft -f

          flush ruleset

          table inet filter {
            chain input {
              type filter hook input priority filter; policy drop;

              # allow established/related connections
              ct state { established, related } accept

              # early drop of invalid connections
              ct state invalid drop

              # allow from loopback
              iifname lo accept

              # allow icmp
              ip protocol icmp accept
              ip6 nexthdr icmpv6 counter accept

              # allow ssh
              tcp dport ssh accept
              tcp dport 2222 accept

              # allow http and https
              tcp dport 8080 accept
              tcp dport 8443 accept
              udp dport 8443 accept
              tcp dport 9443 accept
              udp dport 9443 accept

              # allow imaps, smtps and ldaps
              tcp dport 8993 accept
              tcp dport 8465 accept
              tcp dport 8636 accept

              # allow livekit
              udp dport 59000-60000 accept
              udp dport 3478 accept
              tcp dport 7881 accept

              # allow plex
              udp dport { 32410, 32412, 32413, 32414 } accept

              # allow minecraft bds
              udp dport 19132 accept

              # allow truenas to report metrics using graphite
              ip saddr 192.168.100.3 tcp dport 2003 accept
              ip saddr 192.168.100.3 udp dport 2003 accept

              # allow plugs to access the mqtt broker
              ip saddr 192.168.100.1 tcp dport 1883 accept
            }

            chain forward {
              type filter hook forward priority filter; policy drop;

              ct state { established, related } accept
              ct state invalid drop
              ct status dnat accept
            }

            chain output {
              type filter hook output priority 0; policy accept;
            }
          }
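    # NOTE: the ruleset above can be syntax-checked on a running host without
    # applying it (dry run, requires the nftables userspace tools):
    #
    #   nft --check --file /etc/nftables/main.nft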
    # Download systemd_exporter and create a systemd user service for it
    - path: /var/home/core/.local/bin/systemd_exporter.tar.gz
      contents:
        source: https://github.com/prometheus-community/systemd_exporter/releases/download/v0.7.0/systemd_exporter-0.7.0.linux-amd64.tar.gz
        verification:
          hash: sha256-2d995ca20249aeeac8f507173176ce5d162f17470a98ca66e289c85b388480c3
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/systemd-exporter.service
      contents:
        inline: |
          [Unit]
          Description=Systemd Exporter

          [Service]
          Type=simple
          WorkingDirectory=/var/home/core/.local/bin
          ExecStartPre=/usr/bin/tar -xvf systemd_exporter.tar.gz systemd_exporter-0.7.0.linux-amd64/systemd_exporter --strip-components=1
          ExecStart=/var/home/core/.local/bin/systemd_exporter

          [Install]
          WantedBy=default.target
      user:
        name: core
      group:
        name: core
    # Download node_exporter and create a systemd user service for it
    - path: /var/home/core/.local/bin/node_exporter.tar.gz
      contents:
        source: https://github.com/prometheus/node_exporter/releases/download/v1.10.2/node_exporter-1.10.2.linux-amd64.tar.gz
        verification:
          hash: sha256-c46e5b6f53948477ff3a19d97c58307394a29fe64a01905646f026ddc32cb65b
      user:
        name: core
      group:
        name: core
    - path: /var/home/core/.config/systemd/user/node-exporter.service
      contents:
        inline: |
          [Unit]
          Description=Node Exporter

          [Service]
          Type=simple
          WorkingDirectory=/var/home/core/.local/bin
          ExecStartPre=/usr/bin/tar -xvf node_exporter.tar.gz node_exporter-1.10.2.linux-amd64/node_exporter --strip-components=1
          ExecStart=/var/home/core/.local/bin/node_exporter --collector.systemd --collector.processes

          [Install]
          WantedBy=default.target
systemd:
  units:
    - name: install-additional-software.service
      enabled: true
      contents: |
        [Unit]
        Description=Additional software installer
        Wants=network-online.target
        After=network-online.target
        Before=zincati.service
        ConditionPathExists=!/var/lib/%N.stamp

        [Service]
        Type=oneshot
        RemainAfterExit=yes
        ExecStart=/usr/bin/rpm-ostree install --allow-inactive --assumeyes --reboot qemu-guest-agent restic btop nvtop
        ExecStart=/bin/touch /var/lib/%N.stamp

        [Install]
        WantedBy=multi-user.target
    - name: iscsi.service
      enabled: true
    - name: attach-iscsi-disk.service
      enabled: true
      contents: |
        [Unit]
        Description=Attach iSCSI disk
        ConditionFirstBoot=yes
        Wants=network-online.target
        After=network-online.target iscsi.service

        [Service]
        Type=oneshot
        RemainAfterExit=yes
        ExecStart=/usr/sbin/iscsiadm -m discovery -t sendtargets -p ${truenas_ip}
        ExecStart=/usr/sbin/iscsiadm -m node -T ${truenas_iqn} -p ${truenas_ip} --login
        ExecStart=/usr/sbin/lvmdevices --adddev /dev/sda
        ExecStart=/usr/sbin/vgchange -ay

        [Install]
        WantedBy=multi-user.target
    - name: var-mnt-docker.mount
      enabled: true
      contents: |
        [Unit]
        Description=Mount docker directory
        Before=remote-fs.target
        Wants=network-online.target iscsi.service

        [Mount]
        What=/dev/vg0/lv0
        Where=/var/mnt/docker
        Type=xfs
        Options=_netdev

        [Install]
        WantedBy=remote-fs.target
    - name: podman-fix-selinux-context.service
      enabled: true
      contents: |
        [Unit]
        Description=Fix SELinux context for Podman storage
        Requires=var-mnt-docker.mount
        After=var-mnt-docker.mount
        ConditionPathExists=!/var/lib/%N.stamp

        [Service]
        Type=oneshot
        RemainAfterExit=yes
        ExecStart=/usr/bin/chcon -R -t container_file_t /var/mnt/docker/core
        ExecStart=/bin/touch /var/lib/%N.stamp

        [Install]
        WantedBy=multi-user.target
    - name: var-mnt-observability.mount
      enabled: true
      contents: |
        [Unit]
        Description=Mount observability directory
        Before=remote-fs.target

        [Mount]
        What=${truenas_ip}:/mnt/spool/observability
        Where=/var/mnt/observability
        Type=nfs

        [Install]
        WantedBy=remote-fs.target
    - name: var-mnt-media.mount
      enabled: true
      contents: |
        [Unit]
        Description=Mount media directory
        Before=remote-fs.target

        [Mount]
        What=${truenas_ip}:/mnt/spool/media
        Where=/var/mnt/media
        Type=nfs

        [Install]
        WantedBy=remote-fs.target
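    # NOTE: systemd requires a mount unit's name to be the escaped form of its
    # Where= path, which is why these units are named the way they are; the
    # expected name can be derived with:
    #
    #   systemd-escape --path --suffix=mount /var/mnt/media
    #   # -> var-mnt-media.mount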
    - name: var-mnt-personal.mount
      enabled: true
      contents: |
        [Unit]
        Description=Mount personal directory
        Before=remote-fs.target

        [Mount]
        What=${truenas_ip}:/mnt/spool/personal
        Where=/var/mnt/personal
        Type=nfs

        [Install]
        WantedBy=remote-fs.target
    - name: nftables.service
      enabled: true
    - name: restic-backblaze.service
      contents: |
        [Unit]
        Description=Backup Docker data to Backblaze
        Wants=network-online.target
        After=network-online.target iscsi.service

        [Service]
        Type=oneshot
        LoadCredential=restic-b2-account-id
        LoadCredential=restic-b2-account-key
        LoadCredential=restic-password
        Environment=RESTIC_PASSWORD_FILE=%d/restic-password
        # Create an LVM snapshot and run the backup
        ExecStart=/bin/bash -c "lvremove -y vg0/restic-backblaze || true"
        ExecStart=/usr/sbin/lvcreate --size 1G --snapshot --name restic-backblaze vg0/lv0
        ExecStart=/usr/bin/mount -o nouuid /dev/vg0/restic-backblaze /mnt/snapshots/backblaze
        ExecStart=/bin/bash -c "export B2_ACCOUNT_ID=$(cat $CREDENTIALS_DIRECTORY/restic-b2-account-id); export B2_ACCOUNT_KEY=$(cat $CREDENTIALS_DIRECTORY/restic-b2-account-key); restic -r b2:krasovsky-homelab:app-data --verbose backup /var/mnt/docker/app_data"
        ExecStart=/usr/bin/umount /mnt/snapshots/backblaze
        ExecStart=/usr/sbin/lvremove -y vg0/restic-backblaze

        [Install]
        WantedBy=multi-user.target
    - name: restic-backblaze.timer
      enabled: true
      contents: |
        [Unit]
        Description=Daily backup to Backblaze

        [Timer]
        OnCalendar=daily
        RandomizedDelaySec=3600
        Unit=restic-backblaze.service

        [Install]
        WantedBy=timers.target
    - name: restic-storj.service
      contents: |
        [Unit]
        Description=Backup Docker data to Storj
        Wants=network-online.target
        After=network-online.target iscsi.service

        [Service]
        Type=oneshot
        LoadCredential=restic-aws-access-key-id
        LoadCredential=restic-aws-secret-access-key
        LoadCredential=restic-password
        Environment=RESTIC_PASSWORD_FILE=%d/restic-password
        # Create an LVM snapshot and run the backup
        ExecStart=/bin/bash -c "lvremove -y vg0/restic-storj || true"
        ExecStart=/usr/sbin/lvcreate --size 1G --snapshot --name restic-storj vg0/lv0
        ExecStart=/usr/bin/mount -o nouuid /dev/vg0/restic-storj /mnt/snapshots/storj
        ExecStart=/bin/bash -c "export AWS_ACCESS_KEY_ID=$(cat $CREDENTIALS_DIRECTORY/restic-aws-access-key-id); export AWS_SECRET_ACCESS_KEY=$(cat $CREDENTIALS_DIRECTORY/restic-aws-secret-access-key); restic -r s3:https://gateway.eu1.storjshare.io/homelab-backup/app-data --verbose backup /var/mnt/docker/app_data"
        ExecStart=/usr/bin/umount /mnt/snapshots/storj
        ExecStart=/usr/sbin/lvremove -y vg0/restic-storj

        [Install]
        WantedBy=multi-user.target
    - name: restic-storj.timer
      enabled: true
      contents: |
        [Unit]
        Description=Daily backup to Storj

        [Timer]
        OnCalendar=daily
        RandomizedDelaySec=3600
        Unit=restic-storj.service

        [Install]
        WantedBy=timers.target
kernel_arguments:
  should_exist:
    - pcie_aspm.policy=powersupersave
    - ifname=infra:${mac_address}
    # dracut ip= fields: <client-ip>::<gateway>:<netmask>:<hostname>:<interface>:<autoconf>:<dns>
    - ip=${ip}::${gateway}:${mask}:${hostname}:infra:none:${nameserver}
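# NOTE: this file is a Terraform template (the %{ }/${ } directives are
# rendered by templatefile()) that produces a Butane config; the rendered
# output still has to be transpiled to Ignition before FCOS can consume it,
# e.g. (a sketch; the file names are assumptions):
#
#   butane --strict --pretty rendered.bu > config.ign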