My Nix configurations for my servers and desktop

Compare changes

Choose any two refs to compare.

-10
README.md
···
- Vaultwarden password manager
- Beszel
-
### 🥧 Morax (Raspberry Pi 4)
-
**Hardware**: Raspberry Pi 4
-
-
**Services**:
-
- Pi-hole DNS filtering
-
- Speedtest monitoring (every 10 minutes)
-
- Headscale connection
-
-
**Notes**: Direct gigabit connection from router - looking to add more services to utilize bandwidth
-
### 🍎 Gabriel
**Hardware**: M4 16GB Mac Mini
+8
common/bluetooth.nix
···
+
{ config, lib ,... }:
+
+
{
+
hardware.bluetooth.enable = true;
+
hardware.bluetooth.powerOnBoot = true;
+
+
services.blueman.enable = true;
+
}
+16 -2
common/desktop/core.nix
···
#ghostty
kitty
vscode
-
inputs.zen-browser.packages."${system}".default
fastfetch
hyfetch
sway-contrib.grimshot
-
discord
pamixer
+
+
firefox
+
chromium
+
kpcli
+
eyedropper
+
krita
+
xfce.thunar
+
libreoffice
+
signal-desktop
+
haruna
+
+
inputs.zen-browser.packages."${system}".default
+
];
+
+
fonts.packages = [
+
pkgs.nerd-fonts.fira-code
];
environment.sessionVariables.NIXOS_OZONE_WL = "1";
+3
common/desktop/sway.nix
···
mako
];
+
services.dbus.enable = true;
+
programs.sway = {
enable = true;
wrapperFeatures.gtk = true;
+
package = pkgs.swayfx;
};
environment.sessionVariables = {
+1 -1
common/nvidia.nix
···
# supported GPUs is at:
# https://github.com/NVIDIA/open-gpu-kernel-modules#compatible-gpus
# Only available from driver 515.43.04+
-
open = false;
+
open = true;
# Enable the Nvidia settings menu,
# accessible via `nvidia-settings`.
+67
common/python-cuda-dev.nix
···
+
{
+
description = "A Nix-flake-based PyTorch development environment";
+
+
# CUDA binaries are cached by the community.
+
nixConfig = {
+
extra-substituters = [
+
"https://nix-community.cachix.org"
+
];
+
extra-trusted-public-keys = [
+
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
+
];
+
};
+
+
inputs.nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1.*.tar.gz";
+
+
outputs = {
+
self,
+
nixpkgs,
+
}: let
+
supportedSystems = ["x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin"];
+
forEachSupportedSystem = f:
+
nixpkgs.lib.genAttrs supportedSystems (system:
+
f {
+
pkgs = import nixpkgs {
+
inherit system;
+
config.allowUnfree = true;
+
};
+
});
+
in {
+
devShells = forEachSupportedSystem ({pkgs}: let
+
libs = [
+
# PyTorch and Numpy depends on the following libraries.
+
pkgs.cudaPackages.cudatoolkit
+
pkgs.cudaPackages.cudnn
+
pkgs.stdenv.cc.cc.lib
+
pkgs.zlib
+
+
# PyTorch also needs to know where your local "lib/libcuda.so" lives.
+
# If you're not on NixOS, you should provide the right path (likely
+
# another one).
+
"/run/opengl-driver"
+
];
+
in {
+
default = pkgs.mkShell {
+
packages = [
+
pkgs.python312
+
pkgs.python312Packages.venvShellHook
+
];
+
+
env = {
+
CC = "${pkgs.gcc}/bin/gcc"; # For `torch.compile`.
+
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath libs;
+
};
+
+
venvDir = ".venv";
+
postVenvCreation = ''
+
# This is run only when creating the virtual environment.
+
pip install torch==2.5.1 numpy==2.2.2
+
'';
+
postShellHook = ''
+
# This is run every time you enter the devShell.
+
python3 -c "import torch; print('CUDA available' if torch.cuda.is_available() else 'CPU only')"
+
'';
+
};
+
});
+
};
+
}
+8 -1
common/services.nix
···
-
{ config, pkgs, ... }:
+
{ config, pkgs, lib, ... }:
{
# system packages + services
environment.systemPackages = with pkgs; [
···
zfs
nixos-generators
sqlite
+
bun
+
unzip
];
services.openssh.enable = true;
services.printing.enable = true;
services.tailscale.enable = true;
services.tailscale.useRoutingFeatures = "both";
+
services.tailscale.authKeyFile = lib.mkIf (config ? age && config.age ? secrets)
+
config.age.secrets."headscale-authkey".path;
+
services.tailscale.extraUpFlags = [
+
"--login-server=https://headscale.nekomimi.pet"
+
];
}
+253 -45
flake.lock
···
"systems": "systems"
},
"locked": {
-
"lastModified": 1747575206,
-
"narHash": "sha256-NwmAFuDUO/PFcgaGGr4j3ozG9Pe5hZ/ogitWhY+D81k=",
+
"lastModified": 1760836749,
+
"narHash": "sha256-wyT7Pl6tMFbFrs8Lk/TlEs81N6L+VSybPfiIgzU8lbQ=",
"owner": "ryantm",
"repo": "agenix",
-
"rev": "4835b1dc898959d8547a871ef484930675cb47f1",
+
"rev": "2f0f812f69f3eb4140157fe15e12739adf82e32a",
"type": "github"
},
"original": {
···
"nixpkgs": "nixpkgs_2"
},
"locked": {
-
"lastModified": 1748080874,
-
"narHash": "sha256-sUebEzAkrY8Aq5G0GHFyRddmRNGP/a2iTtV7ISNvi/c=",
+
"lastModified": 1760953099,
+
"narHash": "sha256-sOKx2YcHa+lWEvaEOIGqLN2WWk1Wf5z6KM02tdfhMtw=",
"owner": "catppuccin",
"repo": "nix",
-
"rev": "0ba11b12be81f0849a89ed17ab635164ea8f0112",
+
"rev": "f5b21876888265d2fee7fb0640d1b66a1c1c6503",
"type": "github"
},
"original": {
···
"type": "github"
}
},
+
"disko": {
+
"inputs": {
+
"nixpkgs": "nixpkgs_3"
+
},
+
"locked": {
+
"lastModified": 1736864502,
+
"narHash": "sha256-ItkIZyebGvNH2dK9jVGzJHGPtb6BSWLN8Gmef16NeY0=",
+
"owner": "nix-community",
+
"repo": "disko",
+
"rev": "0141aabed359f063de7413f80d906e1d98c0c123",
+
"type": "github"
+
},
+
"original": {
+
"owner": "nix-community",
+
"ref": "v1.11.0",
+
"repo": "disko",
+
"type": "github"
+
}
+
},
+
"flake-compat": {
+
"locked": {
+
"lastModified": 1696426674,
+
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
+
"owner": "edolstra",
+
"repo": "flake-compat",
+
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
+
"type": "github"
+
},
+
"original": {
+
"owner": "edolstra",
+
"repo": "flake-compat",
+
"type": "github"
+
}
+
},
"flake-utils": {
"inputs": {
"systems": "systems_2"
···
"systems": "systems_3"
},
"locked": {
+
"lastModified": 1731533236,
+
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
+
"owner": "numtide",
+
"repo": "flake-utils",
+
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
+
"type": "github"
+
},
+
"original": {
+
"owner": "numtide",
+
"repo": "flake-utils",
+
"type": "github"
+
}
+
},
+
"flake-utils_3": {
+
"inputs": {
+
"systems": "systems_5"
+
},
+
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
···
]
},
"locked": {
-
"lastModified": 1748737919,
-
"narHash": "sha256-5kvBbLYdp+n7Ftanjcs6Nv+UO6sBhelp6MIGJ9nWmjQ=",
+
"lastModified": 1761235135,
+
"narHash": "sha256-cux9xeceLIER1lBxUa1gMafkz7gg5ntcUmJBynWdBWI=",
"owner": "nix-community",
"repo": "home-manager",
-
"rev": "5675a9686851d9626560052a032c4e14e533c1fa",
+
"rev": "0adf9ba3f567da2d53af581a857aacf671aaa547",
"type": "github"
},
"original": {
···
]
},
"locked": {
-
"lastModified": 1743604125,
-
"narHash": "sha256-ZD61DNbsBt1mQbinAaaEqKaJk2RFo9R/j+eYWeGMx7A=",
+
"lastModified": 1752603129,
+
"narHash": "sha256-S+wmHhwNQ5Ru689L2Gu8n1OD6s9eU9n9mD827JNR+kw=",
"owner": "nix-community",
"repo": "home-manager",
-
"rev": "180fd43eea296e62ae68e079fcf56aba268b9a1a",
+
"rev": "e8c19a3cec2814c754f031ab3ae7316b64da085b",
"type": "github"
},
"original": {
···
"lix": {
"flake": false,
"locked": {
-
"lastModified": 1746827285,
-
"narHash": "sha256-hsFe4Tsqqg4l+FfQWphDtjC79WzNCZbEFhHI8j2KJzw=",
-
"rev": "47aad376c87e2e65967f17099277428e4b3f8e5a",
+
"lastModified": 1753223229,
+
"narHash": "sha256-tkT4aCZZE6IEmjYotOzKKa2rV3pGpH3ZREeQn7ACgdU=",
+
"rev": "7ac20fc47cf2f1b7469c7a2f379e5a3a51a6789a",
"type": "tarball",
-
"url": "https://git.lix.systems/api/v1/repos/lix-project/lix/archive/47aad376c87e2e65967f17099277428e4b3f8e5a.tar.gz?rev=47aad376c87e2e65967f17099277428e4b3f8e5a"
+
"url": "https://git.lix.systems/api/v1/repos/lix-project/lix/archive/7ac20fc47cf2f1b7469c7a2f379e5a3a51a6789a.tar.gz?rev=7ac20fc47cf2f1b7469c7a2f379e5a3a51a6789a"
},
"original": {
"type": "tarball",
-
"url": "https://git.lix.systems/lix-project/lix/archive/2.93.0.tar.gz"
+
"url": "https://git.lix.systems/lix-project/lix/archive/release-2.93.tar.gz"
}
},
"lix-module": {
···
]
},
"locked": {
-
"lastModified": 1746838955,
-
"narHash": "sha256-11R4K3iAx4tLXjUs+hQ5K90JwDABD/XHhsM9nkeS5N8=",
-
"rev": "cd2a9c028df820a83ca2807dc6c6e7abc3dfa7fc",
+
"lastModified": 1753282722,
+
"narHash": "sha256-KYMUrTV7H/RR5/HRnjV5R3rRIuBXMemyJzTLi50NFTs=",
+
"rev": "46a9e8fcfe4be72b4c7c8082ee11d2c42da1e873",
"type": "tarball",
-
"url": "https://git.lix.systems/api/v1/repos/lix-project/nixos-module/archive/cd2a9c028df820a83ca2807dc6c6e7abc3dfa7fc.tar.gz"
+
"url": "https://git.lix.systems/api/v1/repos/lix-project/nixos-module/archive/46a9e8fcfe4be72b4c7c8082ee11d2c42da1e873.tar.gz?rev=46a9e8fcfe4be72b4c7c8082ee11d2c42da1e873"
},
"original": {
"type": "tarball",
-
"url": "https://git.lix.systems/lix-project/nixos-module/archive/2.93.0.tar.gz"
+
"url": "https://git.lix.systems/lix-project/nixos-module/archive/2.93.3-1.tar.gz"
+
}
+
},
+
"microvm": {
+
"inputs": {
+
"flake-utils": "flake-utils_2",
+
"nixpkgs": [
+
"nixpkgs"
+
],
+
"spectrum": "spectrum"
+
},
+
"locked": {
+
"lastModified": 1760574296,
+
"narHash": "sha256-S3gIp6Wd9vQ2RYDxcbHM2CIYgDtogbwzSdu38WABKaQ=",
+
"owner": "astro",
+
"repo": "microvm.nix",
+
"rev": "42628f7c61b02d385ce2cb1f66f9be333ac20140",
+
"type": "github"
+
},
+
"original": {
+
"owner": "astro",
+
"repo": "microvm.nix",
+
"type": "github"
}
},
"nixos-hardware": {
"locked": {
-
"lastModified": 1748942041,
-
"narHash": "sha256-HEu2gTct7nY0tAPRgBtqYepallryBKR1U8B4v2zEEqA=",
+
"lastModified": 1760958188,
+
"narHash": "sha256-2m1S4jl+GEDtlt2QqeHil8Ny456dcGSKJAM7q3j/BFU=",
"owner": "nixos",
"repo": "nixos-hardware",
-
"rev": "fc7c4714125cfaa19b048e8aaf86b9c53e04d853",
+
"rev": "d6645c340ef7d821602fd2cd199e8d1eed10afbc",
"type": "github"
},
"original": {
···
},
"nixpkgs": {
"locked": {
-
"lastModified": 1745391562,
-
"narHash": "sha256-sPwcCYuiEopaafePqlG826tBhctuJsLx/mhKKM5Fmjo=",
+
"lastModified": 1754028485,
+
"narHash": "sha256-IiiXB3BDTi6UqzAZcf2S797hWEPCRZOwyNThJIYhUfk=",
"owner": "NixOS",
"repo": "nixpkgs",
-
"rev": "8a2f738d9d1f1d986b5a4cd2fd2061a7127237d7",
+
"rev": "59e69648d345d6e8fef86158c555730fa12af9de",
"type": "github"
},
"original": {
"owner": "NixOS",
-
"ref": "nixos-unstable",
+
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
},
+
"nixpkgs-stable": {
+
"locked": {
+
"lastModified": 1748437600,
+
"narHash": "sha256-hYKMs3ilp09anGO7xzfGs3JqEgUqFMnZ8GMAqI6/k04=",
+
"owner": "NixOS",
+
"repo": "nixpkgs",
+
"rev": "7282cb574e0607e65224d33be8241eae7cfe0979",
+
"type": "github"
+
},
+
"original": {
+
"id": "nixpkgs",
+
"ref": "nixos-25.05",
+
"type": "indirect"
+
}
+
},
+
"nixpkgs-unstable": {
+
"locked": {
+
"lastModified": 1723637854,
+
"narHash": "sha256-med8+5DSWa2UnOqtdICndjDAEjxr5D7zaIiK4pn0Q7c=",
+
"owner": "NixOS",
+
"repo": "nixpkgs",
+
"rev": "c3aa7b8938b17aebd2deecf7be0636000d62a2b9",
+
"type": "github"
+
},
+
"original": {
+
"id": "nixpkgs",
+
"ref": "nixos-unstable",
+
"type": "indirect"
+
}
+
},
"nixpkgs_2": {
"locked": {
-
"lastModified": 1744463964,
-
"narHash": "sha256-LWqduOgLHCFxiTNYi3Uj5Lgz0SR+Xhw3kr/3Xd0GPTM=",
+
"lastModified": 1760524057,
+
"narHash": "sha256-EVAqOteLBFmd7pKkb0+FIUyzTF61VKi7YmvP1tw4nEw=",
"owner": "NixOS",
"repo": "nixpkgs",
-
"rev": "2631b0b7abcea6e640ce31cd78ea58910d31e650",
+
"rev": "544961dfcce86422ba200ed9a0b00dd4b1486ec5",
"type": "github"
},
"original": {
···
},
"nixpkgs_3": {
"locked": {
-
"lastModified": 1748162331,
-
"narHash": "sha256-rqc2RKYTxP3tbjA+PB3VMRQNnjesrT0pEofXQTrMsS8=",
+
"lastModified": 1736241350,
+
"narHash": "sha256-CHd7yhaDigUuJyDeX0SADbTM9FXfiWaeNyY34FL1wQU=",
+
"owner": "NixOS",
+
"repo": "nixpkgs",
+
"rev": "8c9fd3e564728e90829ee7dbac6edc972971cd0f",
+
"type": "github"
+
},
+
"original": {
+
"owner": "NixOS",
+
"ref": "nixpkgs-unstable",
+
"repo": "nixpkgs",
+
"type": "github"
+
}
+
},
+
"nixpkgs_4": {
+
"locked": {
+
"lastModified": 1761016216,
+
"narHash": "sha256-G/iC4t/9j/52i/nm+0/4ybBmAF4hzR8CNHC75qEhjHo=",
"owner": "nixos",
"repo": "nixpkgs",
-
"rev": "7c43f080a7f28b2774f3b3f43234ca11661bf334",
+
"rev": "481cf557888e05d3128a76f14c76397b7d7cc869",
"type": "github"
},
"original": {
···
"type": "github"
}
},
-
"nixpkgs_4": {
+
"nixpkgs_5": {
"locked": {
"lastModified": 1682134069,
"narHash": "sha256-TnI/ZXSmRxQDt2sjRYK/8j8iha4B4zP2cnQCZZ3vp7k=",
···
"type": "indirect"
}
},
+
"proxmox-nixos": {
+
"inputs": {
+
"flake-compat": "flake-compat",
+
"nixpkgs-stable": "nixpkgs-stable",
+
"nixpkgs-unstable": "nixpkgs-unstable",
+
"utils": "utils"
+
},
+
"locked": {
+
"lastModified": 1758650077,
+
"narHash": "sha256-ZeRtJimtk0Faiq7DPZEQNGipda3TaR4QXp0TAzu934Q=",
+
"owner": "SaumonNet",
+
"repo": "proxmox-nixos",
+
"rev": "ce8768f43b4374287cd8b88d8fa9c0061e749d9a",
+
"type": "github"
+
},
+
"original": {
+
"owner": "SaumonNet",
+
"repo": "proxmox-nixos",
+
"type": "github"
+
}
+
},
"root": {
"inputs": {
"agenix": "agenix",
"catppuccin": "catppuccin",
+
"disko": "disko",
"home-manager": "home-manager_2",
"lix-module": "lix-module",
+
"microvm": "microvm",
"nixos-hardware": "nixos-hardware",
-
"nixpkgs": "nixpkgs_3",
+
"nixpkgs": "nixpkgs_4",
+
"proxmox-nixos": "proxmox-nixos",
"vscode-server": "vscode-server",
"zen-browser": "zen-browser"
}
},
+
"spectrum": {
+
"flake": false,
+
"locked": {
+
"lastModified": 1759482047,
+
"narHash": "sha256-H1wiXRQHxxPyMMlP39ce3ROKCwI5/tUn36P8x6dFiiQ=",
+
"ref": "refs/heads/main",
+
"rev": "c5d5786d3dc938af0b279c542d1e43bce381b4b9",
+
"revCount": 996,
+
"type": "git",
+
"url": "https://spectrum-os.org/git/spectrum"
+
},
+
"original": {
+
"type": "git",
+
"url": "https://spectrum-os.org/git/spectrum"
+
}
+
},
"systems": {
"locked": {
"lastModified": 1681028828,
···
"type": "github"
}
},
+
"systems_4": {
+
"locked": {
+
"lastModified": 1681028828,
+
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+
"owner": "nix-systems",
+
"repo": "default",
+
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+
"type": "github"
+
},
+
"original": {
+
"owner": "nix-systems",
+
"repo": "default",
+
"type": "github"
+
}
+
},
+
"systems_5": {
+
"locked": {
+
"lastModified": 1681028828,
+
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+
"owner": "nix-systems",
+
"repo": "default",
+
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+
"type": "github"
+
},
+
"original": {
+
"owner": "nix-systems",
+
"repo": "default",
+
"type": "github"
+
}
+
},
+
"utils": {
+
"inputs": {
+
"systems": "systems_4"
+
},
+
"locked": {
+
"lastModified": 1710146030,
+
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
+
"owner": "numtide",
+
"repo": "flake-utils",
+
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
+
"type": "github"
+
},
+
"original": {
+
"owner": "numtide",
+
"repo": "flake-utils",
+
"type": "github"
+
}
+
},
"vscode-server": {
"inputs": {
-
"flake-utils": "flake-utils_2",
-
"nixpkgs": "nixpkgs_4"
+
"flake-utils": "flake-utils_3",
+
"nixpkgs": "nixpkgs_5"
},
"locked": {
-
"lastModified": 1729422940,
-
"narHash": "sha256-DlvJv33ml5UTKgu4b0HauOfFIoDx6QXtbqUF3vWeRCY=",
+
"lastModified": 1753541826,
+
"narHash": "sha256-foGgZu8+bCNIGeuDqQ84jNbmKZpd+JvnrL2WlyU4tuU=",
"owner": "nix-community",
"repo": "nixos-vscode-server",
-
"rev": "8b6db451de46ecf9b4ab3d01ef76e59957ff549f",
+
"rev": "6d5f074e4811d143d44169ba4af09b20ddb6937d",
"type": "github"
},
"original": {
···
]
},
"locked": {
-
"lastModified": 1748742977,
-
"narHash": "sha256-xacIevJ94ZOlNLOcFOAm3PiV7zXZr4G8FD7sJ9jKuz4=",
+
"lastModified": 1761180075,
+
"narHash": "sha256-V4WLeUQ4gCGZiVihlXWBOZ/1FNcL0jM4zgTY1haJLvY=",
"owner": "0xc000022070",
"repo": "zen-browser-flake",
-
"rev": "a9d3d70bc49c513a6e48ab24e279e014ab463bc6",
+
"rev": "771a2604606905d8c0ffe3b818dc2cc5bd1405d8",
"type": "github"
},
"original": {
+36 -34
flake.nix
···
nixpkgs.url = "github:nixos/nixpkgs/nixos-25.05";
nixos-hardware.url = "github:nixos/nixos-hardware/master";
+
proxmox-nixos.url = "github:SaumonNet/proxmox-nixos";
+
lix-module = {
-
url = "https://git.lix.systems/lix-project/nixos-module/archive/2.93.0.tar.gz";
+
url = "https://git.lix.systems/lix-project/nixos-module/archive/2.93.3-1.tar.gz";
inputs.nixpkgs.follows = "nixpkgs";
};
···
url = "github:0xc000022070/zen-browser-flake";
inputs.nixpkgs.follows = "nixpkgs";
};
-
#microvm.url = "github:astro/microvm.nix";
-
#microvm.inputs.nixpkgs.follows = "nixpkgs";
+
microvm.url = "github:astro/microvm.nix";
+
microvm.inputs.nixpkgs.follows = "nixpkgs";
+
+
disko.url = "github:nix-community/disko/v1.11.0";
catppuccin.url = "github:catppuccin/nix";
home-manager = {
···
system = "x86_64-linux";
};
modules = [
+
agenix.nixosModules.default
+
./hosts/focalor
lix-module.nixosModules.default
-
-
/*microvm.nixosModules.host
-
{
-
microvm.autostart = [
-
"windows"
-
];
-
}*/
-
vscode-server.nixosModules.default
-
agenix.nixosModules.default
-
catppuccin.nixosModules.catppuccin
-
home-manager.nixosModules.home-manager
{
home-manager.useGlobalPkgs = true;
···
system = "x86_64-linux";
};
}
+
+
{ imports = builtins.attrValues nixosModules; }
];
};
···
system = "x86_64-linux";
};
modules = [
+
agenix.nixosModules.default
+
./hosts/valefar
lix-module.nixosModules.default
-
vscode-server.nixosModules.default
-
agenix.nixosModules.default
+
+
proxmox-nixos.nixosModules.proxmox-ve
+
+
({ pkgs, lib, ... }: {
+
services.proxmox-ve = {
+
enable = true;
+
ipAddress = "10.0.0.30";
+
};
+
+
nixpkgs.overlays = [
+
proxmox-nixos.overlays.x86_64-linux
+
];
+
})
{ imports = builtins.attrValues nixosModules; }
];
···
./hosts/buer
agenix.nixosModules.default
+
+
{ imports = builtins.attrValues nixosModules; }
];
};
-
morax = nixpkgs.lib.nixosSystem {
+
baal = nixpkgs.lib.nixosSystem {
system = "aarch64-linux";
specialArgs = {
inherit inputs;
system = "aarch64-linux";
};
modules = [
-
./hosts/morax
-
nixos-hardware.nixosModules.raspberry-pi-4
-
+
./hosts/baal
+
agenix.nixosModules.default
+
disko.nixosModules.disko
+
+
{ imports = builtins.attrValues nixosModules; }
+
];
};
-
-
-
# Easy to add more hosts
-
/*
-
server2 = nixpkgs.lib.nixosSystem {
-
system = "x86_64-linux";
-
modules = [
-
./hosts/server2
-
agenix.nixosModules.default
-
# different services for server2
-
];
-
};
-
*/
};
};
-
}
+
}
+76 -24
home/regent/home.nix
···
-
{ config, pkgs, system, inputs, ... }:
+
{ config, pkgs, system, inputs, lib, ... }:
{
home.username = "regent";
···
theme = "catppuccin-mocha";
};
+
programs.neovim.enable = true;
programs.neovim = {
-
enable = true;
-
defaultEditor = true;
-
viAlias = true;
-
vimAlias = true;
-
vimdiffAlias = true;
+
extraPackages = with pkgs; [
+
lua-language-server
+
stylua
+
ripgrep
+
];
+
plugins = with pkgs.vimPlugins; [
-
nvim-lspconfig
-
nvim-treesitter.withAllGrammars
-
plenary-nvim
-
mini-nvim
+
lazy-nvim
];
};
-
home.pointerCursor = {
+
home.pointerCursor = {
gtk.enable = true;
package = pkgs.phinger-cursors;
name = "Phinger-cursors-light";
···
* {
-
font-family: FantasqueSansMono Nerd Font;
+
font-family: 'Fira Code', monospace;
font-size: 17px;
min-height: 0;
}
···
margin: 0px;
border: 0px;
/*background-color:rgb(0, 0, 0);*/
-
background-color: rgba(0, 0, 0, 0.8);
+
background-color: @base;
color: @text;
}
···
#workspaces {
border-radius: 1rem;
margin: 5px;
-
background-color: @surface0;
margin-left: 1rem;
+
background-color: rgba(0, 0, 0, 0.21);
}
-
#workspaces button {
color: @lavender;
···
#workspaces button.active {
color: @sky;
border-radius: 1rem;
+
background-color: rgba(255, 255, 255, 0.5);
}
#workspaces button:hover {
···
#pulseaudio,
#custom-lock,
#custom-power {
-
background-color: @surface0;
padding: 0.5rem 1rem;
margin: 5px 0;
+
color: @text;
}
#clock {
-
color: @blue;
+
color: @text;
border-radius: 0px 1rem 1rem 0px;
margin-right: 1rem;
}
-
'';
settings = {
mainBar = {
layer = "top";
position = "top";
-
height = 34;
+
mod = "dock";
+
exclusive = true;
+
passthrough = false;
+
#gtk-layer-shell = true;
+
height = 0;
output = [
"HDMI-A-1"
-
"DP-1"
+
"DP-3"
+
"DP-2"
];
-
modules-left = [ "sway/workspaces" ];
+
modules-left = [
+
"sway/workspaces"
+
];
modules-center = [ "sway/window" ];
-
modules-right = [ "clock" ];
+
modules-right = [
+
"pulseaudio"
+
"clock"
+
];
"sway/workspaces" = {
disable-scroll = true;
-
sort-by-name = true;
+
};
+
tray = {
+
icon-size = 13;
+
tooltip = false;
+
spacing = 10;
+
};
+
network = {
+
format = "󰖩 {essid}";
+
format-disconnected = "󰖪 disconnected";
+
};
+
clock = {
+
format = " {:%I:%M %p %m/%d} ";
+
tooltip-format = ''
+
<big>{:%Y %B}</big>
+
<tt><small>{calendar}</small></tt>'';
+
};
+
+
pulseaudio = {
+
format = "{icon} {volume}%";
+
tooltip = false;
+
format-muted = " Muted";
+
on-click = "pamixer -t";
+
on-scroll-up = "pamixer -i 5";
+
on-scroll-down = "pamixer -d 5";
+
scroll-step = 5;
+
format-icons = {
+
headphone = "";
+
hands-free = "";
+
headset = "";
+
phone = "";
+
portable = "";
+
car = "";
+
default = [ "" "" "" ];
+
};
+
};
+
+
"pulseaudio#microphone" = {
+
format = "{format_source}";
+
tooltip = false;
+
format-source = " {volume}%";
+
format-source-muted = " Muted";
+
on-click = "pamixer --default-source -t";
+
on-scroll-up = "pamixer --default-source -i 5";
+
on-scroll-down = "pamixer --default-source -d 5";
+
scroll-step = 5;
};
};
};
};
home.stateVersion = "25.05";
-
}
+
}
+7
host-secrets.nix
···
group = "garage";
mode = "0400";
};
+
+
"headscale-authkey" = {
+
file = ./secrets/headscale-authkey.age;
+
owner = "regent";
+
group = "users";
+
mode = "0400";
+
};
};
}
+58
hosts/baal/default.nix
···
+
{ config, lib, pkgs, modulesPath, inputs, ... }:
+
{
+
imports = [
+
./hardware.nix
+
./secrets.nix
+
+
../../common/system.nix
+
../../common/users.nix
+
../../common/services.nix
+
+
../../host-secrets.nix
+
];
+
+
boot = {
+
loader = {
+
systemd-boot.enable = true;
+
efi = {
+
canTouchEfiVariables = true;
+
efiSysMountPoint = "/boot";
+
};
+
};
+
initrd.systemd.enable = true;
+
};
+
+
system.stateVersion = "24.11";
+
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
+
+
systemd.targets.multi-user.enable = true;
+
+
networking = {
+
hostName = "baal";
+
hostId = "aaaaaaaa";
+
networkmanager.enable = true;
+
};
+
+
services.fail2ban = {
+
enable = true;
+
# Ban IP after 5 failures
+
maxretry = 5;
+
ignoreIP = [
+
"10.0.0.0/8" "172.16.0.0/12" "192.168.0.0/16" "100.64.0.0/10"
+
];
+
bantime = "24h"; # Ban IPs for one day on the first ban
+
bantime-increment = {
+
enable = true; # Enable increment of bantime after each violation
+
multipliers = "1 2 4 8 16 32 64";
+
maxtime = "168h"; # Do not ban for more than 1 week
+
overalljails = true; # Calculate the bantime based on all the violations
+
};
+
};
+
+
virtualisation.docker = {
+
enable = true;
+
enableOnBoot = true;
+
};
+
+
documentation.enable = false;
+
}
+55
hosts/baal/hardware.nix
···
+
# Do not modify this file! It was generated by ‘nixos-generate-config’
+
# and may be overwritten by future invocations. Please make changes
+
# to /etc/nixos/configuration.nix instead.
+
{ config, lib, pkgs, modulesPath, ... }:
+
+
{
+
imports =
+
[ (modulesPath + "/profiles/qemu-guest.nix")
+
];
+
+
boot.initrd.availableKernelModules = [ "xhci_pci" "virtio_pci" "virtio_scsi" "usbhid" ];
+
boot.initrd.kernelModules = [ ];
+
boot.kernelModules = [ ];
+
boot.extraModulePackages = [ ];
+
+
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
+
# (the default) this is the recommended approach. When using systemd-networkd it's
+
# still possible to use this option, but it's recommended to use it in conjunction
+
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
+
networking.useDHCP = lib.mkDefault true;
+
# networking.interfaces.enp0s6.useDHCP = lib.mkDefault true;
+
+
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
+
+
disko.devices = {
+
disk = {
+
main = {
+
type = "disk";
+
device = "/dev/sda";
+
content = {
+
type = "gpt";
+
partitions = {
+
boot = {
+
size = "512M";
+
type = "EF00";
+
content = {
+
type = "filesystem";
+
format = "vfat";
+
mountpoint = "/boot";
+
};
+
};
+
root = {
+
size = "100%";
+
content = {
+
type = "filesystem";
+
format = "ext4";
+
mountpoint = "/";
+
};
+
};
+
};
+
};
+
};
+
};
+
};
+
}
+3
hosts/baal/secrets.nix
···
+
{
+
+
}
+105 -44
hosts/buer/default.nix
···
-
# hosts/valefar/configuration.nix (or default.nix)
+
# hosts/buer/configuration.nix (or default.nix)
{ config, lib, pkgs, modulesPath, inputs, ... }:
-
{
+
# =============================================================================
+
# IMPORTS
+
# =============================================================================
imports = [
# Host-specific hardware
./hardware.nix
···
../../common/users.nix
../../common/services.nix
-
# Common secrets
../../host-secrets.nix
];
+
# =============================================================================
+
# SYSTEM CONFIGURATION
+
# =============================================================================
system.stateVersion = "24.11";
+
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
+
+
# Intel microcode updates
+
hardware.cpu.intel.updateMicrocode = lib.mkDefault
+
config.hardware.enableRedistributableFirmware;
+
+
# =============================================================================
+
# CUSTOM MODULES
+
# =============================================================================
modules.garage.enable = true;
+
modules.seaweedfs.clusters.default = {
+
package = pkgs.seaweedfs;
-
# pin host platform & microcode
-
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
-
hardware.cpu.intel.updateMicrocode = lib.mkDefault
-
config.hardware.enableRedistributableFirmware;
+
masters.main = {
+
openFirewall = true;
+
ip = "fs.nkp.pet";
+
volumePreallocate = true;
+
+
defaultReplication = {
+
dataCenter = 0;
+
rack = 0;
+
server = 0;
+
};
+
};
+
};
+
+
# =============================================================================
+
# BOOT CONFIGURATION
+
# =============================================================================
+
boot.loader.grub = {
+
enable = true;
+
device = "/dev/vda";
+
};
-
boot.loader.grub.enable = true;
-
boot.loader.grub.device = "/dev/vda";
+
# =============================================================================
+
# NETWORKING
+
# =============================================================================
+
networking = {
+
hostName = "buer";
+
hostId = "1418d29e";
+
firewall.enable = false;
+
useDHCP = false;
+
};
-
networking.hostName = "buer";
-
networking.hostId = "1418d29e";
-
networking.firewall.enable = false;
-
networking.useDHCP = false;
-
systemd.network.enable = true;
-
systemd.network.networks."10-wan" = {
-
matchConfig.Name = "ens3";
-
address = [
-
"103.251.165.107/24"
-
"2a04:52c0:0135:48d1::2/48"
+
services.fail2ban = {
+
enable = true;
+
# Ban IP after 5 failures
+
maxretry = 5;
+
ignoreIP = [
+
"10.0.0.0/8" "172.16.0.0/12" "192.168.0.0/16" "100.64.0.0/10"
];
-
gateway = [
-
"103.251.165.1"
-
"2a04:52c0:0135::1"
-
];
-
dns = [
-
"2a01:6340:1:20:4::10"
-
"2a04:52c0:130:2a5c::10"
-
"185.31.172.240"
-
"5.255.125.240"
-
];
+
bantime = "24h"; # Ban IPs for one day on the first ban
+
bantime-increment = {
+
enable = true; # Enable increment of bantime after each violation
+
multipliers = "1 2 4 8 16 32 64";
+
maxtime = "168h"; # Do not ban for more than 1 week
+
overalljails = true; # Calculate the bantime based on all the violations
+
};
};
-
-
#boot.supportedFilesystems = [ "zfs" ];
-
#boot.kernelModules = [ "nct6775" "coretemp" ];
-
#services.zfs.autoScrub.enable = true;
-
#services.zfs.trim.enable = true;
-
-
environment.systemPackages = with pkgs; [
-
#lm_sensors
-
#code-server
-
inputs.agenix.packages.x86_64-linux.default
-
];
+
# Static IP configuration via systemd-networkd
+
systemd.network = {
+
enable = true;
+
networks."10-wan" = {
+
matchConfig.Name = "ens3";
+
address = [
+
"103.251.165.107/24"
+
"2a04:52c0:0135:48d1::2/48"
+
];
+
gateway = [
+
"103.251.165.1"
+
"2a04:52c0:0135::1"
+
];
+
dns = [
+
"2a01:6340:1:20:4::10"
+
"2a04:52c0:130:2a5c::10"
+
"185.31.172.240"
+
"5.255.125.240"
+
];
+
};
+
};
+
# =============================================================================
+
# VIRTUALIZATION
+
# =============================================================================
virtualisation.docker = {
enable = true;
enableOnBoot = true;
-
package = pkgs.docker.override {
-
buildGoModule = pkgs.buildGo123Module;
-
};
};
-
}
+
+
# =============================================================================
+
# PACKAGES
+
# =============================================================================
+
environment.systemPackages = with pkgs; [
+
inputs.agenix.packages.x86_64-linux.default
+
];
+
+
# =============================================================================
+
# COMMENTED OUT / DISABLED
+
# =============================================================================
+
# ZFS support (not needed for this VPS)
+
# boot.supportedFilesystems = [ "zfs" ];
+
# boot.kernelModules = [ "nct6775" "coretemp" ];
+
# services.zfs.autoScrub.enable = true;
+
# services.zfs.trim.enable = true;
+
+
# Additional packages (not needed)
+
# lm_sensors
+
# code-server
+
}
-46
hosts/focalor/backup.nix
···
-
# Do not modify this file! It was generated by ‘nixos-generate-config’
-
# and may be overwritten by future invocations. Please make changes
-
# to /etc/nixos/configuration.nix instead.
-
{ config, lib, pkgs, modulesPath, ... }:
-
-
{
-
imports =
-
[ (modulesPath + "/installer/scan/not-detected.nix")
-
];
-
-
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "ahci" "usbhid" "sd_mod" ];
-
boot.initrd.kernelModules = [
-
"vfio" "vfio_iommu_type1" "vfio_pci"
-
"nvidia" "nvidia-modeset" "nvidia_uvm" "nvidia_drm"
-
];
-
boot.kernelModules = [ "kvm-amd" ];
-
boot.kernelParams = [
-
"amd_iommu=on"
-
"vfio-pci.ids=10de:2484,10de228b,1022:149c,15b7:5045"
-
];
-
boot.extraModulePackages = [ ];
-
-
fileSystems."/" =
-
{ device = "/dev/disk/by-uuid/2009b305-f22d-4d5c-a9d3-c49a2303232b";
-
fsType = "ext4";
-
};
-
-
fileSystems."/boot" =
-
{ device = "/dev/disk/by-uuid/E53C-502F";
-
fsType = "vfat";
-
options = [ "fmask=0077" "dmask=0077" ];
-
};
-
-
swapDevices = [ ];
-
-
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
-
# (the default) this is the recommended approach. When using systemd-networkd it's
-
# still possible to use this option, but it's recommended to use it in conjunction
-
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
-
networking.useDHCP = lib.mkDefault true;
-
# networking.interfaces.enp5s0.useDHCP = lib.mkDefault true;
-
# networking.interfaces.wlp4s0.useDHCP = lib.mkDefault true;
-
-
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
-
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
-
}
+108 -32
hosts/focalor/default.nix
···
-
# hosts/valefar/configuration.nix (or default.nix)
+
# hosts/focalor/configuration.nix (or default.nix)
{ config, lib, system, pkgs, modulesPath, inputs, ... }:
-
{
+
# =============================================================================
+
# IMPORTS
+
# =============================================================================
imports = [
# Host-specific hardware
./hardware.nix
···
../../common/users.nix
../../common/services.nix
../../common/efi.nix
-
+
../../common/bluetooth.nix
+
# Desktop modules
../../common/desktop/core.nix
../../common/desktop/sway.nix
../../common/desktop/vnc.nix
-
-
# Nvidia
+
+
# Hardware-specific
../../common/nvidia.nix
# Common secrets
-
#../../host-secrets.nix
+
../../host-secrets.nix
];
+
services.syncthing = {
+
enable = true;
+
openDefaultPorts = true;
+
user = "regent";
+
dataDir = "/home/regent";
+
configDir = "/home/regent/.config/syncthing";
+
};
+
+
# =============================================================================
+
# SYSTEM CONFIGURATION
+
# =============================================================================
system.stateVersion = "25.05";
-
-
# pin host platform & microcode
-
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
-
+
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
+
+
# Cross-compilation support
boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
-
-
networking.hostName = "focalor";
-
networking.hostId = "84bdc587";
+
nix.settings.extra-platforms = config.boot.binfmt.emulatedSystems;
-
programs.steam.enable = true;
-
programs.steam.gamescopeSession.enable = true;
-
programs.appimage.enable = true;
-
programs.appimage.binfmt = true;
+
# =============================================================================
+
# NETWORKING
+
# =============================================================================
+
networking = {
+
hostName = "focalor";
+
hostId = "84bdc587";
+
firewall.enable = false;
+
firewall.trustedInterfaces = [ "tailscale0" ];
+
nameservers = [ "10.0.0.210" "1.1.1.1" ];
+
};
+
# Systemd networking with bridge
systemd.network = {
enable = true;
+
netdevs."br0" = {
netdevConfig = {
Name = "br0";
Kind = "bridge";
};
};
+
networks = {
"10-lan" = {
matchConfig.Name = ["enp5s0" "vm-*"];
···
Bridge = "br0";
};
};
+
"10-lan-bridge" = {
matchConfig.Name = "br0";
networkConfig = {
···
};
};
+
# DNS resolution
services.resolved = {
-
enable = true;
-
dnssec = "true";
-
domains = [ "~." ];
+
enable = true;
+
dnssec = "true";
+
domains = [ "~." ];
fallbackDns = [ "10.0.0.210" "1.0.0.1#one.one.one.one" ];
-
dnsovertls = "true";
+
dnsovertls = "true";
};
+
+
# =============================================================================
+
# FILESYSTEM & STORAGE
+
# =============================================================================
+
boot.supportedFilesystems = [ "nfs" ];
-
services.vscode-server.enable = true;
-
services.vscode-server.nodejsPackage = pkgs.nodejs_20;
+
/*fileSystems."/mnt/storage" = {
+
device = "valefar:/storage";
+
fsType = "nfs";
+
};*/
-
environment.systemPackages = with pkgs; [
-
#lm_sensors
-
#code-server
-
inputs.agenix.packages.x86_64-linux.default
-
];
+
# =============================================================================
+
# SERVICES
+
# =============================================================================
+
services.vscode-server = {
+
enable = true;
+
nodejsPackage = pkgs.nodejs_20;
+
};
-
environment.sessionVariables.WLR_RENDERER = "vulkan";
+
# =============================================================================
+
# PROGRAMS & APPLICATIONS
+
# =============================================================================
+
programs.steam.enable = true;
+
+
programs.obs-studio = {
+
enable = true;
+
enableVirtualCamera = true;
+
plugins = with pkgs.obs-studio-plugins; [
+
droidcam-obs
+
];
+
};
+
# =============================================================================
+
# VIRTUALIZATION
+
# =============================================================================
virtualisation.docker = {
enable = true;
enableOnBoot = true;
-
package = pkgs.docker.override {
-
buildGoModule = pkgs.buildGo123Module;
-
};
};
+
# =============================================================================
+
# DESKTOP ENVIRONMENT
+
# =============================================================================
+
# Vulkan renderer for Wayland
+
environment.sessionVariables.WLR_RENDERER = "vulkan";
+
+
# XDG Portals
xdg.portal = {
enable = true;
wlr.enable = true;
···
xdg-desktop-portal-gnome
];
};
+
+
# =============================================================================
+
# PACKAGES
+
# =============================================================================
+
environment.systemPackages = with pkgs; [
+
inputs.agenix.packages.x86_64-linux.default
+
prismlauncher
+
temurin-bin
+
signal-desktop
+
];
+
+
# =============================================================================
+
# COMMENTED OUT / DISABLED
+
# =============================================================================
+
# ZFS support (disabled for this host)
+
# boot.supportedFilesystems = [ "zfs" ];
+
# boot.kernelModules = [ "nct6775" "coretemp" ];
+
# services.zfs.autoScrub.enable = true;
+
# services.zfs.trim.enable = true;
+
+
# Additional packages (commented out)
+
# lm_sensors
+
# code-server
+
+
# DHCP (disabled in favor of systemd-networkd)
+
networking.useDHCP = false;
+
# firewall.allowedTCPPorts = [22 80 443 2456 2457 9000 9001 9002];
}
+2 -2
hosts/focalor/hardware.nix
···
];
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "ahci" "uas" "usbhid" "sd_mod" ];
-
boot.initrd.kernelModules = [ "vfio" "vfio_iommu_type1" "vfio_pci" ];
+
# boot.initrd.kernelModules = [ "vfio" "vfio_iommu_type1" "vfio_pci" ];
boot.kernelModules = [ "kvm-amd" ];
boot.kernelParams = [
"amd_iommu=on"
-
"vfio-pci.ids=10de:2484,10de228b,1022:149c,15b7:5045,1dbe:5236,1022:149c"
+
# "vfio-pci.ids=10de:2484,10de228b,1022:149c,15b7:5045,1dbe:5236,1022:149c"
];
boot.extraModulePackages = [ ];
+1
hosts/focalor/scripts/vm-win11-hook.sh
···
fi
if [[ "$HOOK_NAME" == "prepare" && "$STATE_NAME" == "begin" ]]; then
+
#start_hook
echo "do nothing"
elif [[ "$HOOK_NAME" == "release" && "$STATE_NAME" == "end" ]]; then
revert_hook
-7
hosts/focalor/vfio.nix
···
package = pkgs.qemu_kvm;
runAsRoot = true;
swtpm.enable = true;
-
ovmf = {
-
enable = true;
-
packages = [(pkgs.OVMF.override {
-
secureBoot = true;
-
tpmSupport = true;
-
}).fd];
-
};
};
hooks.qemu = {
win11 = ./scripts/vm-win11-hook.sh;
-45
hosts/morax/default.nix
···
-
{ config, lib, pkgs, modulesPath, inputs, ... }:
-
-
{
-
imports = [
-
./hardware.nix
-
./secrets.nix
-
-
../../common/system.nix
-
../../common/users.nix
-
../../common/services.nix
-
../../host-secrets.nix
-
];
-
-
system.stateVersion = "25.05";
-
-
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
-
hardware.enableRedistributableFirmware = true;
-
hardware.enableAllHardware = lib.mkForce false; #https://github.com/NixOS/nixpkgs/issues/154163#issuecomment-2868994145
-
-
networking = {
-
hostName = "morax";
-
hostId = "2631a44a";
-
firewall.enable = false;
-
defaultGateway = {
-
address = "10.0.0.1";
-
interface = "eth0";
-
};
-
nameservers = [ "1.1.1.1" ];
-
interfaces.eth0 = {
-
ipv4.addresses = [{
-
address = "10.0.0.210";
-
prefixLength = 24;
-
}];
-
};
-
};
-
-
environment.systemPackages = with pkgs; [
-
inputs.agenix.packages.aarch64-linux.default
-
];
-
-
virtualisation.docker = {
-
enable = true;
-
enableOnBoot = true;
-
};
-
}
-34
hosts/morax/hardware.nix
···
-
# Do not modify this file! It was generated by ‘nixos-generate-config’
-
# and may be overwritten by future invocations. Please make changes
-
# to /etc/nixos/configuration.nix instead.
-
{ config, lib, pkgs, modulesPath, ... }:
-
-
{
-
imports =
-
[ (modulesPath + "/installer/scan/not-detected.nix")
-
];
-
-
boot.initrd.availableKernelModules = [ "xhci_pci" ];
-
boot.initrd.kernelModules = [ ];
-
boot.kernelModules = [ ];
-
boot.extraModulePackages = [ ];
-
-
fileSystems."/" =
-
{ device = "/dev/disk/by-uuid/44444444-4444-4444-8888-888888888888";
-
fsType = "ext4";
-
};
-
-
swapDevices = [ ];
-
-
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
-
# (the default) this is the recommended approach. When using systemd-networkd it's
-
# still possible to use this option, but it's recommended to use it in conjunction
-
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
-
networking.useDHCP = lib.mkDefault true;
-
# networking.interfaces.docker0.useDHCP = lib.mkDefault true;
-
# networking.interfaces.eth0.useDHCP = lib.mkDefault true;
-
# networking.interfaces.tailscale0.useDHCP = lib.mkDefault true;
-
# networking.interfaces.wlan0.useDHCP = lib.mkDefault true;
-
-
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
-
}
-3
hosts/morax/secrets.nix
···
-
{
-
-
}
+64
hosts/valefar/backup.nix
···
+
# Do not modify this file! It was generated by ‘nixos-generate-config’
+
# and may be overwritten by future invocations. Please make changes
+
# to /etc/nixos/configuration.nix instead.
+
{ config, lib, pkgs, modulesPath, ... }:
+
+
{
+
imports =
+
[ (modulesPath + "/installer/scan/not-detected.nix")
+
];
+
+
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "nvme" "mpt3sas" "sd_mod" ];
+
boot.initrd.kernelModules = [ ];
+
boot.kernelModules = [ "kvm-amd" ];
+
boot.extraModulePackages = [ ];
+
+
fileSystems."/" = {
+
device = "/dev/disk/by-uuid/17b399da-2210-4493-9ae3-c65b20b992a0";
+
fsType = "ext4";
+
};
+
+
fileSystems."/boot" =
+
{ device = "/dev/disk/by-uuid/6340-211B";
+
fsType = "vfat";
+
options = [ "fmask=0022" "dmask=0022" ];
+
};
+
+
/* fileSystems."/garage" = {
+
device = "garage";
+
fsType = "zfs";
+
};
+
+
fileSystems."/storage" = {
+
device = "storage";
+
fsType = "zfs";
+
};*/
+
+
swapDevices = [ ];
+
+
# Fan Control
+
hardware.fancontrol = {
+
enable = false;
+
config = ''
+
INTERVAL=10
+
DEVPATH=hwmon1=devices/platform/nct6775.2592 hwmon2=devices/platform/coretemp.0
+
DEVNAME=hwmon1=nct6795 hwmon2=coretemp
+
FCTEMPS=hwmon1/pwm2=hwmon2/temp1_input hwmon1/pwm3=hwmon2/temp1_input
+
FCFANS=hwmon1/pwm2=hwmon1/fan2_input hwmon1/pwm3=hwmon1/fan3_input
+
MINTEMP=hwmon1/pwm2=20 hwmon1/pwm3=20
+
MAXTEMP=hwmon1/pwm2=65 hwmon1/pwm3=60
+
MINSTART=hwmon1/pwm2=38 hwmon1/pwm3=75
+
MINSTOP=hwmon1/pwm2=28 hwmon1/pwm3=75
+
MINPWM=hwmon1/pwm2=28 hwmon1/pwm3=75
+
MAXPWM=hwmon1/pwm2=150 hwmon1/pwm3=105
+
'';
+
};
+
+
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
+
# (the default) this is the recommended approach. When using systemd-networkd it's
+
# still possible to use this option, but it's recommended to use it in conjunction
+
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
+
networking.useDHCP = lib.mkDefault true;
+
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
+
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
+
}
+256 -73
hosts/valefar/default.nix
···
# hosts/valefar/configuration.nix (or default.nix)
-
{ config, lib, pkgs, modulesPath, inputs, ... }:
-
+
{ config, lib, pkgs, modulesPath, microvm, inputs, ... }:
{
+
# =============================================================================
+
# IMPORTS
+
# =============================================================================
imports = [
-
# Host-specific hardware
./hardware.nix
./secrets.nix
-
#../../common/nvidia.nix
+
../../common/nvidia.nix
+
+
../../host-secrets.nix
-
# Common modules shared across hosts
../../common/system.nix
../../common/users.nix
../../common/services.nix
../../common/efi.nix
-
# Common secrets
-
../../host-secrets.nix
+
../../common/nvidia.nix
];
-
# Enable modules
+
# =============================================================================
+
# SYSTEM CONFIGURATION
+
# =============================================================================
+
system.stateVersion = "24.11";
+
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
+
+
hardware.cpu.amd.updateMicrocode = lib.mkDefault
+
config.hardware.enableRedistributableFirmware;
+
+
# =============================================================================
+
# CUSTOM MODULES
+
# =============================================================================
modules.garage.enable = true;
modules.forgejo.enable = true;
+
modules.immich.enable = true;
+
modules.github-runners.enable = true;
-
system.stateVersion = "24.11";
+
# =============================================================================
+
# NETWORKING
+
# =============================================================================
+
/*networking = {
+
hostName = "valefar";
+
hostId = "2a07da90";
+
firewall.enable = false;
+
firewall.trustedInterfaces = [ "tailscale0" ];
+
nameservers = [ "10.0.0.210" "1.1.1.1" ];
+
useDHCP = true;
+
firewall.allowedTCPPorts = [ 22 80 443 2049 2456 2457 9000 9001 9002 ];
+
firewall.allowedUDPPorts = [ 2049 ];
+
};*/
+
networking.useNetworkd = true;
+
systemd.network.enable = true;
+
networking.hostName = "valefar";
+
networking.hostId = "2a07da90";
+
networking.firewall.enable = false;
-
# pin host platform & microcode
-
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
-
hardware.cpu.intel.updateMicrocode = lib.mkDefault
-
config.hardware.enableRedistributableFirmware;
+
services.proxmox-ve.bridges = [ "vmbr0" ];
-
networking.hostName = "valefar";
-
networking.hostId = "2a07da90";
+
systemd.network.networks."10-lan" = {
+
matchConfig.Name = ["enp6s0"];
+
networkConfig = {
+
Bridge = "vmbr0";
+
};
+
};
+
systemd.network.netdevs."br0" = {
+
netdevConfig = {
+
Name = "vmbr0";
+
Kind = "bridge";
+
};
+
};
-
networking = {
-
firewall.enable = false;
-
firewall.trustedInterfaces = [
-
"tailscale0"
-
];
-
nameservers = [ "10.0.0.210" "1.1.1.1" ];
-
useDHCP = true;
-
firewall.allowedTCPPorts = [22 80 443 2456 2457 9000 9001 9002];
+
systemd.network.networks."10-lan-bridge" = {
+
matchConfig.Name = "vmbr0";
+
networkConfig = {
+
Address = ["10.0.0.30/24" "2601:5c2:8400:26c0::30/64"];
+
Gateway = "10.0.0.1";
+
DNS = ["10.0.0.210" "1.1.1.1" "1.0.0.1"];
+
IPv6AcceptRA = true;
+
};
+
linkConfig.RequiredForOnline = "routable";
};
+
# DNS resolution
services.resolved = {
-
enable = true;
-
dnssec = "false";
-
domains = [ "~." ];
+
enable = true;
+
dnssec = "false";
+
domains = [ "~." ];
fallbackDns = [ "10.0.0.210" "1.1.1.1" ];
-
dnsovertls = "false";
+
dnsovertls = "false";
};
-
-
boot.supportedFilesystems = [ "zfs" ];
-
boot.kernelModules = [ "nct6775" "coretemp" ];
-
boot.zfs.extraPools = [ "garage" "storage" ];
-
boot.zfs.devNodes = "/dev/disk/by-id";
-
boot.zfs.forceImportAll = true;
-
-
/*boot.kernelParams = [ "ip=dhcp" ];
-
boot.initrd = {
-
availableKernelModules = [ "r8169" ];
-
network = {
-
enable = true;
-
ssh = {
-
enable = true;
-
port = 22;
-
authorizedKeys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ0pU82lV9dSjkgYbdh9utZ5CDM2dPN70S5fBqN1m3Pb" ];
-
hostKeys = [ "/etc/secrets/initrd/ssh_host_rsa_key" ];
-
shell = "/bin/cryptsetup-askpass";
-
};
+
# =============================================================================
+
# BOOT & FILESYSTEMS
+
# =============================================================================
+
boot = {
+
supportedFilesystems = [ "zfs" ];
+
kernelModules = [ "nct6775" "coretemp" ];
+
+
zfs = {
+
extraPools = [ "garage" "storage" ];
+
devNodes = "/dev/disk/by-id";
+
forceImportAll = true;
};
-
};*/
+
};
+
# =============================================================================
+
# ZFS CONFIGURATION
+
# =============================================================================
+
# ZFS import services
systemd.services.zfs-import-cache.enable = false;
-
systemd.services.zfs-import-scan.enable = true;
-
systemd.services.zfs-import-scan = {
+
enable = true;
after = [ "systemd-udev-settle.service" ];
wants = [ "systemd-udev-settle.service" ];
};
+
# ZFS mount points
systemd.mounts = [
-
{
-
what = "garage";
-
where = "/garage";
-
type = "zfs";
-
after = [ "zfs-import-scan.service" ];
-
wants = [ "zfs-import-scan.service" ];
-
}
-
{
-
what = "storage";
-
where = "/storage";
-
type = "zfs";
-
after = [ "zfs-import-scan.service" ];
-
wants = [ "zfs-import-scan.service" ];
-
}
-
];
+
{
+
what = "garage";
+
where = "/garage";
+
type = "zfs";
+
after = [ "zfs-import-scan.service" ];
+
wants = [ "zfs-import-scan.service" ];
+
}
+
{
+
what = "storage";
+
where = "/storage";
+
type = "zfs";
+
after = [ "zfs-import-scan.service" ];
+
wants = [ "zfs-import-scan.service" ];
+
}
+
];
+
+
# ZFS maintenance
+
services.zfs = {
+
autoScrub.enable = true;
+
trim.enable = true;
+
};
+
+
# =============================================================================
+
# DIRECTORY STRUCTURE
+
# =============================================================================
+
systemd.tmpfiles.rules = [
+
"d /storage/immich 0755 immich immich -"
+
"d /storage/immich/photos 0755 immich immich -"
+
"Z /storage/immich 0755 immich immich -"
+
"d /storage/tm_share 0755 regent users"
+
"Z /garage/ 0755 garage garage -"
+
];
-
services.zfs.autoScrub.enable = true;
-
services.zfs.trim.enable = true;
+
# =============================================================================
+
# NFS SERVER
+
# =============================================================================
+
services.nfs.server = {
+
enable = true;
+
exports = ''
+
/storage *(rw,sync,no_subtree_check,no_root_squash)
+
'';
+
};
-
services.vscode-server.enable = true;
-
services.vscode-server.nodejsPackage = pkgs.nodejs_20;
+
services.samba = {
+
enable = true;
+
settings = {
+
global = {
+
"workgroup" = "WORKGROUP";
+
"server string" = "valefar";
+
"netbios name" = "valefar";
+
"security" = "user";
+
"hosts allow" = "100.64.0.0/10 10.0.0.0/24 127.0.0.1 localhost";
+
"hosts deny" = "0.0.0.0/0";
+
"guest account" = "nobody";
+
"map to guest" = "bad user";
+
};
+
+
"tm_share" = {
+
"path" = "/storage/tm_share";
+
"valid users" = "regent";
+
"public" = "yes";
+
"writeable" = "yes";
+
"force user" = "regent";
+
"fruit:aapl" = "yes";
+
"fruit:time machine" = "yes";
+
"vfs objects" = "catia fruit streams_xattr";
+
};
+
};
+
};
+
+
services.netatalk = {
+
enable = true;
+
settings = {
+
time-machine = {
+
path = "/storage/timemachine";
+
"valid users" = "regent";
+
"time machine" = true;
+
};
+
};
+
};
+
+
services.avahi = {
+
enable = true;
+
nssmdns = true;
+
publish = {
+
enable = true;
+
userServices = true;
+
};
+
+
extraServiceFiles = {
+
timemachine = ''
+
<?xml version="1.0" standalone='no'?>
+
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
+
<service-group>
+
<name replace-wildcards="yes">%h</name>
+
<service>
+
<type>_smb._tcp</type>
+
<port>445</port>
+
</service>
+
<service>
+
<type>_device-info._tcp</type>
+
<port>0</port>
+
<txt-record>model=TimeCapsule8,119</txt-record>
+
</service>
+
<service>
+
<type>_adisk._tcp</type>
+
<!--
+
change tm_share to share name, if you changed it.
+
-->
+
<txt-record>dk0=adVN=tm_share,adVF=0x82</txt-record>
+
<txt-record>sys=waMa=0,adVF=0x100</txt-record>
+
</service>
+
</service-group>
+
'';
+
};
+
};
+
+
# =============================================================================
+
# SERVICES
+
# =============================================================================
+
services.vscode-server = {
+
enable = true;
+
nodejsPackage = pkgs.nodejs_20;
+
};
+
+
services.ollama = {
+
enable = true;
+
loadModels = ["deepseek-r1:1.5b" "gemma3:12b"];
+
acceleration = "cuda";
+
};
+
+
services.open-webui.enable = true;
+
+
# =============================================================================
+
# VIRTUALIZATION
+
# =============================================================================
+
virtualisation.docker = {
+
enable = true;
+
enableOnBoot = true;
+
};
+
+
services.fail2ban = {
+
enable = true;
+
# Ban IP after 5 failures
+
maxretry = 5;
+
ignoreIP = [
+
"10.0.0.0/8" "172.16.0.0/12" "192.168.0.0/16" "100.64.0.0/10"
+
];
+
bantime = "24h"; # Ban IPs for one day on the first ban
+
bantime-increment = {
+
enable = true; # Enable increment of bantime after each violation
+
multipliers = "1 2 4 8 16 32 64";
+
maxtime = "168h"; # Do not ban for more than 1 week
+
overalljails = true; # Calculate the bantime based on all the violations
+
};
+
};
+
+
# =============================================================================
+
# PACKAGES
+
# =============================================================================
environment.systemPackages = with pkgs; [
lm_sensors
code-server
inputs.agenix.packages.x86_64-linux.default
];
-
virtualisation.docker = {
-
enable = true;
-
enableOnBoot = true;
-
package = pkgs.docker.override {
-
buildGoModule = pkgs.buildGo123Module;
+
+
# =============================================================================
+
# VIRTUAL MACHINES
+
# =============================================================================
+
/*systemd.services."microvm@".after = [ "microvm-virtiofsd@%i.service" ];
+
+
microvm.vms = {
+
gameservers = {
+
config = import ./gamevm.nix;
};
};
+
+
microvm.autostart = [
+
"gameservers"
+
];*/
}
+12 -36
hosts/valefar/hardware.nix
···
[ (modulesPath + "/installer/scan/not-detected.nix")
];
-
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "nvme" "mpt3sas" "sd_mod" ];
+
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "mpt3sas" "nvme" "usbhid" "uas" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
-
fileSystems."/" = {
-
device = "/dev/disk/by-uuid/17b399da-2210-4493-9ae3-c65b20b992a0";
-
fsType = "ext4";
-
};
+
fileSystems."/" =
+
{ device = "/dev/disk/by-uuid/e02d1d07-3bc8-4d1d-a301-6d589f4b4b6d";
+
fsType = "ext4";
+
};
fileSystems."/boot" =
-
{ device = "/dev/disk/by-uuid/6340-211B";
+
{ device = "/dev/disk/by-uuid/B3DE-0187";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
-
/* fileSystems."/garage" = {
-
device = "garage";
-
fsType = "zfs";
-
};
-
-
fileSystems."/storage" = {
-
device = "storage";
-
fsType = "zfs";
-
};*/
-
-
swapDevices = [ ];
-
-
# Fan Control
-
hardware.fancontrol = {
-
enable = false;
-
config = ''
-
INTERVAL=10
-
DEVPATH=hwmon1=devices/platform/nct6775.2592 hwmon2=devices/platform/coretemp.0
-
DEVNAME=hwmon1=nct6795 hwmon2=coretemp
-
FCTEMPS=hwmon1/pwm2=hwmon2/temp1_input hwmon1/pwm3=hwmon2/temp1_input
-
FCFANS=hwmon1/pwm2=hwmon1/fan2_input hwmon1/pwm3=hwmon1/fan3_input
-
MINTEMP=hwmon1/pwm2=20 hwmon1/pwm3=20
-
MAXTEMP=hwmon1/pwm2=65 hwmon1/pwm3=60
-
MINSTART=hwmon1/pwm2=38 hwmon1/pwm3=75
-
MINSTOP=hwmon1/pwm2=28 hwmon1/pwm3=75
-
MINPWM=hwmon1/pwm2=28 hwmon1/pwm3=75
-
MAXPWM=hwmon1/pwm2=150 hwmon1/pwm3=105
-
'';
-
};
+
swapDevices =
+
[ { device = "/dev/disk/by-uuid/c8f24f31-49e0-486c-9f63-1d31b2e36ce9"; }
+
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
+
# networking.interfaces.enp6s0.useDHCP = lib.mkDefault true;
+
+
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
-
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
}
+63 -4
modules/caddy/default.nix
···
let
cfg = config.modules.caddy;
caddyMetricsPort = 2019;
+
+
# Generate Caddyfile content from the proxy configuration
+
generateCaddyfile = proxies:
+
let
+
proxyEntries = mapAttrsToList (domain: upstreams:
+
let
+
upstreamList = if isList upstreams then upstreams else [upstreams];
+
upstreamStr = concatStringsSep " " upstreamList;
+
in ''
+
${domain} {
+
reverse_proxy ${upstreamStr}
+
+
# Optional: Add some common headers for better proxying
+
header_up Host {upstream_hostport}
+
header_up X-Real-IP {remote_host}
+
header_up X-Forwarded-For {remote_host}
+
header_up X-Forwarded-Proto {scheme}
+
}
+
'') proxies;
+
in
+
concatStringsSep "\n\n" proxyEntries;
+
in
{
options = {
modules = {
-
caddy = { enable = mkEnableOption "Deploy Caddy"; };
+
caddy = {
+
enable = mkEnableOption "Deploy Caddy";
+
+
reverseProxies = mkOption {
+
type = types.attrsOf (types.either types.str (types.listOf types.str));
+
default = {};
+
description = "Attribute set of domain to upstream mappings for reverse proxying. Upstreams can be a single string or a list of strings for load balancing.";
+
example = {
+
"notes.nekomimi.pet" = "valefar:3009";
+
"git.nekomimi.pet" = ["morax:3000" "valefar:3000"]; # Load balance between multiple upstreams
+
"api.nekomimi.pet" = ["server1:8080" "server2:8080" "server3:8080"];
+
};
+
};
+
+
extraConfig = mkOption {
+
type = types.lines;
+
default = "";
+
description = "Extra Caddyfile configuration to append";
+
};
+
+
email = mkOption {
+
type = types.nullOr types.str;
+
default = null;
+
description = "Email address for ACME certificate registration";
+
};
+
};
};
};
···
services.caddy = {
enable = true;
-
/* package = pkgs.caddy.withPlugins {
-
plugins = [ "github.com/caddy-dns/cloudflare@v0.0.0-20240703190432-89f16b99c18e"];
-
hash = "sha256-JVkUkDKdat4aALJHQCq1zorJivVCdyBT+7UhqTvaFLw=";
+
/*package = pkgs.caddy.withPlugins {
+
plugins = [ "github.com/caddy-dns/cloudflare@v0.2.1"];
+
hash = "sha256-1niaf801sijvjrqvw998y8x7b43a0g162h3ry530qwl8lrgkapii";
};*/
+
+
extraConfig = ''
+
${optionalString (cfg.email != null) ''
+
{
+
email ${cfg.email}
+
}
+
''}
+
+
${generateCaddyfile cfg.reverseProxies}
+
+
${cfg.extraConfig}
+
'';
};
systemd.services.caddy = {
+3 -2
modules/forgejo/default.nix
···
SSH_LISTEN_PORT = 2222;
SSH_PORT = 2222;
START_SSH_SERVER = true;
+
SSH_DOMAIN = "sgit.nekomimi.pet";
};
-
# service.DISABLE_REGISTRATION = true;
+
service.DISABLE_REGISTRATION = true;
actions = {
ENABLED = true;
DEFAULT_ACTIONS_URL = "github";
···
};
};
};
-
}
+
}
+4 -4
modules/garage/default.nix
···
config = mkIf cfg.enable {
services.garage = {
enable = true;
-
package = pkgs.garage;
+
package = pkgs.garage_2;
settings = {
metadata_dir = "/garage/metadata";
data_dir = "/garage/data";
db_engine = "lmdb";
-
replication_mode = "2";
+
replication_factor = 2;
rpc_bind_addr = "[::]:3901";
rpc_public_addr = "${config.networking.hostName}:3901";
rpc_secret_file = config.age.secrets."garage-rpc-secret".path;
···
metrics_token_file = config.age.secrets."garage-metrics-token".path;
};
bootstrap_peers = [
-
"d548d0c9ae9aec9e26fe0bd2ca3efe75f654fa350bad5cb02bc9aebc9850ba8f@[2a04:52c0:135:48d1::2]:3901" # buer
-
"5504cb25910dcef4a4312006691d651c099cde7c3a88df9ca79aa350571e6e65@[2601:5c2:8400:26c0:4ecc:6aff:fef7:98ca]:3901" #valefar
+
"d548d0c9ae9aec9e26fe0bd2ca3efe75f654fa350bad5cb02bc9aebc9850ba8f@[buer]:3901"
+
"5504cb25910dcef4a4312006691d651c099cde7c3a88df9ca79aa350571e6e65@[valefar]:3901"
];
};
};
+12 -3
modules/github-runners/default.nix
···
services.github-runners = {
simplelink = {
enable = true;
-
name = "simplelink";
+
name = "simplelink-" + config.networking.hostName;
url = "https://github.com/waveringana/simplelink";
-
token = config.age.secrets."build-token".path;
+
tokenFile = config.age.secrets."build-token".path;
+
user = "regent";
+
group = "docker";
+
extraPackages = extraPackages;
+
};
+
embedder = {
+
enable = true;
+
name = "embedder-" + config.networking.hostName;
+
url = "https://github.com/waveringana/embedder";
+
tokenFile = config.age.secrets."build-token".path;
user = "regent";
group = "docker";
extraPackages = extraPackages;
-
};
+
};
};
};
}
+229
modules/headscale/default.nix
···
+
{ config, lib, pkgs, ... }:
+
+
with lib;
+
let
+
cfg = config.modules.headscale;
+
in
+
{
+
options = {
+
modules = {
+
headscale = {
+
enable = mkEnableOption "Deploy headscale";
+
+
oidcClientSecretPath = mkOption {
+
type = types.str;
+
default = "/etc/headscale/oidc_client_secret.key";
+
description = "Path to OIDC client secret file";
+
example = "config.age.secrets.headscale-oidc-key.path";
+
};
+
+
litestream = {
+
enable = mkEnableOption "Enable litestream for headscale database backups";
+
+
replicas = mkOption {
+
type = types.listOf (types.attrsOf types.anything);
+
default = [];
+
description = "List of litestream replica configurations";
+
example = [
+
{
+
url = "s3://your-backup-bucket/headscale/db";
+
access-key-id = "$LITESTREAM_ACCESS_KEY_ID";
+
secret-access-key = "$LITESTREAM_SECRET_ACCESS_KEY";
+
region = "us-east-1";
+
}
+
];
+
};
+
+
backupPath = mkOption {
+
type = types.nullOr types.str;
+
default = null;
+
description = "Local backup path (alternative to S3)";
+
example = "/backup/headscale";
+
};
+
+
syncInterval = mkOption {
+
type = types.str;
+
default = "1s";
+
description = "How often to sync to replicas";
+
};
+
+
retention = mkOption {
+
type = types.str;
+
default = "72h";
+
description = "How long to retain snapshots";
+
};
+
+
environmentFile = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = "Environment file containing S3 credentials (can be agenix secret)";
+
example = "config.age.secrets.litestream-env.path";
+
};
+
};
+
};
+
};
+
};
+
+
config = mkIf cfg.enable {
+
services.headscale = {
+
enable = true;
+
address = "0.0.0.0";
+
port = 8080;
+
+
settings = {
+
server_url = "https://headscale.nekomimi.pet";
+
+
# Metrics and gRPC
+
metrics_listen_addr = "127.0.0.1:9090";
+
grpc_listen_addr = "127.0.0.1:50443";
+
grpc_allow_insecure = false;
+
+
# Prefixes
+
prefixes = {
+
v4 = "100.64.0.0/10";
+
v6 = "fd7a:115c:a1e0::/48";
+
allocation = "sequential";
+
};
+
+
# Database
+
database = {
+
type = "sqlite";
+
sqlite = {
+
path = "/var/lib/headscale/db.sqlite";
+
write_ahead_log = true;
+
};
+
};
+
+
# Noise
+
noise = {
+
private_key_path = "/var/lib/headscale/noise_private.key";
+
};
+
+
# DERP
+
derp = {
+
urls = [
+
"https://controlplane.tailscale.com/derpmap/default"
+
];
+
paths = [];
+
auto_update_enabled = true;
+
update_frequency = "24h";
+
server = {
+
enabled = false;
+
region_id = 999;
+
region_code = "headscale";
+
region_name = "Headscale Embedded DERP";
+
stun_listen_addr = "0.0.0.0:3478";
+
private_key_path = "/var/lib/headscale/derp_server_private.key";
+
automatically_add_embedded_derp_region = true;
+
ipv4 = "1.2.3.4";
+
ipv6 = "2001:db8::1";
+
};
+
};
+
+
# DNS
+
dns = {
+
magic_dns = true;
+
base_domain = "dns.sharkgirl.pet";
+
nameservers = {
+
global = [
+
"100.64.0.7"
+
"1.1.1.1"
+
"1.0.0.1"
+
"2606:4700:4700::1111"
+
"2606:4700:4700::1001"
+
];
+
};
+
search_domains = [];
+
};
+
+
# OIDC with configurable secret path
+
oidc = {
+
only_start_if_oidc_is_available = true;
+
issuer = "https://pocketid.nekomimi.pet";
+
client_id = "f345acad-3eac-45b7-9d91-57f388987a57";
+
client_secret_path = cfg.oidcClientSecretPath;
+
pkce = {
+
enabled = true;
+
method = "S256";
+
};
+
};
+
+
# Policy
+
policy = {
+
mode = "database";
+
};
+
+
# TLS/ACME
+
acme_url = "https://acme-v02.api.letsencrypt.org/directory";
+
acme_email = "";
+
tls_letsencrypt_hostname = "";
+
tls_letsencrypt_cache_dir = "/var/lib/headscale/cache";
+
tls_letsencrypt_challenge_type = "HTTP-01";
+
tls_letsencrypt_listen = ":http";
+
tls_cert_path = "";
+
tls_key_path = "";
+
+
# Logging
+
log = {
+
format = "text";
+
level = "info";
+
};
+
+
# Misc settings
+
disable_check_updates = false;
+
ephemeral_node_inactivity_timeout = "30m";
+
unix_socket = "/var/run/headscale/headscale.sock";
+
unix_socket_permission = "0770";
+
logtail = {
+
enabled = false;
+
};
+
randomize_client_port = false;
+
};
+
};
+
+
# Configurable Litestream for SQLite database backups
+
services.litestream = mkIf cfg.litestream.enable {
+
enable = true;
+
settings = {
+
dbs = [
+
{
+
path = "/var/lib/headscale/db.sqlite";
+
sync-interval = cfg.litestream.syncInterval;
+
retention = cfg.litestream.retention;
+
replicas =
+
# Use custom replicas if provided
+
if cfg.litestream.replicas != [] then
+
cfg.litestream.replicas
+
# Otherwise use local backup if path is provided
+
else if cfg.litestream.backupPath != null then
+
[{ path = cfg.litestream.backupPath; }]
+
# Default empty (user must configure)
+
else
+
[];
+
}
+
];
+
};
+
};
+
+
# Configure systemd service to use agenix secrets
+
systemd.services.headscale.serviceConfig = mkMerge [
+
{
+
SupplementaryGroups = [ "headscale-secrets" ];
+
}
+
# Add environment file for litestream if specified
+
(mkIf (cfg.litestream.enable && cfg.litestream.environmentFile != null) {
+
EnvironmentFile = cfg.litestream.environmentFile;
+
})
+
];
+
+
# Configure litestream service with environment file if specified
+
systemd.services.litestream = mkIf (cfg.litestream.enable && cfg.litestream.environmentFile != null) {
+
serviceConfig = {
+
EnvironmentFile = cfg.litestream.environmentFile;
+
};
+
};
+
+
# Create a group for accessing secrets
+
users.groups.headscale-secrets = {};
+
};
+
}
+28
modules/immich/default.nix
···
+
{ config, lib, pkgs, ... }:
+
+
with lib;
+
let
+
cfg = config.modules.immich;
+
+
immichRoot = "/storage/immich"; #TODO make this configurable through nix
+
immichPhotos = "${immichRoot}/photos";
+
in
+
{
+
options = {
+
modules = {
+
immich = {
+
enable = mkEnableOption "Deploy immich";
+
};
+
};
+
};
+
+
config = mkIf cfg.enable {
+
services.immich = {
+
enable = true;
+
port = 2283;
+
host = "0.0.0.0";
+
mediaLocation = immichPhotos;
+
settings = null;
+
};
+
};
+
}
+42
modules/monero/default.nix
···
+
{ config, lib, pkgs, ... }:
+
+
with lib;
+
let
+
cfg = config.modules.monero;
+
+
#TODO make this configurable through nix
+
address = "46Ev6Vk4QeQZTr14tRjksTT2VPhi4jKB48mGz31rpUUci2Bvg9PHZj9GLK3VceWDc13tkUbzmqQz8eKR3hkD9bUKFKHLrzg";
+
dataDir = "/storage/monero";
+
miningThreads = 6;
+
user = "regent";
+
password = "AnRPCPasswordChangedImperatively";
+
rpcAddress = "127.0.0.1";
+
rpcPort = 18081;
+
in
+
{
+
options = {
+
modules = {
+
monero = {
+
enable = mkEnableOption "Deploy monero node";
+
};
+
};
+
};
+
+
config = mkIf cfg.enable {
+
services.monero = {
+
enable = true;
+
dataDir = dataDir;
+
rpc = {
+
user = user;
+
password = password;
+
address = rpcAddress;
+
port = rpcPort;
+
};
+
mining = {
+
enable = true;
+
threads = miningThreads;
+
address = address;
+
};
+
};
+
};
+
}
+858
modules/seaweedfs/default.nix
···
+
/*https://hg.sr.ht/~dermetfan/seaweedfs-nixos/browse/seaweedfs.nix?rev=tip*/
+
+
{ config, lib, pkgs, ... }:
+
+
with lib;
+
+
let
+
cfg = config.modules.seaweedfs;
+
+
# Per-cluster option set; also derives the flat `instances` attrset that the
# rest of the module turns into systemd services.
#
# FIX: "-compactionMBps=${compactionMBps}" interpolated a raw integer, which
# Nix cannot coerce to a string; evaluation failed whenever compactionMBps was
# set. It now goes through `toString` like the other numeric flags.
clusterModule = cluster: {
  options = {
    package = mkOption {
      type = types.package;
      default = pkgs.seaweedfs;
    };

    # Optional gRPC TLS material, per component role.
    security.grpc = let
      auth = mkOption {
        type = with types; nullOr (submodule {
          options = {
            cert = mkOption { type = path; };
            key = mkOption { type = path; };
          };
        });
        default = null;
      };
    in {
      ca = mkOption {
        type = with types; nullOr str;
        default = null;
      };

      master = auth;
      volume = auth;
      filer = auth;
      client = auth;
      msgBroker = auth;
    };

    masters = mkOption {
      type = with types; attrsOf (submodule (masterModule cluster.config));
      default = {};
      description = "SeaweedFS masters";
    };

    volumes = mkOption {
      type = with types; attrsOf (submodule (volumeModule cluster.config));
      default = {};
      description = "SeaweedFS volumes";
    };

    filers = mkOption {
      type = with types; attrsOf (submodule (filerModule cluster.config));
      default = {};
      description = "SeaweedFS filers";
    };

    webdavs = mkOption {
      type = with types; attrsOf (submodule (webdavModule cluster.config));
      default = {};
      description = "SeaweedFS WebDAV servers";
    };

    instances = mkOption {
      type = with types; attrsOf (submodule instanceModule);
      description = "SeaweedFS instances";
      # One instance per configured master/volume/filer/webdav, translating
      # the typed options into `weed <command>` CLI arguments.
      default =
        mapAttrs' (n: master: nameValuePair "master-${n}" {
          inherit (master) cluster configs;

          command = "master";

          args = with master;
            [
              "-port=${toString port}"
              "-volumeSizeLimitMB=${toString volumeSizeLimitMB}"
            ]
            ++ optional (cpuprofile != "") "-cpuprofile=${cpuprofile}"
            ++ optional (defaultReplication != null) "-defaultReplication=${defaultReplication.code}"
            ++ optional disableHttp "-disableHttp"
            ++ optional (garbageThreshold != "") "-garbageThreshold=${garbageThreshold}"
            ++ optional (ip != "") "-ip=${ip}"
            ++ optional (master."ip.bind" != "") "-ip.bind=${master."ip.bind"}"
            ++ optional (mdir != "") "-mdir=${mdir}"
            ++ optional (memprofile != "") "-memprofile=${memprofile}"
            ++ optional metrics.enable "-metrics.address=${metrics.address.text}"
            ++ optional (metrics.intervalSeconds != null) "-metrics.intervalSeconds=${toString metrics.intervalSeconds}"
            ++ optional (peers != []) ("-peers=" + concatStringsSep "," (map (peer: peer.text) peers))
            ++ optional resumeState "-resumeState"
            ++ optional volumePreallocate "-volumePreallocate"
            ++ optional (whiteList != []) ("-whiteList=" + concatStringsSep "," whiteList);
        }) cluster.config.masters
        // mapAttrs' (n: volume: nameValuePair "volume-${n}" {
          inherit (volume) cluster configs;

          command = "volume";

          args = with volume;
            [
              "-port=${toString port}"
              "-dir=${concatStringsSep "," dir}"
              "-fileSizeLimitMB=${toString fileSizeLimitMB}"
              "-idleTimeout=${toString idleTimeout}"
              "-index=${index}"
              "-minFreeSpacePercent=${toString minFreeSpacePercent}"
              "-preStopSeconds=${toString preStopSeconds}"
            ]
            ++ optional (compactionMBps != null) "-compactionMBps=${toString compactionMBps}"
            ++ optional (cpuprofile != "") "-cpuprofile=${cpuprofile}"
            ++ optional (dataCenter != "") "-dataCenter=${dataCenter}"
            ++ optional volume."images.fix.orientation" "-images.fix.orientation"
            ++ optional (ip != "") "-ip=${ip}"
            ++ optional (volume."ip.bind" != "") "-ip.bind=${volume."ip.bind"}"
            ++ optional (max != []) "-max=${concatStringsSep "," (map toString max)}"
            ++ optional (memprofile != "") "-memprofile=${memprofile}"
            ++ optional (metricsPort != null) "-metricsPort=${toString metricsPort}"
            ++ optional (mserver != []) ("-mserver=" + concatStringsSep "," (map (m: m.text) mserver))
            ++ optional (volume."port.public" != null) "-port.public=${toString volume."port.public"}"
            ++ optional pprof "-pprof"
            ++ optional (publicUrl != "") "-publicUrl=${publicUrl}"
            ++ optional (rack != "") "-rack=${rack}"
            ++ optional (!volume."read.redirect") "-read.redirect=false"
            ++ optional (whiteList != []) ("-whiteList=" + concatStringsSep "," whiteList);

          # Storage directories must exist before weed starts.
          systemdService.preStart = "mkdir -p ${concatStringsSep " " volume.dir}";
        }) cluster.config.volumes
        // mapAttrs' (n: filer: nameValuePair "filer-${n}" {
          inherit (filer) cluster configs;

          command = "filer";

          args = with filer;
            [
              "-port=${toString port}"
              "-dirListLimit=${toString dirListLimit}"
              "-maxMB=${toString maxMB}"
            ]
            ++ optional (collection != "") "-collection=${collection}"
            ++ optional (dataCenter != "") "-dataCenter=${dataCenter}"
            ++ optional (defaultReplicaPlacement != null) "-defaultReplicaPlacement=${defaultReplicaPlacement.code}"
            ++ optional disableDirListing "-disableDirListing"
            ++ optional disableHttp "-disableHttp"
            ++ optional encryptVolumeData "-encryptVolumeData"
            ++ optional (ip != "") "-ip=${ip}"
            ++ optional (filer."ip.bind" != "") "-ip.bind=${filer."ip.bind"}"
            ++ optional (master != []) ("-master=" + concatStringsSep "," (map (m: m.text) master))
            ++ optional (metricsPort != null) "-metricsPort=${toString metricsPort}"
            ++ optional (peers != []) ("-peers=" + concatStringsSep "," (map (peer: peer.text) peers))
            ++ optional (filer."port.readonly" != null) "-port.readonly=${toString filer."port.readonly"}"
            ++ optional (rack != "") "-rack=${rack}"
            ++ optionals s3.enable [
              "-s3"
              "-s3.port=${toString filer.s3.port}"
            ]
            ++ optional (s3.enable && s3."cert.file" != "") "-s3.cert.file=${s3."cert.file"}"
            ++ optional (s3.enable && s3."key.file" != "") "-s3.key.file=${s3."key.file"}"
            ++ optional (s3.enable && s3.config != "") "-s3.config=${s3.config}"
            ++ optional (s3.enable && s3.domainName != []) "-s3.domainName=${concatStringsSep "," s3.domainName}";

          # Create the default leveldb2 store directory if one is configured.
          systemdService.preStart = let
            conf = filer.configs.filer.leveldb2 or {};
          in optionalString (conf ? "dir") "mkdir -p ${conf.dir}";
        }) cluster.config.filers
        // mapAttrs' (n: webdav: nameValuePair "webdav-${n}" {
          inherit (webdav) cluster;

          command = "webdav";

          args = with webdav;
            [
              "-port=${toString port}"
              "-filer=${filer.text}"
              "-cacheCapacityMB=${toString cacheCapacityMB}"
            ]
            ++ optional (collection != "") "-collection=${collection}"
            ++ optional (cacheDir != "") "-cacheDir=${cacheDir}";
        }) cluster.config.webdavs;
    };
  };
};
+
+
# Shared options for every per-component submodule: a back-reference to the
# owning cluster plus a firewall toggle.
commonModule = cluster: common: {
  options = {
    cluster = mkOption {
      type = types.submodule clusterModule;
      internal = true;
    };

    openFirewall = mkEnableOption "open the firewall";
  };

  config = { inherit cluster; };
};
+
+
# Options for a `weed master` server.
masterModule = cluster: master: {
  imports = [ (commonModule cluster) ];

  options = {
    # Extra TOML config files, keyed by file name then section; defaults to a
    # periodic maintenance script.
    configs = mkOption {
      type = with types; attrsOf attrs;
      default.master.maintenance = {
        scripts = ''
          ec.encode -fullPercent=95 -quietFor=1h
          ec.rebuild -force
          ec.balance -force
          volume.balance -force
          volume.fix.replication
        '';
        sleep_minutes = 17;
      };
    };

    cpuprofile = mkOption { type = types.str; default = ""; };

    # NOTE(review): the default is {} (not null), so the `!= null` guard in
    # clusterModule always passes and "-defaultReplication=000" is always
    # emitted -- confirm that is intended.
    defaultReplication = mkOption {
      type = types.submodule replicationModule;
      default = {};
    };

    disableHttp = mkEnableOption "disable HTTP requests, gRPC only";

    garbageThreshold = mkOption { type = types.str; default = ""; };

    ip = mkOption { type = types.str; default = config.networking.hostName; };

    "ip.bind" = mkOption { type = types.str; default = "0.0.0.0"; };

    mdir = mkOption { type = types.str; default = "."; };

    memprofile = mkOption { type = types.str; default = ""; };

    metrics = {
      enable = mkEnableOption "Prometheus";

      address = mkOption {
        type = types.submodule ipPortModule;
        default = {};
      };

      intervalSeconds = mkOption {
        type = types.ints.unsigned;
        default = 15;
      };
    };

    peers = mkOption {
      type = peersType;
      default = mapAttrsIpPort master.config.cluster.masters;
    };

    port = mkOption { type = types.port; default = 9333; };

    resumeState = mkEnableOption "resume previous state on master server";

    volumePreallocate = mkEnableOption "preallocate disk space for volumes";

    volumeSizeLimitMB = mkOption {
      type = types.ints.unsigned;
      default = 30000;
    };

    whiteList = mkOption { type = with types; listOf str; default = []; };
  };
};
+
+
# Options for a `weed volume` server.
volumeModule = cluster: volume: {
  imports = [ (commonModule cluster) ];

  options = {
    configs = mkOption { type = with types; attrsOf attrs; default = {}; };

    compactionMBps = mkOption {
      type = with types; nullOr ints.unsigned;
      default = null;
    };

    cpuprofile = mkOption { type = types.str; default = ""; };

    dataCenter = mkOption { type = types.str; default = ""; };

    # Storage directories; defaults to a per-cluster, per-volume state dir.
    dir = mkOption {
      type = with types; listOf str;
      default = [ "/var/lib/seaweedfs/${cluster._module.args.name}/volume-${volume.config._module.args.name}" ];
    };

    fileSizeLimitMB = mkOption { type = types.ints.unsigned; default = 256; };

    idleTimeout = mkOption { type = types.ints.unsigned; default = 30; };

    "images.fix.orientation" = mkEnableOption "adjustment of jpg orientation when uploading";

    index = mkOption {
      type = types.enum [ "memory" "leveldb" "leveldbMedium" "leveldbLarge" ];
      default = "memory";
    };

    ip = mkOption { type = types.str; default = config.networking.hostName; };

    "ip.bind" = mkOption { type = types.str; default = "0.0.0.0"; };

    # Maximum volume counts (joined with "," for the -max flag).
    max = mkOption { type = with types; listOf ints.unsigned; default = [ 8 ]; };

    memprofile = mkOption { type = types.str; default = ""; };

    metricsPort = mkOption { type = with types; nullOr port; default = null; };

    minFreeSpacePercent = mkOption { type = types.ints.unsigned; default = 1; };

    mserver = mkOption {
      type = peersType;
      default = mapAttrsIpPort volume.config.cluster.masters;
    };

    port = mkOption { type = types.port; default = 8080; };

    "port.public" = mkOption { type = with types; nullOr port; default = null; };

    pprof = mkEnableOption "pprof http handlers. precludes -memprofile and -cpuprofile";

    preStopSeconds = mkOption { type = types.int; default = 10; };

    publicUrl = mkOption { type = types.str; default = ""; };

    rack = mkOption { type = types.str; default = ""; };

    "read.redirect" = mkOption { type = types.bool; default = true; };

    whiteList = mkOption { type = with types; listOf str; default = []; };
  };
};
+
+
# Options for a `weed filer` server.
filerModule = cluster: filer: {
  imports = [ (commonModule cluster) ];

  options = {
    # Defaults to a leveldb2 metadata store under this filer's state dir.
    configs = mkOption {
      type = with types; attrsOf attrs;
      default.filer.leveldb2 = {
        enabled = true;
        dir = "/var/lib/seaweedfs/${cluster._module.args.name}/filer-${filer.config._module.args.name}/filerldb2";
      };
    };

    collection = mkOption { type = types.str; default = ""; };

    dataCenter = mkOption { type = types.str; default = ""; };

    defaultReplicaPlacement = mkOption {
      type = with types; nullOr (submodule replicationModule);
      default = null;
    };

    dirListLimit = mkOption { type = types.ints.unsigned; default = 100000; };

    disableDirListing = mkEnableOption "turn off directory listing";

    disableHttp = mkEnableOption "disable http request, only gRpc operations are allowed";

    encryptVolumeData = mkEnableOption "encrypt data on volume servers";

    ip = mkOption { type = types.str; default = config.networking.hostName; };

    "ip.bind" = mkOption { type = types.str; default = "0.0.0.0"; };

    master = mkOption {
      type = peersType;
      default = mapAttrsIpPort filer.config.cluster.masters;
    };

    maxMB = mkOption { type = types.ints.unsigned; default = 32; };

    metricsPort = mkOption { type = with types; nullOr port; default = null; };

    peers = mkOption {
      type = peersType;
      default = mapAttrsIpPort filer.config.cluster.filers;
    };

    port = mkOption { type = types.port; default = 8888; };

    "port.readonly" = mkOption { type = with types; nullOr port; default = null; };

    rack = mkOption { type = types.str; default = ""; };

    # Embedded S3 gateway.
    s3 = {
      enable = mkEnableOption "whether to start S3 gateway";

      "cert.file" = mkOption { type = types.path; default = ""; };

      config = mkOption { type = types.path; default = ""; };

      domainName = mkOption { type = with types; listOf str; default = []; };

      "key.file" = mkOption { type = types.path; default = ""; };

      port = mkOption { type = types.port; default = 8333; };
    };
  };
};
+
+
# Options for a `weed webdav` gateway.
webdavModule = cluster: webdav: {
  imports = [ (commonModule cluster) ];

  options = {
    cacheCapacityMB = mkOption { type = types.int; default = 1000; };

    cacheDir = mkOption { type = types.str; default = "."; };

    collection = mkOption { type = types.str; default = ""; };

    # Filer endpoint this gateway talks to.
    filer = mkOption {
      type = types.submodule ipPortModule;
      default = {
        ip = "127.0.0.1";
        port = 8888;
      };
    };

    port = mkOption { type = types.port; default = 7333; };
  };
};
+
+
# Options common to every spawned `weed` process.
instanceModule = instance: {
  options = {
    cluster = mkOption {
      type = types.submodule clusterModule;
      internal = true;
    };

    # The weed subcommand this instance runs.
    command = mkOption {
      type = types.enum [
        "server"
        "master"
        "volume"
        "mount"
        "filer"
        "filer.replicate"
        "filer.sync"
        "s3"
        "msgBroker"
        "watch"
        "webdav"
      ];
    };

    logArgs = mkOption { type = with types; listOf str; default = []; };

    args = mkOption { type = with types; listOf str; default = []; };

    configs = mkOption { type = with types; attrsOf attrs; default = {}; };

    package = mkOption {
      type = types.package;
      default = instance.config.cluster.package;
    };

    # Free-form overrides merged into the generated systemd unit.
    systemdService = mkOption { type = types.attrs; default = {}; };
  };

  config = {
    logArgs = [ "-logtostderr" ];

    # `weed mount` needs the fuse binary on its PATH.
    systemdService.path = optional (instance.config.command == "mount") pkgs.fuse;
  };
};
+
+
# Replication placement triple, rendered as e.g. "010" for the CLI flags.
replicationModule = replication: {
  options = {
    dataCenter = mkOption { type = types.ints.between 0 9; default = 0; };

    rack = mkOption { type = types.ints.between 0 9; default = 0; };

    server = mkOption { type = types.ints.between 0 9; default = 0; };

    # Derived three-digit code; not meant to be set directly.
    code = mkOption {
      readOnly = true;
      internal = true;
      type = types.str;
      default = with replication.config; "${toString dataCenter}${toString rack}${toString server}";
    };
  };
};
+
+
peersType = with types; listOf (submodule ipPortModule);

# An "ip:port" pair with a derived textual form.
ipPortModule = ipPort: {
  options = {
    ip = mkOption { type = types.str; };

    port = mkOption { type = types.port; };

    text = mkOption {
      internal = true;
      readOnly = true;
      type = types.str;
      default = with ipPort.config; "${ip}:${toString port}";
    };
  };
};

# Collapse an attrset of submodule values to a list of { ip, port } pairs.
mapAttrsIpPort = attrs: mapAttrsToList (_: value: { inherit (value) ip port; }) attrs;
+
+
# Good-enough TOML renderer built on the INI generator: strings are quoted,
# multi-line strings become """ blocks, everything else uses the INI default.
toTOML = with generators; toINI {
  mkKeyValue = mkKeyValueDefault {
    mkValueString = v:
      if isString v
      then (
        if hasInfix "\n" v
        then ''
          """
          ${removeSuffix "\n" v}
          """
        ''
        else ''"${v}"''
      )
      else mkValueStringDefault {} v;
  } "=";
};
+
+
# Flatten one level of attrset nesting, joining keys with `separator`:
#   { a = { m1 = X; }; b = { m2 = Y; }; }  ->  { "a<sep>m1" = X; "b<sep>m2" = Y; }
flattenAttrs = separator: attrs:
  builtins.listToAttrs (flatten (mapAttrsToList
    (outerName: inner: mapAttrsToList
      (innerName: value: nameValuePair "${outerName}${separator}${innerName}" value)
      inner)
    attrs));
+
in {
  options.modules.seaweedfs = {
    clusters = mkOption {
      type = with types; attrsOf (submodule clusterModule);
      default = {};
      description = "SeaweedFS clusters";
    };
  };

  config = {
    # One "seaweedfs-<cluster>-<instance>" unit per configured instance.
    systemd.services = mapAttrs'
      (name: instance: nameValuePair "seaweedfs-${name}" instance)
      (flattenAttrs "-" (mapAttrs (clusterName: cluster:
        mapAttrs (instanceName: instance: with instance; recursiveUpdate systemdService rec {
          description = "SeaweedFS ${clusterName} ${instanceName}";
          wants = [ "network.target" ];
          after = wants;
          wantedBy = [ "multi-user.target" ];
          # Link the shared security.toml and the per-instance config files
          # into the runtime working directory before each start.
          preStart = with serviceConfig; ''
            ${
              let securityFile = config.environment.etc."seaweedfs/${clusterName}/security.toml";
              in optionalString securityFile.enable "ln -s /etc/${securityFile.target} ${WorkingDirectory}/"
            }

            # TODO replace find usage with statically known condition
            find -L /etc/${ConfigurationDirectory} -type f -exec ln -s '{}' ${WorkingDirectory}/ \;

            ${optionalString (systemdService ? preStart) systemdService.preStart}
          '';
          serviceConfig = rec {
            ExecStart = "${package}/bin/weed ${concatStringsSep " " logArgs} ${command} ${concatStringsSep " " args}";
            Restart = "on-failure";
            Type = "exec";
            ConfigurationDirectory = "seaweedfs/${clusterName}/${instanceName}";
            RuntimeDirectory = ConfigurationDirectory;
            RuntimeDirectoryPreserve = "restart";
            WorkingDirectory = "/run/${RuntimeDirectory}";
          };
        }) cluster.instances
      ) cfg.clusters));

    # /etc/seaweedfs/<cluster>/security.toml plus per-instance TOML configs.
    environment.etc =
      (mapAttrs' (name: cluster:
        let file = "seaweedfs/${name}/security.toml";
        in nameValuePair file {
          # Only install (and link) the file when it has any content.
          enable = config.environment.etc.${file}.text != "";
          text = with cluster.security.grpc; toTOML (
            (if ca == null then {} else { grpc.ca = ca; }) //
            (if master == null then {} else { "grpc.master" = { inherit (master) cert key; }; }) //
            (if volume == null then {} else { "grpc.volume" = { inherit (volume) cert key; }; }) //
            (if filer == null then {} else { "grpc.filer" = { inherit (filer) cert key; }; }) //
            (if client == null then {} else { "grpc.client" = { inherit (client) cert key; }; }) //
            (if msgBroker == null then {} else { "grpc.msg_broker" = { inherit (msgBroker) cert key; }; })
          );
        }
      ) cfg.clusters) //
      (mapAttrs'
        (name: sections: nameValuePair "seaweedfs/${name}.toml" { text = toTOML sections; })
        (flattenAttrs "/" (mapAttrs (clusterName: cluster:
          flattenAttrs "/" (mapAttrs (instanceName: instance: instance.configs) cluster.instances)
        ) cfg.clusters)));

    # For every component with openFirewall set, open its service port and
    # the matching gRPC port (service port + 10000), plus any extra ports.
    networking.firewall.allowedTCPPorts = let
      modulesToPorts = extraPorts: mapAttrsToList (name: module:
        with module;
        optionals openFirewall (
          [ port (port + 10000) ] ++
          (filter (p: p != null) (extraPorts module))
        )
      );
    in flatten (mapAttrsToList (clusterName: cluster:
      modulesToPorts (master: []) cluster.masters ++

      modulesToPorts (volume: with volume; [ metricsPort volume."port.public" ]) cluster.volumes ++

      modulesToPorts (filer: with filer; [ metricsPort filer."port.readonly" s3.port ]) cluster.filers ++

      modulesToPorts (webdav: []) cluster.webdavs
    ) cfg.clusters);
  };
}
secrets/build-token.age

This is a binary file and will not be displayed.

secrets/garage-admin-token.age

This is a binary file and will not be displayed.

secrets/garage-metrics-token.age

This is a binary file and will not be displayed.

secrets/garage-rpc-secret.age

This is a binary file and will not be displayed.

secrets/headscale-authkey.age

This is a binary file and will not be displayed.

+11
secrets/headscale-oidc-key.path
···
+
age-encryption.org/v1
+
-> ssh-ed25519 i9wBeA Gtd2ftibBF2166KCpJiJt1W9kbwrTybKx4O561e7oQw
+
3ci7PJxYqoglIml6YiyJrffteIZN0aUWDN5z4sogcfs
+
-> ssh-ed25519 du7llw zxlkrcUyO4q4CsRAYMr8vp7LzdK2E/O9fQrCi6TxYXs
+
q3xdu3He3SXg29mKS8Fv3YWt2CkENucPtPYtXmw+dx4
+
-> ssh-ed25519 YYzA7Q VQFwGeDchwrEiI3mPsNK1yGQKupTnh5jLxLhVlPbbzU
+
tsPNihdGL/2VumVXuOKRnfPw7LBlr5xKOODAKY5ROyc
+
-> ssh-ed25519 3RWqPQ YrxOoecRxIrNHq93LvFMgk2h83a0Z3UtsYeXKeQd1xo
+
lUM0BU8KTBjR13TGQj88n5BA4b9JAjZALfu9fTSmpu8
+
--- 8WCStyJ9IerfsQD3pL4ag8tnmt7hBXZxR+aCfv4BjS0
+
7�E�rY�)�GI���G�*K�b����b+��>�m�{����K�!��m�����J:���{��2/��
+9 -3
secrets/secrets.nix
···
regent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ0pU82lV9dSjkgYbdh9utZ5CDM2dPN70S5fBqN1m3Pb regent@orobas.local";
users = [ regent ];
-
valefar = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJlXq2lSfiWwRwIxsxhffW5FDGmjt0QKYN+BaikmRR71";
-
buer = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMVhjwDcO8eleSoR8a37ZGGPvkHEgV+c8SYcy07SayPB root@nixos";
-
systems = [ valefar buer ];
+
valefar = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIPu8CVFsnUxhvABEqv4+EBBOL8tva5HJFoV3hElAlD0";
+
buer = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMVhjwDcO8eleSoR8a37ZGGPvkHEgV+c8SYcy07SayPB";
+
focalor = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIA518oTmTp5VG60/dBrLu7rlV1hh8muhMattoiGfmrei";
+
baal = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILdjRWunQNFeTTdnw4GaqL9G34oo4QuvrRE/jvxLdK1C";
+
systems = [ valefar buer focalor];
in
{
#"secret1.age".publicKeys = [ user1 system1 ];
"build-token.age".publicKeys = users ++ systems;
+
"garage-rpc-secret.age".publicKeys = users ++ systems;
"garage-admin-token.age".publicKeys = users ++ systems;
"garage-metrics-token.age".publicKeys = users ++ systems;
+
+
"headscale-authkey.age".publicKeys = users ++ systems;
+
"headscale-oidc-key.path".publicKeys = users ++ systems;
}