My Nix configs for my servers and desktop

Compare changes

Choose any two refs to compare.

+1 -1
common/nvidia.nix
···
# supported GPUs is at:
# https://github.com/NVIDIA/open-gpu-kernel-modules#compatible-gpus
# Only available from driver 515.43.04+
-
open = false;
+
open = true;
# Enable the Nvidia settings menu,
# accessible via `nvidia-settings`.
+67
common/python-cuda-dev.nix
···
+
{
+
description = "A Nix-flake-based PyTorch development environment";
+
+
# CUDA binaries are cached by the community.
+
nixConfig = {
+
extra-substituters = [
+
"https://nix-community.cachix.org"
+
];
+
extra-trusted-public-keys = [
+
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
+
];
+
};
+
+
inputs.nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1.*.tar.gz";
+
+
outputs = {
+
self,
+
nixpkgs,
+
}: let
+
supportedSystems = ["x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin"];
+
forEachSupportedSystem = f:
+
nixpkgs.lib.genAttrs supportedSystems (system:
+
f {
+
pkgs = import nixpkgs {
+
inherit system;
+
config.allowUnfree = true;
+
};
+
});
+
in {
+
devShells = forEachSupportedSystem ({pkgs}: let
+
libs = [
+
# PyTorch and NumPy depend on the following libraries.
+
pkgs.cudaPackages.cudatoolkit
+
pkgs.cudaPackages.cudnn
+
pkgs.stdenv.cc.cc.lib
+
pkgs.zlib
+
+
# PyTorch also needs to know where your local "lib/libcuda.so" lives.
+
# If you're not on NixOS, you should provide the right path (likely
+
# another one).
+
"/run/opengl-driver"
+
];
+
in {
+
default = pkgs.mkShell {
+
packages = [
+
pkgs.python312
+
pkgs.python312Packages.venvShellHook
+
];
+
+
env = {
+
CC = "${pkgs.gcc}/bin/gcc"; # For `torch.compile`.
+
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath libs;
+
};
+
+
venvDir = ".venv";
+
postVenvCreation = ''
+
# This is run only when creating the virtual environment.
+
pip install torch==2.5.1 numpy==2.2.2
+
'';
+
postShellHook = ''
+
# This is run every time you enter the devShell.
+
python3 -c "import torch; print('CUDA available' if torch.cuda.is_available() else 'CPU only')"
+
'';
+
};
+
});
+
};
+
}
+5
common/services.nix
···
zfs
nixos-generators
sqlite
+
bun
+
unzip
];
services.openssh.enable = true;
···
services.tailscale.useRoutingFeatures = "both";
services.tailscale.authKeyFile = lib.mkIf (config ? age && config.age ? secrets)
config.age.secrets."headscale-authkey".path;
+
services.tailscale.extraUpFlags = [
+
"--login-server=https://headscale.nekomimi.pet"
+
];
}
+188 -52
flake.lock
···
"systems": "systems"
},
"locked": {
-
"lastModified": 1747575206,
-
"narHash": "sha256-NwmAFuDUO/PFcgaGGr4j3ozG9Pe5hZ/ogitWhY+D81k=",
+
"lastModified": 1760836749,
+
"narHash": "sha256-wyT7Pl6tMFbFrs8Lk/TlEs81N6L+VSybPfiIgzU8lbQ=",
"owner": "ryantm",
"repo": "agenix",
-
"rev": "4835b1dc898959d8547a871ef484930675cb47f1",
+
"rev": "2f0f812f69f3eb4140157fe15e12739adf82e32a",
"type": "github"
},
"original": {
···
"nixpkgs": "nixpkgs_2"
},
"locked": {
-
"lastModified": 1750013871,
-
"narHash": "sha256-UQx3rC3QDjD/sIen51+5Juk1rqN3y/sTeMY1WinmhqQ=",
+
"lastModified": 1760953099,
+
"narHash": "sha256-sOKx2YcHa+lWEvaEOIGqLN2WWk1Wf5z6KM02tdfhMtw=",
"owner": "catppuccin",
"repo": "nix",
-
"rev": "fe78fa558d6603481c03eb03a946eadb970d1801",
+
"rev": "f5b21876888265d2fee7fb0640d1b66a1c1c6503",
"type": "github"
},
"original": {
···
"type": "github"
}
},
+
"disko": {
+
"inputs": {
+
"nixpkgs": "nixpkgs_3"
+
},
+
"locked": {
+
"lastModified": 1736864502,
+
"narHash": "sha256-ItkIZyebGvNH2dK9jVGzJHGPtb6BSWLN8Gmef16NeY0=",
+
"owner": "nix-community",
+
"repo": "disko",
+
"rev": "0141aabed359f063de7413f80d906e1d98c0c123",
+
"type": "github"
+
},
+
"original": {
+
"owner": "nix-community",
+
"ref": "v1.11.0",
+
"repo": "disko",
+
"type": "github"
+
}
+
},
+
"flake-compat": {
+
"locked": {
+
"lastModified": 1696426674,
+
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
+
"owner": "edolstra",
+
"repo": "flake-compat",
+
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
+
"type": "github"
+
},
+
"original": {
+
"owner": "edolstra",
+
"repo": "flake-compat",
+
"type": "github"
+
}
+
},
"flake-utils": {
"inputs": {
"systems": "systems_2"
···
},
"flake-utils_3": {
"inputs": {
-
"systems": "systems_4"
+
"systems": "systems_5"
},
"locked": {
"lastModified": 1681202837,
···
]
},
"locked": {
-
"lastModified": 1750127463,
-
"narHash": "sha256-K2xFtlD3PcKAZriOE3LaBLYmVfGQu+rIF4Jr1RFYR0Q=",
+
"lastModified": 1761235135,
+
"narHash": "sha256-cux9xeceLIER1lBxUa1gMafkz7gg5ntcUmJBynWdBWI=",
"owner": "nix-community",
"repo": "home-manager",
-
"rev": "28eef8722d1af18ca13e687dbf485e1c653a0402",
+
"rev": "0adf9ba3f567da2d53af581a857aacf671aaa547",
"type": "github"
},
"original": {
···
]
},
"locked": {
-
"lastModified": 1743604125,
-
"narHash": "sha256-ZD61DNbsBt1mQbinAaaEqKaJk2RFo9R/j+eYWeGMx7A=",
+
"lastModified": 1752603129,
+
"narHash": "sha256-S+wmHhwNQ5Ru689L2Gu8n1OD6s9eU9n9mD827JNR+kw=",
"owner": "nix-community",
"repo": "home-manager",
-
"rev": "180fd43eea296e62ae68e079fcf56aba268b9a1a",
+
"rev": "e8c19a3cec2814c754f031ab3ae7316b64da085b",
"type": "github"
},
"original": {
···
"lix": {
"flake": false,
"locked": {
-
"lastModified": 1746827285,
-
"narHash": "sha256-hsFe4Tsqqg4l+FfQWphDtjC79WzNCZbEFhHI8j2KJzw=",
-
"rev": "47aad376c87e2e65967f17099277428e4b3f8e5a",
+
"lastModified": 1753223229,
+
"narHash": "sha256-tkT4aCZZE6IEmjYotOzKKa2rV3pGpH3ZREeQn7ACgdU=",
+
"rev": "7ac20fc47cf2f1b7469c7a2f379e5a3a51a6789a",
"type": "tarball",
-
"url": "https://git.lix.systems/api/v1/repos/lix-project/lix/archive/47aad376c87e2e65967f17099277428e4b3f8e5a.tar.gz?rev=47aad376c87e2e65967f17099277428e4b3f8e5a"
+
"url": "https://git.lix.systems/api/v1/repos/lix-project/lix/archive/7ac20fc47cf2f1b7469c7a2f379e5a3a51a6789a.tar.gz?rev=7ac20fc47cf2f1b7469c7a2f379e5a3a51a6789a"
},
"original": {
"type": "tarball",
-
"url": "https://git.lix.systems/lix-project/lix/archive/2.93.0.tar.gz"
+
"url": "https://git.lix.systems/lix-project/lix/archive/release-2.93.tar.gz"
}
},
"lix-module": {
···
]
},
"locked": {
-
"lastModified": 1746838955,
-
"narHash": "sha256-11R4K3iAx4tLXjUs+hQ5K90JwDABD/XHhsM9nkeS5N8=",
-
"rev": "cd2a9c028df820a83ca2807dc6c6e7abc3dfa7fc",
+
"lastModified": 1753282722,
+
"narHash": "sha256-KYMUrTV7H/RR5/HRnjV5R3rRIuBXMemyJzTLi50NFTs=",
+
"rev": "46a9e8fcfe4be72b4c7c8082ee11d2c42da1e873",
"type": "tarball",
-
"url": "https://git.lix.systems/api/v1/repos/lix-project/nixos-module/archive/cd2a9c028df820a83ca2807dc6c6e7abc3dfa7fc.tar.gz?rev=cd2a9c028df820a83ca2807dc6c6e7abc3dfa7fc"
+
"url": "https://git.lix.systems/api/v1/repos/lix-project/nixos-module/archive/46a9e8fcfe4be72b4c7c8082ee11d2c42da1e873.tar.gz?rev=46a9e8fcfe4be72b4c7c8082ee11d2c42da1e873"
},
"original": {
"type": "tarball",
-
"url": "https://git.lix.systems/lix-project/nixos-module/archive/2.93.0.tar.gz"
+
"url": "https://git.lix.systems/lix-project/nixos-module/archive/2.93.3-1.tar.gz"
}
},
"microvm": {
···
"spectrum": "spectrum"
},
"locked": {
-
"lastModified": 1750196518,
-
"narHash": "sha256-HJYnJg3TvzFZjVgYHZgH3NtwqkqKiGVCJXpZlO4Y4EE=",
+
"lastModified": 1760574296,
+
"narHash": "sha256-S3gIp6Wd9vQ2RYDxcbHM2CIYgDtogbwzSdu38WABKaQ=",
"owner": "astro",
"repo": "microvm.nix",
-
"rev": "094da86a3e68f2f0d93b654e97b5d42398ead67d",
+
"rev": "42628f7c61b02d385ce2cb1f66f9be333ac20140",
"type": "github"
},
"original": {
···
},
"nixos-hardware": {
"locked": {
-
"lastModified": 1750083401,
-
"narHash": "sha256-ynqbgIYrg7P1fAKYqe8I/PMiLABBcNDYG9YaAP/d/C4=",
+
"lastModified": 1760958188,
+
"narHash": "sha256-2m1S4jl+GEDtlt2QqeHil8Ny456dcGSKJAM7q3j/BFU=",
"owner": "nixos",
"repo": "nixos-hardware",
-
"rev": "61837d2a33ccc1582c5fabb7bf9130d39fee59ad",
+
"rev": "d6645c340ef7d821602fd2cd199e8d1eed10afbc",
"type": "github"
},
"original": {
···
},
"nixpkgs": {
"locked": {
-
"lastModified": 1745391562,
-
"narHash": "sha256-sPwcCYuiEopaafePqlG826tBhctuJsLx/mhKKM5Fmjo=",
+
"lastModified": 1754028485,
+
"narHash": "sha256-IiiXB3BDTi6UqzAZcf2S797hWEPCRZOwyNThJIYhUfk=",
"owner": "NixOS",
"repo": "nixpkgs",
-
"rev": "8a2f738d9d1f1d986b5a4cd2fd2061a7127237d7",
+
"rev": "59e69648d345d6e8fef86158c555730fa12af9de",
"type": "github"
},
"original": {
"owner": "NixOS",
-
"ref": "nixos-unstable",
+
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
},
+
"nixpkgs-stable": {
+
"locked": {
+
"lastModified": 1748437600,
+
"narHash": "sha256-hYKMs3ilp09anGO7xzfGs3JqEgUqFMnZ8GMAqI6/k04=",
+
"owner": "NixOS",
+
"repo": "nixpkgs",
+
"rev": "7282cb574e0607e65224d33be8241eae7cfe0979",
+
"type": "github"
+
},
+
"original": {
+
"id": "nixpkgs",
+
"ref": "nixos-25.05",
+
"type": "indirect"
+
}
+
},
+
"nixpkgs-unstable": {
+
"locked": {
+
"lastModified": 1723637854,
+
"narHash": "sha256-med8+5DSWa2UnOqtdICndjDAEjxr5D7zaIiK4pn0Q7c=",
+
"owner": "NixOS",
+
"repo": "nixpkgs",
+
"rev": "c3aa7b8938b17aebd2deecf7be0636000d62a2b9",
+
"type": "github"
+
},
+
"original": {
+
"id": "nixpkgs",
+
"ref": "nixos-unstable",
+
"type": "indirect"
+
}
+
},
"nixpkgs_2": {
"locked": {
-
"lastModified": 1744463964,
-
"narHash": "sha256-LWqduOgLHCFxiTNYi3Uj5Lgz0SR+Xhw3kr/3Xd0GPTM=",
+
"lastModified": 1760524057,
+
"narHash": "sha256-EVAqOteLBFmd7pKkb0+FIUyzTF61VKi7YmvP1tw4nEw=",
"owner": "NixOS",
"repo": "nixpkgs",
-
"rev": "2631b0b7abcea6e640ce31cd78ea58910d31e650",
+
"rev": "544961dfcce86422ba200ed9a0b00dd4b1486ec5",
"type": "github"
},
"original": {
···
},
"nixpkgs_3": {
"locked": {
-
"lastModified": 1750005367,
-
"narHash": "sha256-h/aac1dGLhS3qpaD2aZt25NdKY7b+JT0ZIP2WuGsJMU=",
+
"lastModified": 1736241350,
+
"narHash": "sha256-CHd7yhaDigUuJyDeX0SADbTM9FXfiWaeNyY34FL1wQU=",
+
"owner": "NixOS",
+
"repo": "nixpkgs",
+
"rev": "8c9fd3e564728e90829ee7dbac6edc972971cd0f",
+
"type": "github"
+
},
+
"original": {
+
"owner": "NixOS",
+
"ref": "nixpkgs-unstable",
+
"repo": "nixpkgs",
+
"type": "github"
+
}
+
},
+
"nixpkgs_4": {
+
"locked": {
+
"lastModified": 1761016216,
+
"narHash": "sha256-G/iC4t/9j/52i/nm+0/4ybBmAF4hzR8CNHC75qEhjHo=",
"owner": "nixos",
"repo": "nixpkgs",
-
"rev": "6c64dabd3aa85e0c02ef1cdcb6e1213de64baee3",
+
"rev": "481cf557888e05d3128a76f14c76397b7d7cc869",
"type": "github"
},
"original": {
···
"type": "github"
}
},
-
"nixpkgs_4": {
+
"nixpkgs_5": {
"locked": {
"lastModified": 1682134069,
"narHash": "sha256-TnI/ZXSmRxQDt2sjRYK/8j8iha4B4zP2cnQCZZ3vp7k=",
···
"type": "indirect"
}
},
+
"proxmox-nixos": {
+
"inputs": {
+
"flake-compat": "flake-compat",
+
"nixpkgs-stable": "nixpkgs-stable",
+
"nixpkgs-unstable": "nixpkgs-unstable",
+
"utils": "utils"
+
},
+
"locked": {
+
"lastModified": 1758650077,
+
"narHash": "sha256-ZeRtJimtk0Faiq7DPZEQNGipda3TaR4QXp0TAzu934Q=",
+
"owner": "SaumonNet",
+
"repo": "proxmox-nixos",
+
"rev": "ce8768f43b4374287cd8b88d8fa9c0061e749d9a",
+
"type": "github"
+
},
+
"original": {
+
"owner": "SaumonNet",
+
"repo": "proxmox-nixos",
+
"type": "github"
+
}
+
},
"root": {
"inputs": {
"agenix": "agenix",
"catppuccin": "catppuccin",
+
"disko": "disko",
"home-manager": "home-manager_2",
"lix-module": "lix-module",
"microvm": "microvm",
"nixos-hardware": "nixos-hardware",
-
"nixpkgs": "nixpkgs_3",
+
"nixpkgs": "nixpkgs_4",
+
"proxmox-nixos": "proxmox-nixos",
"vscode-server": "vscode-server",
"zen-browser": "zen-browser"
}
···
"spectrum": {
"flake": false,
"locked": {
-
"lastModified": 1746869549,
-
"narHash": "sha256-BKZ/yZO/qeLKh9YqVkKB6wJiDQJAZNN5rk5NsMImsWs=",
+
"lastModified": 1759482047,
+
"narHash": "sha256-H1wiXRQHxxPyMMlP39ce3ROKCwI5/tUn36P8x6dFiiQ=",
"ref": "refs/heads/main",
-
"rev": "d927e78530892ec8ed389e8fae5f38abee00ad87",
-
"revCount": 862,
+
"rev": "c5d5786d3dc938af0b279c542d1e43bce381b4b9",
+
"revCount": 996,
"type": "git",
"url": "https://spectrum-os.org/git/spectrum"
},
···
"type": "github"
}
},
+
"systems_5": {
+
"locked": {
+
"lastModified": 1681028828,
+
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+
"owner": "nix-systems",
+
"repo": "default",
+
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+
"type": "github"
+
},
+
"original": {
+
"owner": "nix-systems",
+
"repo": "default",
+
"type": "github"
+
}
+
},
+
"utils": {
+
"inputs": {
+
"systems": "systems_4"
+
},
+
"locked": {
+
"lastModified": 1710146030,
+
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
+
"owner": "numtide",
+
"repo": "flake-utils",
+
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
+
"type": "github"
+
},
+
"original": {
+
"owner": "numtide",
+
"repo": "flake-utils",
+
"type": "github"
+
}
+
},
"vscode-server": {
"inputs": {
"flake-utils": "flake-utils_3",
-
"nixpkgs": "nixpkgs_4"
+
"nixpkgs": "nixpkgs_5"
},
"locked": {
-
"lastModified": 1729422940,
-
"narHash": "sha256-DlvJv33ml5UTKgu4b0HauOfFIoDx6QXtbqUF3vWeRCY=",
+
"lastModified": 1753541826,
+
"narHash": "sha256-foGgZu8+bCNIGeuDqQ84jNbmKZpd+JvnrL2WlyU4tuU=",
"owner": "nix-community",
"repo": "nixos-vscode-server",
-
"rev": "8b6db451de46ecf9b4ab3d01ef76e59957ff549f",
+
"rev": "6d5f074e4811d143d44169ba4af09b20ddb6937d",
"type": "github"
},
"original": {
···
]
},
"locked": {
-
"lastModified": 1750091187,
-
"narHash": "sha256-mjAol6qR+onnZwLUdYjmuBr/tnyozUBXz75tSePVU00=",
+
"lastModified": 1761180075,
+
"narHash": "sha256-V4WLeUQ4gCGZiVihlXWBOZ/1FNcL0jM4zgTY1haJLvY=",
"owner": "0xc000022070",
"repo": "zen-browser-flake",
-
"rev": "cfdf98dac59a42e1642c533a5dbfb5bb242903b3",
+
"rev": "771a2604606905d8c0ffe3b818dc2cc5bd1405d8",
"type": "github"
},
"original": {
+40 -3
flake.nix
···
nixpkgs.url = "github:nixos/nixpkgs/nixos-25.05";
nixos-hardware.url = "github:nixos/nixos-hardware/master";
+
proxmox-nixos.url = "github:SaumonNet/proxmox-nixos";
+
lix-module = {
-
url = "https://git.lix.systems/lix-project/nixos-module/archive/2.93.0.tar.gz";
+
url = "https://git.lix.systems/lix-project/nixos-module/archive/2.93.3-1.tar.gz";
inputs.nixpkgs.follows = "nixpkgs";
};
···
};
microvm.url = "github:astro/microvm.nix";
microvm.inputs.nixpkgs.follows = "nixpkgs";
+
+
disko.url = "github:nix-community/disko/v1.11.0";
catppuccin.url = "github:catppuccin/nix";
home-manager = {
···
system = "x86_64-linux";
};
}
+
+
{ imports = builtins.attrValues nixosModules; }
];
};
···
./hosts/valefar
lix-module.nixosModules.default
vscode-server.nixosModules.default
-
microvm.nixosModules.host
+
proxmox-nixos.nixosModules.proxmox-ve
+
+
({ pkgs, lib, ... }: {
+
services.proxmox-ve = {
+
enable = true;
+
ipAddress = "10.0.0.30";
+
};
+
+
nixpkgs.overlays = [
+
proxmox-nixos.overlays.x86_64-linux
+
];
+
})
+
{ imports = builtins.attrValues nixosModules; }
];
};
···
./hosts/buer
agenix.nixosModules.default
+
+
{ imports = builtins.attrValues nixosModules; }
+
];
+
};
+
+
baal = nixpkgs.lib.nixosSystem {
+
system = "aarch64-linux";
+
specialArgs = {
+
inherit inputs;
+
system = "aarch64-linux";
+
};
+
modules = [
+
./hosts/baal
+
+
agenix.nixosModules.default
+
disko.nixosModules.disko
+
+
{ imports = builtins.attrValues nixosModules; }
+
];
};
};
};
-
}
+
}
+2 -2
home/regent/home.nix
···
height = 0;
output = [
"HDMI-A-1"
-
"DP-1"
+
"DP-3"
+
"DP-2"
];
modules-left = [
"sway/workspaces"
···
"sway/workspaces" = {
disable-scroll = true;
-
sort-by-name = true;
};
tray = {
icon-size = 13;
+58
hosts/baal/default.nix
···
+
{ config, lib, pkgs, modulesPath, inputs, ... }:
+
{
+
imports = [
+
./hardware.nix
+
./secrets.nix
+
+
../../common/system.nix
+
../../common/users.nix
+
../../common/services.nix
+
+
../../host-secrets.nix
+
];
+
+
boot = {
+
loader = {
+
systemd-boot.enable = true;
+
efi = {
+
canTouchEfiVariables = true;
+
efiSysMountPoint = "/boot";
+
};
+
};
+
initrd.systemd.enable = true;
+
};
+
+
system.stateVersion = "24.11";
+
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
+
+
systemd.targets.multi-user.enable = true;
+
+
networking = {
+
hostName = "baal";
+
hostId = "aaaaaaaa";
+
networkmanager.enable = true;
+
};
+
+
services.fail2ban = {
+
enable = true;
+
# Ban IP after 5 failures
+
maxretry = 5;
+
ignoreIP = [
+
"10.0.0.0/8" "172.16.0.0/12" "192.168.0.0/16" "100.64.0.0/10"
+
];
+
bantime = "24h"; # Ban IPs for one day on the first ban
+
bantime-increment = {
+
enable = true; # Enable increment of bantime after each violation
+
multipliers = "1 2 4 8 16 32 64";
+
maxtime = "168h"; # Do not ban for more than 1 week
+
overalljails = true; # Calculate the bantime based on all the violations
+
};
+
};
+
+
virtualisation.docker = {
+
enable = true;
+
enableOnBoot = true;
+
};
+
+
documentation.enable = false;
+
}
+55
hosts/baal/hardware.nix
···
+
# Do not modify this file! It was generated by ‘nixos-generate-config’
+
# and may be overwritten by future invocations. Please make changes
+
# to /etc/nixos/configuration.nix instead.
+
{ config, lib, pkgs, modulesPath, ... }:
+
+
{
+
imports =
+
[ (modulesPath + "/profiles/qemu-guest.nix")
+
];
+
+
boot.initrd.availableKernelModules = [ "xhci_pci" "virtio_pci" "virtio_scsi" "usbhid" ];
+
boot.initrd.kernelModules = [ ];
+
boot.kernelModules = [ ];
+
boot.extraModulePackages = [ ];
+
+
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
+
# (the default) this is the recommended approach. When using systemd-networkd it's
+
# still possible to use this option, but it's recommended to use it in conjunction
+
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
+
networking.useDHCP = lib.mkDefault true;
+
# networking.interfaces.enp0s6.useDHCP = lib.mkDefault true;
+
+
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
+
+
disko.devices = {
+
disk = {
+
main = {
+
type = "disk";
+
device = "/dev/sda";
+
content = {
+
type = "gpt";
+
partitions = {
+
boot = {
+
size = "512M";
+
type = "EF00";
+
content = {
+
type = "filesystem";
+
format = "vfat";
+
mountpoint = "/boot";
+
};
+
};
+
root = {
+
size = "100%";
+
content = {
+
type = "filesystem";
+
format = "ext4";
+
mountpoint = "/";
+
};
+
};
+
};
+
};
+
};
+
};
+
};
+
}
+3
hosts/baal/secrets.nix
···
+
{
+
+
}
+31 -3
hosts/buer/default.nix
···
# CUSTOM MODULES
# =============================================================================
modules.garage.enable = true;
+
modules.seaweedfs.clusters.default = {
+
package = pkgs.seaweedfs;
+
+
masters.main = {
+
openFirewall = true;
+
ip = "fs.nkp.pet";
+
volumePreallocate = true;
+
+
defaultReplication = {
+
dataCenter = 0;
+
rack = 0;
+
server = 0;
+
};
+
};
+
};
# =============================================================================
# BOOT CONFIGURATION
···
useDHCP = false;
};
+
services.fail2ban = {
+
enable = true;
+
# Ban IP after 5 failures
+
maxretry = 5;
+
ignoreIP = [
+
"10.0.0.0/8" "172.16.0.0/12" "192.168.0.0/16" "100.64.0.0/10"
+
];
+
bantime = "24h"; # Ban IPs for one day on the first ban
+
bantime-increment = {
+
enable = true; # Enable increment of bantime after each violation
+
multipliers = "1 2 4 8 16 32 64";
+
maxtime = "168h"; # Do not ban for more than 1 week
+
overalljails = true; # Calculate the bantime based on all the violations
+
};
+
};
+
# Static IP configuration via systemd-networkd
systemd.network = {
enable = true;
···
virtualisation.docker = {
enable = true;
enableOnBoot = true;
-
package = pkgs.docker.override {
-
buildGoModule = pkgs.buildGo123Module;
-
};
};
# =============================================================================
+17 -9
hosts/focalor/default.nix
···
# Hardware-specific
../../common/nvidia.nix
-
# Common secrets (commented out)
-
# ../../host-secrets.nix
+
# Common secrets
+
../../host-secrets.nix
];
+
+
services.syncthing = {
+
enable = true;
+
openDefaultPorts = true;
+
user = "regent";
+
dataDir = "/home/regent";
+
configDir = "/home/regent/.config/syncthing";
+
};
# =============================================================================
# SYSTEM CONFIGURATION
···
# =============================================================================
boot.supportedFilesystems = [ "nfs" ];
-
fileSystems."/mnt/storage" = {
+
/*fileSystems."/mnt/storage" = {
device = "valefar:/storage";
fsType = "nfs";
-
};
+
};*/
# =============================================================================
# SERVICES
···
virtualisation.docker = {
enable = true;
enableOnBoot = true;
-
package = pkgs.docker.override {
-
buildGoModule = pkgs.buildGo123Module;
-
};
};
# =============================================================================
···
# =============================================================================
environment.systemPackages = with pkgs; [
inputs.agenix.packages.x86_64-linux.default
+
prismlauncher
+
temurin-bin
+
signal-desktop
];
# =============================================================================
···
# code-server
# DHCP (disabled in favor of systemd-networkd)
-
# useDHCP = true;
+
networking.useDHCP = false;
# firewall.allowedTCPPorts = [22 80 443 2456 2457 9000 9001 9002];
-
}
+
}
+2 -2
hosts/focalor/hardware.nix
···
];
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "ahci" "uas" "usbhid" "sd_mod" ];
-
boot.initrd.kernelModules = [ "vfio" "vfio_iommu_type1" "vfio_pci" ];
+
# boot.initrd.kernelModules = [ "vfio" "vfio_iommu_type1" "vfio_pci" ];
boot.kernelModules = [ "kvm-amd" ];
boot.kernelParams = [
"amd_iommu=on"
-
"vfio-pci.ids=10de:2484,10de228b,1022:149c,15b7:5045,1dbe:5236,1022:149c"
+
# "vfio-pci.ids=10de:2484,10de228b,1022:149c,15b7:5045,1dbe:5236,1022:149c"
];
boot.extraModulePackages = [ ];
-7
hosts/focalor/vfio.nix
···
package = pkgs.qemu_kvm;
runAsRoot = true;
swtpm.enable = true;
-
ovmf = {
-
enable = true;
-
packages = [(pkgs.OVMF.override {
-
secureBoot = true;
-
tpmSupport = true;
-
}).fd];
-
};
};
hooks.qemu = {
win11 = ./scripts/vm-win11-hook.sh;
+64
hosts/valefar/backup.nix
···
+
# Do not modify this file! It was generated by ‘nixos-generate-config’
+
# and may be overwritten by future invocations. Please make changes
+
# to /etc/nixos/configuration.nix instead.
+
{ config, lib, pkgs, modulesPath, ... }:
+
+
{
+
imports =
+
[ (modulesPath + "/installer/scan/not-detected.nix")
+
];
+
+
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "nvme" "mpt3sas" "sd_mod" ];
+
boot.initrd.kernelModules = [ ];
+
boot.kernelModules = [ "kvm-amd" ];
+
boot.extraModulePackages = [ ];
+
+
fileSystems."/" = {
+
device = "/dev/disk/by-uuid/17b399da-2210-4493-9ae3-c65b20b992a0";
+
fsType = "ext4";
+
};
+
+
fileSystems."/boot" =
+
{ device = "/dev/disk/by-uuid/6340-211B";
+
fsType = "vfat";
+
options = [ "fmask=0022" "dmask=0022" ];
+
};
+
+
/* fileSystems."/garage" = {
+
device = "garage";
+
fsType = "zfs";
+
};
+
+
fileSystems."/storage" = {
+
device = "storage";
+
fsType = "zfs";
+
};*/
+
+
swapDevices = [ ];
+
+
# Fan Control
+
hardware.fancontrol = {
+
enable = false;
+
config = ''
+
INTERVAL=10
+
DEVPATH=hwmon1=devices/platform/nct6775.2592 hwmon2=devices/platform/coretemp.0
+
DEVNAME=hwmon1=nct6795 hwmon2=coretemp
+
FCTEMPS=hwmon1/pwm2=hwmon2/temp1_input hwmon1/pwm3=hwmon2/temp1_input
+
FCFANS=hwmon1/pwm2=hwmon1/fan2_input hwmon1/pwm3=hwmon1/fan3_input
+
MINTEMP=hwmon1/pwm2=20 hwmon1/pwm3=20
+
MAXTEMP=hwmon1/pwm2=65 hwmon1/pwm3=60
+
MINSTART=hwmon1/pwm2=38 hwmon1/pwm3=75
+
MINSTOP=hwmon1/pwm2=28 hwmon1/pwm3=75
+
MINPWM=hwmon1/pwm2=28 hwmon1/pwm3=75
+
MAXPWM=hwmon1/pwm2=150 hwmon1/pwm3=105
+
'';
+
};
+
+
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
+
# (the default) this is the recommended approach. When using systemd-networkd it's
+
# still possible to use this option, but it's recommended to use it in conjunction
+
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
+
networking.useDHCP = lib.mkDefault true;
+
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
+
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
+
}
+34 -15
hosts/valefar/default.nix
···
# IMPORTS
# =============================================================================
imports = [
-
# Host-specific hardware
./hardware.nix
./secrets.nix
../../common/nvidia.nix
-
# Common secrets
../../host-secrets.nix
-
# Common modules shared across hosts
../../common/system.nix
../../common/users.nix
../../common/services.nix
../../common/efi.nix
-
# Hardware-specific (commented out)
-
# ../../common/nvidia.nix
+
../../common/nvidia.nix
];
# =============================================================================
···
system.stateVersion = "24.11";
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
-
# Intel microcode updates
-
hardware.cpu.intel.updateMicrocode = lib.mkDefault
+
hardware.cpu.amd.updateMicrocode = lib.mkDefault
config.hardware.enableRedistributableFirmware;
# =============================================================================
···
networking.hostId = "2a07da90";
networking.firewall.enable = false;
+
services.proxmox-ve.bridges = [ "vmbr0" ];
+
systemd.network.networks."10-lan" = {
-
matchConfig.Name = ["enp6s0" "vm-*"];
+
matchConfig.Name = ["enp6s0"];
networkConfig = {
-
Bridge = "br0";
+
Bridge = "vmbr0";
};
};
systemd.network.netdevs."br0" = {
netdevConfig = {
-
Name = "br0";
+
Name = "vmbr0";
Kind = "bridge";
};
};
systemd.network.networks."10-lan-bridge" = {
-
matchConfig.Name = "br0";
+
matchConfig.Name = "vmbr0";
networkConfig = {
Address = ["10.0.0.30/24" "2601:5c2:8400:26c0::30/64"];
Gateway = "10.0.0.1";
···
"d /storage/immich/photos 0755 immich immich -"
"Z /storage/immich 0755 immich immich -"
"d /storage/tm_share 0755 regent users"
+
"Z /garage/ 0755 garage garage -"
];
# =============================================================================
···
nodejsPackage = pkgs.nodejs_20;
};
+
services.ollama = {
+
enable = true;
+
loadModels = ["deepseek-r1:1.5b" "gemma3:12b"];
+
acceleration = "cuda";
+
};
+
+
services.open-webui.enable = true;
+
# =============================================================================
# VIRTUALIZATION
# =============================================================================
virtualisation.docker = {
enable = true;
enableOnBoot = true;
-
package = pkgs.docker.override {
-
buildGoModule = pkgs.buildGo123Module;
+
};
+
+
services.fail2ban = {
+
enable = true;
+
# Ban IP after 5 failures
+
maxretry = 5;
+
ignoreIP = [
+
"10.0.0.0/8" "172.16.0.0/12" "192.168.0.0/16" "100.64.0.0/10"
+
];
+
bantime = "24h"; # Ban IPs for one day on the first ban
+
bantime-increment = {
+
enable = true; # Enable increment of bantime after each violation
+
multipliers = "1 2 4 8 16 32 64";
+
maxtime = "168h"; # Do not ban for more than 1 week
+
overalljails = true; # Calculate the bantime based on all the violations
};
};
···
# =============================================================================
# VIRTUAL MACHINES
# =============================================================================
-
systemd.services."microvm@".after = [ "microvm-virtiofsd@%i.service" ];
+
/*systemd.services."microvm@".after = [ "microvm-virtiofsd@%i.service" ];
microvm.vms = {
gameservers = {
···
microvm.autostart = [
"gameservers"
-
];
+
];*/
}
-80
hosts/valefar/gamevm.nix
···
-
{ config, lib, pkgs, modulesPath, microvm, inputs, ... }:
-
-
{
-
# =============================================================================
-
# IMPORTS
-
# =============================================================================
-
imports = [
-
# Common modules shared across hosts
-
../../common/system.nix
-
../../common/users.nix
-
../../common/services.nix
-
];
-
-
system.stateVersion = "25.05";
-
networking.hostName = "gameservers";
-
-
virtualisation.docker = {
-
enable = true;
-
enableOnBoot = true;
-
};
-
-
systemd.network.networks."20-lan" = {
-
matchConfig.Type = "ether";
-
networkConfig = {
-
Address = [
-
"10.0.0.31/24"
-
"2601:5c2:8400:26c0::31/64"
-
];
-
Gateway = "10.0.0.1";
-
DNS = [
-
"10.0.0.210"
-
"1.1.1.1"
-
"1.0.0.1"
-
];
-
IPv6AcceptRA = true;
-
DHCP = "no";
-
};
-
};
-
-
systemd.network.networks."19-docker" = {
-
matchConfig.Name = "veth*";
-
linkConfig = {
-
Unmanaged = true;
-
};
-
};
-
-
microvm = {
-
interfaces = [
-
{
-
type = "tap";
-
id = "vm-test1";
-
mac = "02:00:00:00:00:01";
-
}
-
];
-
-
shares = [
-
{
-
source = "/nix/store";
-
mountPoint = "/nix/.ro-store";
-
tag = "ro-store";
-
proto = "virtiofs";
-
}
-
{
-
source = "/etc/ssh";
-
mountPoint = "/etc/ssh";
-
tag = "ssh";
-
proto = "virtiofs";
-
}
-
{
-
source = "/home/regent/gamedata";
-
mountPoint = "/root/gamedata";
-
tag = "gamedata";
-
proto = "virtiofs";
-
}
-
];
-
-
vcpu = 4;
-
mem = 8192;
-
};
-
}
+12 -36
hosts/valefar/hardware.nix
···
[ (modulesPath + "/installer/scan/not-detected.nix")
];
-
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "nvme" "mpt3sas" "sd_mod" ];
+
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "mpt3sas" "nvme" "usbhid" "uas" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
-
fileSystems."/" = {
-
device = "/dev/disk/by-uuid/17b399da-2210-4493-9ae3-c65b20b992a0";
-
fsType = "ext4";
-
};
+
fileSystems."/" =
+
{ device = "/dev/disk/by-uuid/e02d1d07-3bc8-4d1d-a301-6d589f4b4b6d";
+
fsType = "ext4";
+
};
fileSystems."/boot" =
-
{ device = "/dev/disk/by-uuid/6340-211B";
+
{ device = "/dev/disk/by-uuid/B3DE-0187";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
-
/* fileSystems."/garage" = {
-
device = "garage";
-
fsType = "zfs";
-
};
-
-
fileSystems."/storage" = {
-
device = "storage";
-
fsType = "zfs";
-
};*/
-
-
swapDevices = [ ];
-
-
# Fan Control
-
hardware.fancontrol = {
-
enable = false;
-
config = ''
-
INTERVAL=10
-
DEVPATH=hwmon1=devices/platform/nct6775.2592 hwmon2=devices/platform/coretemp.0
-
DEVNAME=hwmon1=nct6795 hwmon2=coretemp
-
FCTEMPS=hwmon1/pwm2=hwmon2/temp1_input hwmon1/pwm3=hwmon2/temp1_input
-
FCFANS=hwmon1/pwm2=hwmon1/fan2_input hwmon1/pwm3=hwmon1/fan3_input
-
MINTEMP=hwmon1/pwm2=20 hwmon1/pwm3=20
-
MAXTEMP=hwmon1/pwm2=65 hwmon1/pwm3=60
-
MINSTART=hwmon1/pwm2=38 hwmon1/pwm3=75
-
MINSTOP=hwmon1/pwm2=28 hwmon1/pwm3=75
-
MINPWM=hwmon1/pwm2=28 hwmon1/pwm3=75
-
MAXPWM=hwmon1/pwm2=150 hwmon1/pwm3=105
-
'';
-
};
+
swapDevices =
+
[ { device = "/dev/disk/by-uuid/c8f24f31-49e0-486c-9f63-1d31b2e36ce9"; }
+
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
+
# networking.interfaces.enp6s0.useDHCP = lib.mkDefault true;
+
+
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
-
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
}
+3 -2
modules/forgejo/default.nix
···
SSH_LISTEN_PORT = 2222;
SSH_PORT = 2222;
START_SSH_SERVER = true;
+
SSH_DOMAIN = "sgit.nekomimi.pet";
};
-
# service.DISABLE_REGISTRATION = true;
+
service.DISABLE_REGISTRATION = true;
actions = {
ENABLED = true;
DEFAULT_ACTIONS_URL = "github";
···
};
};
};
-
}
+
}
+4 -4
modules/garage/default.nix
···
config = mkIf cfg.enable {
services.garage = {
enable = true;
-
package = pkgs.garage;
+
package = pkgs.garage_2;
settings = {
metadata_dir = "/garage/metadata";
data_dir = "/garage/data";
db_engine = "lmdb";
-
replication_mode = "2";
+
replication_factor = 2;
rpc_bind_addr = "[::]:3901";
rpc_public_addr = "${config.networking.hostName}:3901";
rpc_secret_file = config.age.secrets."garage-rpc-secret".path;
···
metrics_token_file = config.age.secrets."garage-metrics-token".path;
};
bootstrap_peers = [
-
"d548d0c9ae9aec9e26fe0bd2ca3efe75f654fa350bad5cb02bc9aebc9850ba8f@[2a04:52c0:135:48d1::2]:3901" # buer
-
"5504cb25910dcef4a4312006691d651c099cde7c3a88df9ca79aa350571e6e65@[2601:5c2:8400:26c0:4ecc:6aff:fef7:98ca]:3901" #valefar
+
"d548d0c9ae9aec9e26fe0bd2ca3efe75f654fa350bad5cb02bc9aebc9850ba8f@[buer]:3901"
+
"5504cb25910dcef4a4312006691d651c099cde7c3a88df9ca79aa350571e6e65@[valefar]:3901"
];
};
};
+229
modules/headscale/default.nix
···
+
{ config, lib, pkgs, ... }:
+
+
with lib;
+
let
+
cfg = config.modules.headscale;
+
in
+
{
+
options = {
+
modules = {
+
headscale = {
+
enable = mkEnableOption "Deploy headscale";
+
+
oidcClientSecretPath = mkOption {
+
type = types.str;
+
default = "/etc/headscale/oidc_client_secret.key";
+
description = "Path to OIDC client secret file";
+
example = "config.age.secrets.headscale-oidc-key.path";
+
};
+
+
litestream = {
+
enable = mkEnableOption "Enable litestream for headscale database backups";
+
+
replicas = mkOption {
+
type = types.listOf (types.attrsOf types.anything);
+
default = [];
+
description = "List of litestream replica configurations";
+
example = [
+
{
+
url = "s3://your-backup-bucket/headscale/db";
+
access-key-id = "$LITESTREAM_ACCESS_KEY_ID";
+
secret-access-key = "$LITESTREAM_SECRET_ACCESS_KEY";
+
region = "us-east-1";
+
}
+
];
+
};
+
+
backupPath = mkOption {
+
type = types.nullOr types.str;
+
default = null;
+
description = "Local backup path (alternative to S3)";
+
example = "/backup/headscale";
+
};
+
+
syncInterval = mkOption {
+
type = types.str;
+
default = "1s";
+
description = "How often to sync to replicas";
+
};
+
+
retention = mkOption {
+
type = types.str;
+
default = "72h";
+
description = "How long to retain snapshots";
+
};
+
+
environmentFile = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = "Environment file containing S3 credentials (can be agenix secret)";
+
example = "config.age.secrets.litestream-env.path";
+
};
+
};
+
};
+
};
+
};
+
+
config = mkIf cfg.enable {
+
services.headscale = {
+
enable = true;
+
address = "0.0.0.0";
+
port = 8080;
+
+
settings = {
+
server_url = "https://headscale.nekomimi.pet";
+
+
# Metrics and gRPC
+
metrics_listen_addr = "127.0.0.1:9090";
+
grpc_listen_addr = "127.0.0.1:50443";
+
grpc_allow_insecure = false;
+
+
# Prefixes
+
prefixes = {
+
v4 = "100.64.0.0/10";
+
v6 = "fd7a:115c:a1e0::/48";
+
allocation = "sequential";
+
};
+
+
# Database
+
database = {
+
type = "sqlite";
+
sqlite = {
+
path = "/var/lib/headscale/db.sqlite";
+
write_ahead_log = true;
+
};
+
};
+
+
# Noise
+
noise = {
+
private_key_path = "/var/lib/headscale/noise_private.key";
+
};
+
+
# DERP
+
derp = {
+
urls = [
+
"https://controlplane.tailscale.com/derpmap/default"
+
];
+
paths = [];
+
auto_update_enabled = true;
+
update_frequency = "24h";
+
server = {
+
enabled = false;
+
region_id = 999;
+
region_code = "headscale";
+
region_name = "Headscale Embedded DERP";
+
stun_listen_addr = "0.0.0.0:3478";
+
private_key_path = "/var/lib/headscale/derp_server_private.key";
+
automatically_add_embedded_derp_region = true;
+
ipv4 = "1.2.3.4";
+
ipv6 = "2001:db8::1";
+
};
+
};
+
+
# DNS
+
dns = {
+
magic_dns = true;
+
base_domain = "dns.sharkgirl.pet";
+
nameservers = {
+
global = [
+
"100.64.0.7"
+
"1.1.1.1"
+
"1.0.0.1"
+
"2606:4700:4700::1111"
+
"2606:4700:4700::1001"
+
];
+
};
+
search_domains = [];
+
};
+
+
# OIDC with configurable secret path
+
oidc = {
+
only_start_if_oidc_is_available = true;
+
issuer = "https://pocketid.nekomimi.pet";
+
client_id = "f345acad-3eac-45b7-9d91-57f388987a57";
+
client_secret_path = cfg.oidcClientSecretPath;
+
pkce = {
+
enabled = true;
+
method = "S256";
+
};
+
};
+
+
# Policy
+
policy = {
+
mode = "database";
+
};
+
+
# TLS/ACME
+
acme_url = "https://acme-v02.api.letsencrypt.org/directory";
+
acme_email = "";
+
tls_letsencrypt_hostname = "";
+
tls_letsencrypt_cache_dir = "/var/lib/headscale/cache";
+
tls_letsencrypt_challenge_type = "HTTP-01";
+
tls_letsencrypt_listen = ":http";
+
tls_cert_path = "";
+
tls_key_path = "";
+
+
# Logging
+
log = {
+
format = "text";
+
level = "info";
+
};
+
+
# Misc settings
+
disable_check_updates = false;
+
ephemeral_node_inactivity_timeout = "30m";
+
unix_socket = "/var/run/headscale/headscale.sock";
+
unix_socket_permission = "0770";
+
logtail = {
+
enabled = false;
+
};
+
randomize_client_port = false;
+
};
+
};
+
+
# Configurable Litestream for SQLite database backups
+
services.litestream = mkIf cfg.litestream.enable {
+
enable = true;
+
settings = {
+
dbs = [
+
{
+
path = "/var/lib/headscale/db.sqlite";
+
sync-interval = cfg.litestream.syncInterval;
+
retention = cfg.litestream.retention;
+
replicas =
+
# Use custom replicas if provided
+
if cfg.litestream.replicas != [] then
+
cfg.litestream.replicas
+
# Otherwise use local backup if path is provided
+
else if cfg.litestream.backupPath != null then
+
[{ path = cfg.litestream.backupPath; }]
+
# Default empty (user must configure)
+
else
+
[];
+
}
+
];
+
};
+
};
+
+
# Configure systemd service to use agenix secrets
+
systemd.services.headscale.serviceConfig = mkMerge [
+
{
+
SupplementaryGroups = [ "headscale-secrets" ];
+
}
+
# Add environment file for litestream if specified
+
(mkIf (cfg.litestream.enable && cfg.litestream.environmentFile != null) {
+
EnvironmentFile = cfg.litestream.environmentFile;
+
})
+
];
+
+
# Configure litestream service with environment file if specified
+
systemd.services.litestream = mkIf (cfg.litestream.enable && cfg.litestream.environmentFile != null) {
+
serviceConfig = {
+
EnvironmentFile = cfg.litestream.environmentFile;
+
};
+
};
+
+
# Create a group for accessing secrets
+
users.groups.headscale-secrets = {};
+
};
+
}
+858
modules/seaweedfs/default.nix
···
+
/*https://hg.sr.ht/~dermetfan/seaweedfs-nixos/browse/seaweedfs.nix?rev=tip*/
+
+
{ config, lib, pkgs, ... }:
+
+
with lib;
+
+
let
+
cfg = config.modules.seaweedfs;
+
+
clusterModule = cluster: {
+
options = {
+
package = mkOption {
+
type = types.package;
+
default = pkgs.seaweedfs;
+
};
+
+
security.grpc = let
+
auth = mkOption {
+
type = with types; nullOr (submodule {
+
options = {
+
cert = mkOption { type = path; };
+
key = mkOption { type = path; };
+
};
+
});
+
default = null;
+
};
+
in {
+
ca = mkOption {
+
type = with types; nullOr str;
+
default = null;
+
};
+
+
master = auth;
+
volume = auth;
+
filer = auth;
+
client = auth;
+
msgBroker = auth;
+
};
+
+
masters = mkOption {
+
type = with types; attrsOf (submodule (masterModule cluster.config));
+
default = {};
+
description = "SeaweedFS masters";
+
};
+
+
volumes = mkOption {
+
type = with types; attrsOf (submodule (volumeModule cluster.config));
+
default = {};
+
description = "SeaweedFS volumes";
+
};
+
+
filers = mkOption {
+
type = with types; attrsOf (submodule (filerModule cluster.config));
+
default = {};
+
description = "SeaweedFS filers";
+
};
+
+
webdavs = mkOption {
+
type = with types; attrsOf (submodule (webdavModule cluster.config));
+
default = {};
+
description = "SeaweedFS WebDAV servers";
+
};
+
+
instances = mkOption {
+
type = with types; attrsOf (submodule instanceModule);
+
description = "SeaweedFS instances";
+
default =
+
mapAttrs' (name: master: nameValuePair
+
"master-${name}"
+
{
+
inherit (master) cluster configs;
+
+
command = "master";
+
+
args = with master;
+
[
+
"-port=${toString port}"
+
"-volumeSizeLimitMB=${toString volumeSizeLimitMB}"
+
] ++
+
optional (cpuprofile != "") "-cpuprofile=${cpuprofile}" ++
+
optional (defaultReplication != null) ("-defaultReplication=${defaultReplication.code}") ++
+
optional disableHttp "-disableHttp" ++
+
optional (garbageThreshold != "") "-garbageThreshold=${garbageThreshold}" ++
+
optional (ip != "") "-ip=${ip}" ++
+
optional (master."ip.bind" != "") "-ip.bind=${master."ip.bind"}" ++
+
optional (mdir != "") "-mdir=${mdir}" ++
+
optional (memprofile != "") "-memprofile=${memprofile}" ++
+
optional metrics.enable "-metrics.address=${metrics.address.text}" ++
+
optional (metrics.intervalSeconds != null) "-metrics.intervalSeconds=${toString metrics.intervalSeconds}" ++
+
optional (peers != []) ("-peers=" + (concatStringsSep "," (map (peer: peer.text) peers))) ++
+
optional resumeState "-resumeState" ++
+
optional volumePreallocate "-volumePreallocate" ++
+
optional (whiteList != []) ("-whiteList=" + (concatStringsSep "," whiteList));
+
}
+
) cluster.config.masters //
+
mapAttrs' (name: volume: nameValuePair
+
"volume-${name}"
+
{
+
inherit (volume) cluster configs;
+
+
command = "volume";
+
+
args = with volume;
+
[
+
"-port=${toString port}"
+
"-dir=${concatStringsSep "," dir}"
+
"-fileSizeLimitMB=${toString fileSizeLimitMB}"
+
"-idleTimeout=${toString idleTimeout}"
+
"-index=${index}"
+
"-minFreeSpacePercent=${toString minFreeSpacePercent}"
+
"-preStopSeconds=${toString preStopSeconds}"
+
] ++
+
optional (compactionMBps != null) ("-compactionMBps=${compactionMBps}") ++
+
optional (cpuprofile != "") "-cpuprofile=${cpuprofile}" ++
+
optional (dataCenter != "") "-dataCenter=${dataCenter}" ++
+
optional volume."images.fix.orientation" "-images.fix.orientation" ++
+
optional (ip != "") "-ip=${ip}" ++
+
optional (volume."ip.bind" != "") "-ip.bind=${volume."ip.bind"}" ++
+
optional (max != []) "-max=${concatStringsSep "," (map toString max)}" ++
+
optional (memprofile != "") "-memprofile=${memprofile}" ++
+
optional (metricsPort != null) "-metricsPort=${toString metricsPort}" ++
+
optional (mserver != []) ("-mserver=" + (concatStringsSep "," (map (mserver: mserver.text) mserver))) ++
+
optional (volume."port.public" != null) "-port.public=${toString volume."port.public"}" ++
+
optional pprof "-pprof" ++
+
optional (publicUrl != "") "-publicUrl=${publicUrl}" ++
+
optional (rack != "") "-rack=${rack}" ++
+
optional (!volume."read.redirect") "-read.redirect=false" ++
+
optional (whiteList != []) ("-whiteList=" + (concatStringsSep "," whiteList));
+
+
systemdService.preStart = "mkdir -p ${concatStringsSep " " volume.dir}";
+
}
+
) cluster.config.volumes //
+
mapAttrs' (name: filer: nameValuePair
+
"filer-${name}"
+
{
+
inherit (filer) cluster configs;
+
+
command = "filer";
+
+
args = with filer;
+
[
+
"-port=${toString port}"
+
"-dirListLimit=${toString dirListLimit}"
+
"-maxMB=${toString maxMB}"
+
] ++
+
optional (collection != "") "-collection=${collection}" ++
+
optional (dataCenter != "") "-dataCenter=${dataCenter}" ++
+
optional (defaultReplicaPlacement != null) ("-defaultReplicaPlacement=${defaultReplicaPlacement.code}") ++
+
optional disableDirListing "-disableDirListing" ++
+
optional disableHttp "-disableHttp" ++
+
optional encryptVolumeData "-encryptVolumeData" ++
+
optional (ip != "") "-ip=${ip}" ++
+
optional (filer."ip.bind" != "") "-ip.bind=${filer."ip.bind"}" ++
+
optional (master != []) ("-master=" + (concatStringsSep "," (map (master: master.text) master))) ++
+
optional (metricsPort != null) "-metricsPort=${toString metricsPort}" ++
+
optional (peers != []) ("-peers=" + (concatStringsSep "," (map (peer: peer.text) peers))) ++
+
optional (filer."port.readonly" != null) "-port.readonly=${toString filer."port.readonly"}" ++
+
optional (rack != "") "-rack=${rack}" ++
+
optionals s3.enable [
+
"-s3"
+
"-s3.port=${toString filer.s3.port}"
+
] ++
+
optional (s3.enable && s3."cert.file" != "") "-s3.cert.file=${s3."cert.file"}" ++
+
optional (s3.enable && s3."key.file" != "") "-s3.key.file=${s3."key.file"}" ++
+
optional (s3.enable && s3.config != "") "-s3.config=${s3.config}" ++
+
optional (s3.enable && s3.domainName != []) "-s3.domainName=${concatStringsSep "," s3.domainName}";
+
+
systemdService.preStart = let
+
conf = filer.configs.filer.leveldb2 or {};
+
in optionalString (conf ? "dir") "mkdir -p ${conf.dir}";
+
}
+
) cluster.config.filers //
+
mapAttrs' (name: webdav: nameValuePair
+
"webdav-${name}"
+
{
+
inherit (webdav) cluster;
+
+
command = "webdav";
+
+
args = with webdav;
+
[
+
"-port=${toString port}"
+
"-filer=${filer.text}"
+
"-cacheCapacityMB=${toString cacheCapacityMB}"
+
] ++
+
optional (collection != "") "-collection=${collection}" ++
+
optional (cacheDir != "") "-cacheDir=${cacheDir}";
+
}
+
) cluster.config.webdavs;
+
};
+
};
+
};
+
+
commonModule = cluster: common: {
+
options = {
+
cluster = mkOption {
+
type = types.submodule clusterModule;
+
internal = true;
+
};
+
+
openFirewall = mkEnableOption "open the firewall";
+
};
+
+
config = { inherit cluster; };
+
};
+
+
masterModule = cluster: master: {
+
imports = [ (commonModule cluster) ];
+
+
options = {
+
configs = mkOption {
+
type = with types; attrsOf attrs;
+
default.master.maintenance = {
+
scripts = ''
+
ec.encode -fullPercent=95 -quietFor=1h
+
ec.rebuild -force
+
ec.balance -force
+
volume.balance -force
+
volume.fix.replication
+
'';
+
sleep_minutes = 17;
+
};
+
};
+
+
cpuprofile = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
defaultReplication = mkOption {
+
type = types.submodule replicationModule;
+
default = {};
+
};
+
+
disableHttp = mkEnableOption "disable HTTP requests, gRPC only";
+
+
garbageThreshold = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
ip = mkOption {
+
type = types.str;
+
default = config.networking.hostName;
+
};
+
+
"ip.bind" = mkOption {
+
type = types.str;
+
default = "0.0.0.0";
+
};
+
+
mdir = mkOption {
+
type = types.str;
+
default = ".";
+
};
+
+
memprofile = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
metrics = {
+
enable = mkEnableOption "Prometheus";
+
+
address = mkOption {
+
type = types.submodule ipPortModule;
+
default = {};
+
};
+
+
intervalSeconds = mkOption {
+
type = types.ints.unsigned;
+
default = 15;
+
};
+
};
+
+
peers = mkOption {
+
type = peersType;
+
default = mapAttrsIpPort master.config.cluster.masters;
+
};
+
+
port = mkOption {
+
type = types.port;
+
default = 9333;
+
};
+
+
resumeState = mkEnableOption "resume previous state on master server";
+
+
volumePreallocate = mkEnableOption "preallocate disk space for volumes";
+
+
volumeSizeLimitMB = mkOption {
+
type = types.ints.unsigned;
+
default = 30000;
+
};
+
+
whiteList = mkOption {
+
type = with types; listOf str;
+
default = [];
+
};
+
};
+
};
+
+
volumeModule = cluster: volume: {
+
imports = [ (commonModule cluster) ];
+
+
options = {
+
configs = mkOption {
+
type = with types; attrsOf attrs;
+
default = {};
+
};
+
+
compactionMBps = mkOption {
+
type = with types; nullOr ints.unsigned;
+
default = null;
+
};
+
+
cpuprofile = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
dataCenter = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
dir = mkOption {
+
type = with types; listOf str;
+
default = [ "/var/lib/seaweedfs/${cluster._module.args.name}/volume-${volume.config._module.args.name}" ];
+
};
+
+
fileSizeLimitMB = mkOption {
+
type = types.ints.unsigned;
+
default = 256;
+
};
+
+
idleTimeout = mkOption{
+
type = types.ints.unsigned;
+
default = 30;
+
};
+
+
"images.fix.orientation" = mkEnableOption "adjustment of jpg orientation when uploading";
+
+
index = mkOption {
+
type = types.enum [
+
"memory"
+
"leveldb"
+
"leveldbMedium"
+
"leveldbLarge"
+
];
+
default = "memory";
+
};
+
+
ip = mkOption {
+
type = types.str;
+
default = config.networking.hostName;
+
};
+
+
"ip.bind" = mkOption {
+
type = types.str;
+
default = "0.0.0.0";
+
};
+
+
max = mkOption {
+
type = with types; listOf ints.unsigned;
+
default = [ 8 ];
+
};
+
+
memprofile = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
metricsPort = mkOption {
+
type = with types; nullOr port;
+
default = null;
+
};
+
+
minFreeSpacePercent = mkOption {
+
type = types.ints.unsigned;
+
default = 1;
+
};
+
+
mserver = mkOption {
+
type = peersType;
+
default = mapAttrsIpPort volume.config.cluster.masters;
+
};
+
+
port = mkOption {
+
type = types.port;
+
default = 8080;
+
};
+
+
"port.public" = mkOption {
+
type = with types; nullOr port;
+
default = null;
+
};
+
+
pprof = mkEnableOption "pprof http handlers. precludes -memprofile and -cpuprofile";
+
+
preStopSeconds = mkOption {
+
type = types.int;
+
default = 10;
+
};
+
+
publicUrl = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
rack = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
"read.redirect" = mkOption {
+
type = types.bool;
+
default = true;
+
};
+
+
whiteList = mkOption {
+
type = with types; listOf str;
+
default = [];
+
};
+
};
+
};
+
+
filerModule = cluster: filer: {
+
imports = [ (commonModule cluster) ];
+
+
options = {
+
configs = mkOption {
+
type = with types; attrsOf attrs;
+
default.filer.leveldb2 = {
+
enabled = true;
+
dir = "/var/lib/seaweedfs/${cluster._module.args.name}/filer-${filer.config._module.args.name}/filerldb2";
+
};
+
};
+
+
collection = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
dataCenter = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
defaultReplicaPlacement = mkOption {
+
type = with types; nullOr (submodule replicationModule);
+
default = null;
+
};
+
+
dirListLimit = mkOption {
+
type = types.ints.unsigned;
+
default = 100000;
+
};
+
+
disableDirListing = mkEnableOption "turn off directory listing";
+
+
disableHttp = mkEnableOption "disable http request, only gRpc operations are allowed";
+
+
encryptVolumeData = mkEnableOption "encrypt data on volume servers";
+
+
ip = mkOption {
+
type = types.str;
+
default = config.networking.hostName;
+
};
+
+
"ip.bind" = mkOption {
+
type = types.str;
+
default = "0.0.0.0";
+
};
+
+
master = mkOption {
+
type = peersType;
+
default = mapAttrsIpPort filer.config.cluster.masters;
+
};
+
+
maxMB = mkOption {
+
type = types.ints.unsigned;
+
default = 32;
+
};
+
+
metricsPort = mkOption {
+
type = with types; nullOr port;
+
default = null;
+
};
+
+
peers = mkOption {
+
type = peersType;
+
default = mapAttrsIpPort filer.config.cluster.filers;
+
};
+
+
port = mkOption {
+
type = types.port;
+
default = 8888;
+
};
+
+
"port.readonly" = mkOption {
+
type = with types; nullOr port;
+
default = null;
+
};
+
+
rack = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
s3 = {
+
enable = mkEnableOption "whether to start S3 gateway";
+
+
"cert.file" = mkOption {
+
type = types.path;
+
default = "";
+
};
+
+
config = mkOption {
+
type = types.path;
+
default = "";
+
};
+
+
domainName = mkOption {
+
type = with types; listOf str;
+
default = [];
+
};
+
+
"key.file" = mkOption {
+
type = types.path;
+
default = "";
+
};
+
+
port = mkOption {
+
type = types.port;
+
default = 8333;
+
};
+
};
+
};
+
};
+
+
webdavModule = cluster: webdav: {
+
imports = [ (commonModule cluster) ];
+
+
options = {
+
cacheCapacityMB = mkOption {
+
type = types.int;
+
default = 1000;
+
};
+
+
cacheDir = mkOption {
+
type = types.str;
+
default = ".";
+
};
+
+
collection = mkOption {
+
type = types.str;
+
default = "";
+
};
+
+
filer = mkOption {
+
type = types.submodule ipPortModule;
+
default = {
+
ip = "127.0.0.1";
+
port = 8888;
+
};
+
};
+
+
port = mkOption {
+
type = types.port;
+
default = 7333;
+
};
+
};
+
};
+
+
instanceModule = instance: {
+
options = {
+
cluster = mkOption {
+
type = types.submodule clusterModule;
+
internal = true;
+
};
+
+
command = mkOption {
+
type = types.enum [
+
"server"
+
"master"
+
"volume"
+
"mount"
+
"filer"
+
"filer.replicate"
+
"filer.sync"
+
"s3"
+
"msgBroker"
+
"watch"
+
"webdav"
+
];
+
};
+
+
logArgs = mkOption {
+
type = with types; listOf str;
+
default = [];
+
};
+
+
args = mkOption {
+
type = with types; listOf str;
+
default = [];
+
};
+
+
configs = mkOption {
+
type = with types; attrsOf attrs;
+
default = {};
+
};
+
+
package = mkOption {
+
type = types.package;
+
default = instance.config.cluster.package;
+
};
+
+
systemdService = mkOption {
+
type = types.attrs;
+
default = {};
+
};
+
};
+
+
config = {
+
logArgs = [ "-logtostderr" ];
+
+
systemdService.path = optional (instance.config.command == "mount") pkgs.fuse;
+
};
+
};
+
+
replicationModule = replication: {
+
options = {
+
dataCenter = mkOption {
+
type = types.ints.between 0 9;
+
default = 0;
+
};
+
+
rack = mkOption {
+
type = types.ints.between 0 9;
+
default = 0;
+
};
+
+
server = mkOption {
+
type = types.ints.between 0 9;
+
default = 0;
+
};
+
+
code = mkOption {
+
readOnly = true;
+
internal = true;
+
type = types.str;
+
default = with replication.config; "${toString dataCenter}${toString rack}${toString server}";
+
};
+
};
+
};
+
+
peersType = with types; listOf (submodule ipPortModule);
+
+
ipPortModule = ipPort: {
+
options = {
+
ip = mkOption {
+
type = types.str;
+
};
+
+
port = mkOption {
+
type = types.port;
+
};
+
+
text = mkOption {
+
internal = true;
+
readOnly = true;
+
type = types.str;
+
default = with ipPort.config; "${ip}:${toString port}";
+
};
+
};
+
};
+
+
mapAttrsIpPort = attrs: mapAttrsToList (name: value: { inherit (value) ip port; }) attrs;
+
+
toTOML = with generators; toINI {
+
mkKeyValue = mkKeyValueDefault {
+
mkValueString = v:
+
if isString v
+
then (
+
if hasInfix "\n" v
+
then ''
+
"""
+
${removeSuffix "\n" v}
+
"""
+
''
+
else ''"${v}"''
+
)
+
else mkValueStringDefault {} v;
+
} "=";
+
};
+
+
flattenAttrs = separator: attrs: let
+
/*
+
attrs = {
+
a = {
+
m1 = {};
+
m2 = {};
+
};
+
b = {
+
m1 = {};
+
};
+
}
+
*/
+
+
/*
+
step1 = {
+
a = [
+
{ name = "a-m1"; value = {}; }
+
{ name = "a-m2"; value = {}; }
+
];
+
b = [
+
{ name = "b-m1"; value = {}; }
+
];
+
};
+
*/
+
step1 = mapAttrs (outerName: outerValues:
+
mapAttrsToList (innerName: innerValues: nameValuePair
+
"${outerName}${separator}${innerName}"
+
innerValues
+
) outerValues
+
) attrs;
+
+
/*
+
step2 = [
+
[
+
{ name = "a-m1"; value = {}; }
+
{ name = "a-m2"; value = {}; }
+
]
+
[
+
{ name = "b-m1"; value = {}; }
+
]
+
];
+
*/
+
step2 = mapAttrsToList (name: value: value) step1;
+
+
/*
+
step3 = [
+
{ name = "a-m1"; value = {}; }
+
{ name = "a-m2"; value = {}; }
+
{ name = "b-m1"; value = {}; }
+
];
+
*/
+
step3 = flatten step2;
+
in
+
/*
+
{
+
a-m1 = {};
+
a-m2 = {};
+
b-m1 = {};
+
};
+
*/
+
builtins.listToAttrs step3;
+
in {
+
options.modules.seaweedfs = {
+
clusters = mkOption {
+
type = with types; attrsOf (submodule clusterModule);
+
default = {};
+
description = "SeaweedFS clusters";
+
};
+
};
+
+
config = {
+
systemd.services = mapAttrs'
+
(name: instance: nameValuePair "seaweedfs-${name}" instance)
+
(flattenAttrs "-" (
+
mapAttrs (clusterName: cluster:
+
mapAttrs (instanceName: instance: with instance; recursiveUpdate systemdService rec {
+
description = "SeaweedFS ${clusterName} ${instanceName}";
+
wants = [ "network.target" ];
+
after = wants;
+
wantedBy = [ "multi-user.target" ];
+
preStart = with serviceConfig; ''
+
${
+
let securityFile = config.environment.etc."seaweedfs/${clusterName}/security.toml";
+
in optionalString securityFile.enable "ln -s /etc/${securityFile.target} ${WorkingDirectory}/"
+
}
+
+
# TODO replace find usage with statically known condition
+
find -L /etc/${ConfigurationDirectory} -type f -exec ln -s '{}' ${WorkingDirectory}/ \;
+
+
${optionalString (systemdService ? preStart) systemdService.preStart}
+
'';
+
serviceConfig = rec {
+
ExecStart = "${package}/bin/weed ${concatStringsSep " " logArgs} ${command} ${concatStringsSep " " args}";
+
Restart = "on-failure";
+
Type = "exec";
+
ConfigurationDirectory = "seaweedfs/${clusterName}/${instanceName}";
+
RuntimeDirectory = ConfigurationDirectory;
+
RuntimeDirectoryPreserve = "restart";
+
WorkingDirectory = "/run/${RuntimeDirectory}";
+
};
+
}) cluster.instances
+
) cfg.clusters
+
));
+
+
environment.etc =
+
(mapAttrs' (name: cluster:
+
let file = "seaweedfs/${name}/security.toml";
+
in nameValuePair file {
+
enable = config.environment.etc.${file}.text != "";
+
text = with cluster.security.grpc; toTOML (
+
(if ca == null then {} else { grpc.ca = ca; }) //
+
(if master == null then {} else { "grpc.master" = { inherit (master) cert key; }; }) //
+
(if volume == null then {} else { "grpc.volume" = { inherit (volume) cert key; }; }) //
+
(if filer == null then {} else { "grpc.filer" = { inherit (filer) cert key; }; }) //
+
(if client == null then {} else { "grpc.client" = { inherit (client) cert key; }; }) //
+
(if msgBroker == null then {} else { "grpc.msg_broker" = { inherit (msgBroker) cert key; }; })
+
);
+
}
+
) cfg.clusters) //
+
(mapAttrs'
+
(name: config: nameValuePair
+
"seaweedfs/${name}.toml"
+
{ text = toTOML config; }
+
)
+
(flattenAttrs "/" (
+
mapAttrs (clusterName: cluster:
+
flattenAttrs "/" (
+
mapAttrs
+
(instanceName: instance: instance.configs)
+
cluster.instances
+
)
+
) cfg.clusters
+
))
+
);
+
+
networking.firewall.allowedTCPPorts = let
+
modulesToPorts = extraPorts: mapAttrsToList (name: module:
+
with module;
+
optionals openFirewall (
+
[ port (port + 10000) ] ++
+
(filter (p: p != null) (extraPorts module))
+
)
+
);
+
in flatten (mapAttrsToList (clusterName: cluster:
+
modulesToPorts
+
(master: [])
+
cluster.masters ++
+
+
modulesToPorts
+
(volume: with volume; [ metricsPort volume."port.public" ])
+
cluster.volumes ++
+
+
modulesToPorts
+
(filer: with filer; [ metricsPort filer."port.readonly" s3.port])
+
cluster.filers ++
+
+
modulesToPorts
+
(webdav: [])
+
cluster.webdavs
+
) cfg.clusters);
+
};
+
}
secrets/build-token.age

This is a binary file and will not be displayed.

+10 -10
secrets/garage-admin-token.age
···
age-encryption.org/v1
-
-> ssh-ed25519 i9wBeA 3kSlwZ54PYTcvftI/3XcTpnSDxBmKRKNMy3Xf69Psm8
-
QtqP2ebKP2M7hWcvFoT24aBkhj2Kvu1dlBbcGPyBo3k
-
-> ssh-ed25519 UbxDgg o/TFehVOAh1Mcw3TJdldANFahiFhj9UvhFoFWyXVmzk
-
suQBydY0cRZ3FrrYDizfE8CrF8YjyJXWtzpPMJ5vgqI
-
-> ssh-ed25519 YYzA7Q nbNcoC5R6CxsZKEvscezknX56mvDnw3VdG2gApHcZVY
-
AJHD+nM0OmZMX+aILK4s3x8wHI8K3O6hmb+1T7URTWc
-
-> ssh-ed25519 UbxDgg iK4ea0RJLy7UGCHe8B5g6qr7hpSMYvx3dSQ3DW47sS4
-
8dAWBDtDOY3YB0u+aEiIUTMqBQqYxV/HafosHk5Vklo
-
--- wbbBYCiJ+zOdj8bEmKLSd+vU3RMGX5nulXAcKcK4fRI
-
�U�$F�q�p��h?���x�yd��5������>���*=/=�Gi(�,�Bw{� h�r���-'X�G��9�5#/�'�!�
+
-> ssh-ed25519 i9wBeA fg1LWmMYua0wvyimvg0ACEuZW/xQMG8BmdLm88TdZTU
+
ArN7CSKr0DDQ5y1W5RSfWfgdYJ6lmVpNcqdWoWLF3lk
+
-> ssh-ed25519 du7llw CcU5ZZufG24Vhkap2aHpgnx49bbF/VFE4b1TPAtE2CA
+
oP4hZZ1vl1GBzVHH1MbHGHen9lNNzn3IKeurT+LuB58
+
-> ssh-ed25519 YYzA7Q IKh3q1rcRds4lVvHuF/wsmTXgUcY/MmAUBS6QeGMPFI
+
eTdHuOwaBq/ikNyb/D1YJMWJ0JyAMmC2aJTcw2/f5Co
+
-> ssh-ed25519 3RWqPQ 16o8nTSNZyZZpedt5wwzFqU+p0GptbXK4n5s+PIsrwA
+
jZqX8fYZcvLO9wgKj0jz/Jxl+KawQq0x303HiqGVeew
+
--- qCeaSJ7KVru9VSYN04w845ZdqO0ELrPJ3JpgElzfMfk
+
�����Q�'�t[��;�F���IȆ#�`#;Oi��r�� Ka���&'m�5n>�1�: ��i����q&,k
+10 -10
secrets/garage-metrics-token.age
···
age-encryption.org/v1
-
-> ssh-ed25519 i9wBeA fIaj6vvNiIxYr9vRBmytSawuZoPv2bPg1HHwnFY1rEw
-
rMtOdDZGGSCwpQADwz2sHZ9FQyd+DzJiHhkF7mMDwK8
-
-> ssh-ed25519 UbxDgg mvKuVDcA4cErPHhyvGywtqKwEEMN3mgll7hb99Of+z0
-
qIin8xtByFIx/3o9PWEXttuDJ6QuNyDhtqR1DJ2WzQc
-
-> ssh-ed25519 YYzA7Q HbD22umTExyRT0BUbOf64Flg3sFuuyD6Oj2pI1gI/nk
-
IXwpzbLeCU9FzZ2bc3+iNWWK9UjqO3hHzUod47mUCho
-
-> ssh-ed25519 UbxDgg H1S52sDX6YXv53ldwrFgJUhFdh0VhnwBhiOUVxMJcls
-
jSEgl4VJBwp4R5iklLhs8aXFewWQqayPceBx5bxaTd8
-
--- QaGGtqgTi9qeSoWRRJcj6HLecoJ05D0vjTCiEXMeS+w
-
PTUd�G� �qP�lL�n��X�MQ�;�"�w�s0B��s��`)R������d�Q�ha�pD���^1�E��m]ڲ
+
-> ssh-ed25519 i9wBeA a4gBmZE+RpBZyTpERIEoaU/ZUqqTvZRicqS/evUgvWk
+
Le/fvk2QEG9SQfl8s2cgT585wpb2xvvIDqMCaP9JHow
+
-> ssh-ed25519 du7llw CuD1HwdLd0hxXgmJxWJXO/YmUuwaEq1kLhnWeDa/oQ0
+
R8cjKQLF+cAJpVr9660EhhbwXtD5OIH+6CG93+p0rBs
+
-> ssh-ed25519 YYzA7Q yySJ88QwwfiXnJ8IqxnycRAUrBMSjudSbNgIVbwDLUo
+
tQFPx6Ri1nMI9SI+eD+dYnTXBNLPKJLtUd2McBc1d9w
+
-> ssh-ed25519 3RWqPQ EpQFJb+dq5YGJ7oNaRHJUj8yU/Uash0FXibCZWXjVUE
+
pW52gB+WHwZk9qSSHxzdCzOsbhI9cdLvEBQMt4ARjiM
+
--- tNQpOQN20gjxqJzJjPCGNUQbJEZgOLoxyXbNao8cS1Q
+
x*�$��)����S�پ���s1�m/�4�J��կ��� �X�E 2�NlƟދ gȦ �|���{��H��
+10 -11
secrets/garage-rpc-secret.age
···
age-encryption.org/v1
-
-> ssh-ed25519 i9wBeA AtZIxAsM2lbP4hpZ5RjMkdVN3Ko4IVciNLsI+2ioh10
-
HA5dZPJeO0RJpQVcXSTXl9Bzah55Md+UPldiz9NkzFM
-
-> ssh-ed25519 UbxDgg IT0sHTltNKJqDnBdJXcBa3D8LO0rWY1ff2yursA0zl4
-
jKE3Y2zOL92q82mcmdwZ1zi9AyaGLF1i9kl1+gegb7o
-
-> ssh-ed25519 YYzA7Q DNlwj0lNOmQukfavVMyUxAJtNTpqKUobCu7stzrU/lg
-
Qxu4ITmiPfhDUnMdfBQPVEJi8AkZ3wCpKsfMlkWKoNs
-
-> ssh-ed25519 UbxDgg bSJTVJtj9b4hb8/MFyWry79pez8xa2+lXgufBHinwz4
-
lFnBfg1BZ9Kzb6wcYqbR0km9jRvSuK/fyhV5H508s24
-
--- RhUMM66NH9bAUKHjMiJdQXV96SBap8hKKGayMaZ578c
-
b�p�қU}�@�=5«�����Φ=���
-
T�-�� *��M����ɳ��5ы���"�`1[��_rt��jU]�&g���ՠ�P�q���C8��
+
-> ssh-ed25519 i9wBeA 8jCNk6EVR5xRMZm8xcDU6+HbM6bPbOuJD0hkfD0z8V0
+
35OKqytIzelEGkCCZ6wZ/JntcKhkYnrXqC1tjcWem/Q
+
-> ssh-ed25519 du7llw +CSkWwDcxadmXUazTQ3ilexC9D/tDLGqq/JLmuXisSQ
+
o7LiFJDX39Jf9rVNNB3tWr59enehKGT8YE4IrH75754
+
-> ssh-ed25519 YYzA7Q gVw+pxR9ZjhIztMwv+qmVf9r24AaMc3PZoIwS8+aI3I
+
qQhalCXRuNsSeMgRaiPN5ho8eLza2TInJTNkHKecVGM
+
-> ssh-ed25519 3RWqPQ iCzv+xB1+FWrdeQJC9BUrPSjFifkNobbnAvnR1qQWBg
+
xGayQTVaxOJdA+dJPZmm8MOnvOScfrDbqcanq/FidTk
+
--- JmDhgmtt40ySOQuJOTsJf7CqE9duhGodJNxMV3SJmic
+
�W�E����e�R�`cY ��\^Ϡ �W��I��`~8�х(=nH�k0�����m~P��,�S���kb}&@�QR_�P36S)����W��C�3�
secrets/headscale-authkey.age

This is a binary file and will not be displayed.

+11
secrets/headscale-oidc-key.path
···
+
age-encryption.org/v1
+
-> ssh-ed25519 i9wBeA Gtd2ftibBF2166KCpJiJt1W9kbwrTybKx4O561e7oQw
+
3ci7PJxYqoglIml6YiyJrffteIZN0aUWDN5z4sogcfs
+
-> ssh-ed25519 du7llw zxlkrcUyO4q4CsRAYMr8vp7LzdK2E/O9fQrCi6TxYXs
+
q3xdu3He3SXg29mKS8Fv3YWt2CkENucPtPYtXmw+dx4
+
-> ssh-ed25519 YYzA7Q VQFwGeDchwrEiI3mPsNK1yGQKupTnh5jLxLhVlPbbzU
+
tsPNihdGL/2VumVXuOKRnfPw7LBlr5xKOODAKY5ROyc
+
-> ssh-ed25519 3RWqPQ YrxOoecRxIrNHq93LvFMgk2h83a0Z3UtsYeXKeQd1xo
+
lUM0BU8KTBjR13TGQj88n5BA4b9JAjZALfu9fTSmpu8
+
--- 8WCStyJ9IerfsQD3pL4ag8tnmt7hBXZxR+aCfv4BjS0
+
7�E�rY�)�GI���G�*K�b����b+��>�m�{����K�!��m�����J:���{��2/��
+4 -2
secrets/secrets.nix
···
regent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ0pU82lV9dSjkgYbdh9utZ5CDM2dPN70S5fBqN1m3Pb regent@orobas.local";
users = [ regent ];
-
valefar = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJlXq2lSfiWwRwIxsxhffW5FDGmjt0QKYN+BaikmRR71";
+
valefar = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIPu8CVFsnUxhvABEqv4+EBBOL8tva5HJFoV3hElAlD0";
buer = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMVhjwDcO8eleSoR8a37ZGGPvkHEgV+c8SYcy07SayPB";
-
focalor = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJlXq2lSfiWwRwIxsxhffW5FDGmjt0QKYN+BaikmRR71";
+
focalor = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIA518oTmTp5VG60/dBrLu7rlV1hh8muhMattoiGfmrei";
+
baal = "AAAAC3NzaC1lZDI1NTE5AAAAILdjRWunQNFeTTdnw4GaqL9G34oo4QuvrRE/jvxLdK1C";
systems = [ valefar buer focalor];
in
{
···
"garage-metrics-token.age".publicKeys = users ++ systems;
"headscale-authkey.age".publicKeys = users ++ systems;
+
"headscale-oidc-key.path".publicKeys = users ++ systems;
}