Merge pull request #262347 from adamcstephens/incus/module

nixos/incus: init module and tests

Changed files: +482 -6

- nixos/
- pkgs/by-name/co/cowsql/
- pkgs/by-name/in/incus/
- pkgs/by-name/in/incus-unwrapped/
- pkgs/by-name/ra/raft-cowsql/
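For orientation, enabling the new module should look roughly like the sketch below — a minimal example assembled from the options defined in nixos/modules/virtualisation/incus.nix in this diff (the user name is a placeholder):

{ ... }:
{
  virtualisation.incus.enable = true;

  # Per the module's enable description, members of the "incus-admin"
  # group may talk to the daemon with the `incus` CLI.
  users.users.alice = {
    isNormalUser = true;
    extraGroups = [ "incus-admin" ];
  };
}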
+1
nixos/modules/module-list.nix
···
     ./virtualisation/docker.nix
     ./virtualisation/ecs-agent.nix
     ./virtualisation/hyperv-guest.nix
+    ./virtualisation/incus.nix
     ./virtualisation/kvmgt.nix
     ./virtualisation/libvirtd.nix
     ./virtualisation/lxc.nix
···
+236
nixos/modules/virtualisation/incus.nix
···
{ config, lib, pkgs, ... }:

let
  cfg = config.virtualisation.incus;

  preseedFormat = pkgs.formats.yaml { };
in
{
  meta.maintainers = [ lib.maintainers.adamcstephens ];

  options = {
    virtualisation.incus = {
      enable = lib.mkEnableOption (lib.mdDoc ''
        incusd, a daemon that manages containers and virtual machines.

        Users in the "incus-admin" group can interact with
        the daemon (e.g. to start or stop containers) using the
        {command}`incus` command line tool, among others.
      '');

      package = lib.mkPackageOptionMD pkgs "incus" { };

      lxcPackage = lib.mkPackageOptionMD pkgs "lxc" { };

      preseed = lib.mkOption {
        type = lib.types.nullOr (
          lib.types.submodule { freeformType = preseedFormat.type; }
        );

        default = null;

        description = lib.mdDoc ''
          Configuration for Incus preseed, see
          <https://linuxcontainers.org/incus/docs/main/howto/initialize/#non-interactive-configuration>
          for supported values.

          Changes to this will be re-applied to Incus, which will overwrite existing entities or create missing ones,
          but entities will *not* be removed by preseed.
        '';

        example = {
          networks = [
            {
              name = "incusbr0";
              type = "bridge";
              config = {
                "ipv4.address" = "10.0.100.1/24";
                "ipv4.nat" = "true";
              };
            }
          ];
          profiles = [
            {
              name = "default";
              devices = {
                eth0 = {
                  name = "eth0";
                  network = "incusbr0";
                  type = "nic";
                };
                root = {
                  path = "/";
                  pool = "default";
                  size = "35GiB";
                  type = "disk";
                };
              };
            }
          ];
          storage_pools = [
            {
              name = "default";
              driver = "dir";
              config = {
                source = "/var/lib/incus/storage-pools/default";
              };
            }
          ];
        };
      };

      socketActivation = lib.mkEnableOption (
        lib.mdDoc ''
          socket-activation for starting incus.service. Enabling this option
          will stop incus.service from starting automatically on boot.
        ''
      );

      startTimeout = lib.mkOption {
        type = lib.types.ints.unsigned;
        default = 600;
        apply = toString;
        description = lib.mdDoc ''
          Time to wait (in seconds) for incusd to become ready to process requests.
          If incusd does not reply within the configured time, `incus.service` will be
          considered failed and systemd will attempt to restart it.
        '';
      };
    };
  };

  config = lib.mkIf cfg.enable {
    # https://github.com/lxc/incus/blob/f145309929f849b9951658ad2ba3b8f10cbe69d1/doc/reference/server_settings.md
    boot.kernel.sysctl = {
      "fs.aio-max-nr" = lib.mkDefault 524288;
      "fs.inotify.max_queued_events" = lib.mkDefault 1048576;
"fs.inotify.max_user_instances" = lib.mkOverride 1050 1048576; # override in case conflict nixos/modules/services/x11/xserver.nix
+
"fs.inotify.max_user_watches" = lib.mkOverride 1050 1048576; # override in case conflict nixos/modules/services/x11/xserver.nix
+
"kernel.dmesg_restrict" = lib.mkDefault 1;
+
"kernel.keys.maxbytes" = lib.mkDefault 2000000;
+
"kernel.keys.maxkeys" = lib.mkDefault 2000;
+
"net.core.bpf_jit_limit" = lib.mkDefault 1000000000;
+
"net.ipv4.neigh.default.gc_thresh3" = lib.mkDefault 8192;
+
"net.ipv6.neigh.default.gc_thresh3" = lib.mkDefault 8192;
+
# vm.max_map_count is set higher in nixos/modules/config/sysctl.nix
+
};
+
+
boot.kernelModules = [
+
"veth"
+
"xt_comment"
+
"xt_CHECKSUM"
+
"xt_MASQUERADE"
+
"vhost_vsock"
+
] ++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
+
+
environment.systemPackages = [ cfg.package ];
+
+
    # Note: the following options are also declared in virtualisation.lxc, but
    # that module can't simply be enabled to reuse them, because it does a
    # bunch of unrelated things.
    systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];

    security.apparmor = {
      packages = [ cfg.lxcPackage ];
      policies = {
        "bin.lxc-start".profile = ''
          include ${cfg.lxcPackage}/etc/apparmor.d/usr.bin.lxc-start
        '';
        "lxc-containers".profile = ''
          include ${cfg.lxcPackage}/etc/apparmor.d/lxc-containers
        '';
      };
    };

    systemd.services.incus = {
      description = "Incus Container and Virtual Machine Management Daemon";

      wantedBy = lib.mkIf (!cfg.socketActivation) [ "multi-user.target" ];
      after = [
        "network-online.target"
        "lxcfs.service"
      ] ++ (lib.optional cfg.socketActivation "incus.socket");
      requires = [
        "lxcfs.service"
      ] ++ (lib.optional cfg.socketActivation "incus.socket");
      wants = [
        "network-online.target"
      ];

      path = lib.mkIf config.boot.zfs.enabled [ config.boot.zfs.package ];

      environment = {
        # Override the path to the LXC template configuration directory
        INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
      };

      serviceConfig = {
        ExecStart = "${cfg.package}/bin/incusd --group incus-admin";
        ExecStartPost = "${cfg.package}/bin/incusd waitready --timeout=${cfg.startTimeout}";
        ExecStop = "${cfg.package}/bin/incus admin shutdown";

        KillMode = "process"; # when stopping, leave the containers alone
        Delegate = "yes";
        LimitMEMLOCK = "infinity";
        LimitNOFILE = "1048576";
        LimitNPROC = "infinity";
        TasksMax = "infinity";

        Restart = "on-failure";
        TimeoutStartSec = "${cfg.startTimeout}s";
        TimeoutStopSec = "30s";
      };
    };

    systemd.sockets.incus = lib.mkIf cfg.socketActivation {
      description = "Incus UNIX socket";
      wantedBy = [ "sockets.target" ];

      socketConfig = {
        ListenStream = "/var/lib/incus/unix.socket";
        SocketMode = "0660";
        SocketGroup = "incus-admin";
        Service = "incus.service";
      };
    };

    systemd.services.incus-preseed = lib.mkIf (cfg.preseed != null) {
      description = "Incus initialization with preseed file";

      wantedBy = [ "incus.service" ];
      after = [ "incus.service" ];
      bindsTo = [ "incus.service" ];
      partOf = [ "incus.service" ];

      script = ''
        ${cfg.package}/bin/incus admin init --preseed <${
          preseedFormat.generate "incus-preseed.yaml" cfg.preseed
        }
      '';

      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
    };

    users.groups.incus-admin = { };

    users.users.root = {
      # match documented default ranges https://linuxcontainers.org/incus/docs/main/userns-idmap/#allowed-ranges
      subUidRanges = [
        {
          startUid = 1000000;
          count = 1000000000;
        }
      ];
      subGidRanges = [
        {
          startGid = 1000000;
          count = 1000000000;
        }
      ];
    };

    virtualisation.lxc.lxcfs.enable = true;
  };
}
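To see how the options above compose, here is a sketch (not part of the PR) that preseeds a dir-backed storage pool, mirroring the option's example and the preseed test below. Note the interaction with socket activation: incus-preseed.service is wantedBy incus.service, so with socketActivation enabled the preseed would only run once the first client hits the socket rather than at boot.

{
  virtualisation.incus = {
    enable = true;

    preseed = {
      storage_pools = [
        {
          name = "default";
          driver = "dir";
        }
      ];
    };
  };
}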
+1
nixos/tests/all-tests.nix
···
   icingaweb2 = handleTest ./icingaweb2.nix {};
   iftop = handleTest ./iftop.nix {};
   incron = handleTest ./incron.nix {};
+  incus = pkgs.recurseIntoAttrs (handleTest ./incus { inherit handleTestOn; });
   influxdb = handleTest ./influxdb.nix {};
   influxdb2 = handleTest ./influxdb2.nix {};
   initrd-network-openvpn = handleTest ./initrd-network-openvpn {};
···
+77
nixos/tests/incus/container.nix
···
import ../make-test-python.nix ({ pkgs, lib, ... }:

let
  releases = import ../../release.nix {
    configuration = {
      # Building documentation makes the test take unnecessarily long:
      documentation.enable = lib.mkForce false;
    };
  };

  container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
  container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
in
{
  name = "incus-container";

  meta.maintainers = with lib.maintainers; [ adamcstephens ];

  nodes.machine = { ... }: {
    virtualisation = {
      # Ensure test VM has enough resources for creating and managing guests
      cores = 2;
      memorySize = 1024;
      diskSize = 4096;

      incus.enable = true;
    };
  };

  testScript = ''
    def instance_is_up(_) -> bool:
        status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
        return status == 0

    def set_container(config):
        machine.succeed(f"incus config set container {config}")
        machine.succeed("incus restart container")
        with machine.nested("Waiting for instance to start and be usable"):
            retry(instance_is_up)

    machine.wait_for_unit("incus.service")

    # no preseed should mean no service
    machine.fail("systemctl status incus-preseed.service")

    machine.succeed("incus admin init --minimal")

    with subtest("Container image can be imported"):
        machine.succeed("incus image import ${container-image-metadata}/*/*.tar.xz ${container-image-rootfs}/*/*.tar.xz --alias nixos")

    with subtest("Container can be launched and managed"):
        machine.succeed("incus launch nixos container")
        with machine.nested("Waiting for instance to start and be usable"):
            retry(instance_is_up)
        machine.succeed("echo true | incus exec container /run/current-system/sw/bin/bash -")

    with subtest("Container CPU limits can be managed"):
        set_container("limits.cpu 1")
        cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
        assert cpuinfo == "1", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 1, got: {cpuinfo}"

        set_container("limits.cpu 2")
        cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
        assert cpuinfo == "2", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 2, got: {cpuinfo}"

    with subtest("Container memory limits can be managed"):
        set_container("limits.memory 64MB")
        meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
        meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
        assert meminfo_bytes == "62500 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '62500 kB', got: '{meminfo_bytes}'"

        set_container("limits.memory 128MB")
        meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
        meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
        assert meminfo_bytes == "125000 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '125000 kB', got: '{meminfo_bytes}'"
  '';
})
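The expected values in the memory-limit assertions follow from a unit mismatch: Incus parses "64MB" as decimal megabytes (10^6 bytes), while /proc/meminfo reports binary kilobytes (1024 bytes each), so:

  64 MB  =  64,000,000 B / 1024 = 62,500 kB
  128 MB = 128,000,000 B / 1024 = 125,000 kB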
+14
nixos/tests/incus/default.nix
···
{
  system ? builtins.currentSystem,
  config ? { },
  pkgs ? import ../../.. { inherit system config; },
  handleTestOn,
}:
{
  container = import ./container.nix { inherit system pkgs; };
  preseed = import ./preseed.nix { inherit system pkgs; };
  socket-activated = import ./socket-activated.nix { inherit system pkgs; };
  virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix {
    inherit system pkgs;
  };
}
+60
nixos/tests/incus/preseed.nix
···
import ../make-test-python.nix ({ pkgs, lib, ... }:

{
  name = "incus-preseed";

  meta.maintainers = with lib.maintainers; [ adamcstephens ];

  nodes.machine = { lib, ... }: {
    virtualisation = {
      incus.enable = true;

      incus.preseed = {
        networks = [
          {
            name = "nixostestbr0";
            type = "bridge";
            config = {
              "ipv4.address" = "10.0.100.1/24";
              "ipv4.nat" = "true";
            };
          }
        ];
        profiles = [
          {
            name = "nixostest_default";
            devices = {
              eth0 = {
                name = "eth0";
                network = "nixostestbr0";
                type = "nic";
              };
              root = {
                path = "/";
                pool = "default";
                size = "35GiB";
                type = "disk";
              };
            };
          }
        ];
        storage_pools = [
          {
            name = "nixostest_pool";
            driver = "dir";
          }
        ];
      };
    };
  };

  testScript = ''
    machine.wait_for_unit("incus.service")
    machine.wait_for_unit("incus-preseed.service")

    with subtest("Verify preseed resources created"):
        machine.succeed("incus profile show nixostest_default")
        machine.succeed("incus network info nixostestbr0")
        machine.succeed("incus storage show nixostest_pool")
  '';
})
+26
nixos/tests/incus/socket-activated.nix
···
import ../make-test-python.nix ({ pkgs, lib, ... }:

{
  name = "incus-socket-activated";

  meta.maintainers = with lib.maintainers; [ adamcstephens ];

  nodes.machine = { lib, ... }: {
    virtualisation = {
      incus.enable = true;
      incus.socketActivation = true;
    };
  };

  testScript = ''
    machine.wait_for_unit("incus.socket")

    # ensure service is not running by default
    machine.fail("systemctl is-active incus.service")
    machine.fail("systemctl is-active incus-preseed.service")

    # access the socket and ensure the service starts
    machine.succeed("incus list")
    machine.wait_for_unit("incus.service")
  '';
})
+55
nixos/tests/incus/virtual-machine.nix
···
import ../make-test-python.nix ({ pkgs, lib, ... }:

let
  releases = import ../../release.nix {
    configuration = {
      # Building documentation makes the test take unnecessarily long:
      documentation.enable = lib.mkForce false;

      # Our tests require `grep` & friends:
      environment.systemPackages = with pkgs; [ busybox ];
    };
  };

  vm-image-metadata = releases.lxdVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
  vm-image-disk = releases.lxdVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};

  instance-name = "instance1";
in
{
  name = "incus-virtual-machine";

  meta.maintainers = with lib.maintainers; [ adamcstephens ];

  nodes.machine = { ... }: {
    virtualisation = {
      # Ensure test VM has enough resources for creating and managing guests
      cores = 2;
      memorySize = 1024;
      diskSize = 4096;

      incus.enable = true;
    };
  };

  testScript = ''
    def instance_is_up(_) -> bool:
        status, _ = machine.execute("incus exec ${instance-name} --disable-stdin --force-interactive /run/current-system/sw/bin/true")
        return status == 0

    machine.wait_for_unit("incus.service")

    machine.succeed("incus admin init --minimal")

    with subtest("virtual-machine image can be imported"):
        machine.succeed("incus image import ${vm-image-metadata}/*/*.tar.xz ${vm-image-disk}/nixos.qcow2 --alias nixos")

    with subtest("virtual-machine can be launched and become available"):
        machine.succeed("incus launch nixos ${instance-name} --vm --config limits.memory=512MB --config security.secureboot=false")
        with machine.nested("Waiting for instance to start and be usable"):
            retry(instance_is_up)

    with subtest("lxd-agent is started"):
        machine.succeed("incus exec ${instance-name} systemctl is-active lxd-agent")
  '';
})
+1 -3
pkgs/by-name/co/cowsql/package.nix
···
   outputs = [ "dev" "out" ];
   passthru = {
-    tests = {
-      inherit incus;
-    };
+    inherit (incus) tests;
     updateScript = gitUpdater {
       rev-prefix = "v";
···
+3
pkgs/by-name/in/incus-unwrapped/package.nix
···
 , udev
 , installShellFiles
 , nix-update-script
+, nixosTests
 }:
 buildGoModule rec {
···
   '';
   passthru = {
+    tests.incus = nixosTests.incus;
+
     updateScript = nix-update-script {
       extraArgs = [
         "-vr" "incus-\(.*\)"
···
+2
pkgs/by-name/in/incus/package.nix
···
 , rsync
 , spice-gtk
 , squashfsTools
+, util-linux
 , virtiofsd
 , xz
 }:
···
   qemu-utils
   rsync
   squashfsTools
+  util-linux
   virtiofsd
   xz
···
+6 -3
pkgs/by-name/ra/raft-cowsql/package.nix
···
   outputs = [ "dev" "out" ];
-  passthru.tests = {
-    inherit incus;
-    updateScript = gitUpdater { };
+  passthru = {
+    inherit (incus) tests;
+
+    updateScript = gitUpdater {
+      rev-prefix = "v";
+    };
   };
   meta = with lib; {
···
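With passthru wired up this way, the new NixOS tests are reachable from each package's `tests` attribute, so package-level CI will exercise the Incus module when cowsql, raft-cowsql, or incus itself changes. A sketch of evaluating one of them from a nixpkgs checkout (attribute paths per this PR; `container` comes from nixos/tests/incus/default.nix):

let
  pkgs = import ./. { };
in
pkgs.incus-unwrapped.tests.incus.container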