lxd: remove packages and modules

Changed files
+31 -1269
maintainers
nixos
pkgs
by-name
lx
lxd-lts
lxd-ui
lxd-unwrapped-lts
top-level
+1 -1
maintainers/team-list.nix
···
megheaiulian
mkg20001
];
-
scope = "All things linuxcontainers. LXC, Incus, LXD and related packages.";
+
scope = "All things linuxcontainers. Incus, LXC, and related packages.";
shortName = "lxc";
};
+2
nixos/doc/manual/release-notes/rl-2511.section.md
···
- The `services.polipo` module has been removed as `polipo` is unmaintained and archived upstream.
+
- `virtualisation.lxd` has been removed due to lack of Nixpkgs maintenance. Users can migrate to `virtualisation.incus`, a fork of LXD, as a replacement. See the [Incus migration documentation](https://linuxcontainers.org/incus/docs/main/howto/server_migrate_lxd/) for details on how to migrate.
+
- The non-LTS Forgejo package (`forgejo`) has been updated to 12.0.0. This release contains breaking changes, see the [release blog post](https://forgejo.org/2025-07-release-v12-0/)
for all the details and how to ensure smooth upgrades.
-1
nixos/modules/misc/ids.nix
···
#shout = 206; #unused
#gateone = 207; #removed 2025-08-21
namecoin = 208;
-
#lxd = 210; # unused
#kibana = 211;
xtreemfs = 212;
calibre-server = 213;
-2
nixos/modules/module-list.nix
···
./virtualisation/libvirtd.nix
./virtualisation/lxc.nix
./virtualisation/lxcfs.nix
-
./virtualisation/lxd-agent.nix
-
./virtualisation/lxd.nix
./virtualisation/multipass.nix
./virtualisation/nixos-containers.nix
./virtualisation/oci-containers.nix
+1 -1
nixos/modules/virtualisation/lxc-image-metadata.nix
···
options = {
virtualisation.lxc = {
templates = lib.mkOption {
-
description = "Templates for LXD";
+
description = "Templates for LXC images";
type = lib.types.attrsOf (lib.types.submodule templateSubmodule);
default = { };
example = lib.literalExpression ''
-110
nixos/modules/virtualisation/lxd-agent.nix
···
-
{
-
config,
-
lib,
-
pkgs,
-
...
-
}:
-
-
let
-
cfg = config.virtualisation.lxd.agent;
-
-
# the lxd agent is provided by the lxd daemon through a virtiofs or 9p mount
-
# this is a port of the distrobuilder lxd-agent generator
-
# https://github.com/lxc/distrobuilder/blob/f77300bf7d7d5707b08eaf8a434d647d1ba81b5d/generators/lxd-agent.go#L18-L55
-
preStartScript = ''
-
PREFIX="/run/lxd_agent"
-
-
mount_virtiofs() {
-
mount -t virtiofs config "$PREFIX/.mnt" >/dev/null 2>&1
-
}
-
-
mount_9p() {
-
modprobe 9pnet_virtio >/dev/null 2>&1 || true
-
mount -t 9p config "$PREFIX/.mnt" -o access=0,trans=virtio,size=1048576 >/dev/null 2>&1
-
}
-
-
fail() {
-
umount -l "$PREFIX" >/dev/null 2>&1 || true
-
rmdir "$PREFIX" >/dev/null 2>&1 || true
-
echo "$1"
-
exit 1
-
}
-
-
# Setup the mount target.
-
umount -l "$PREFIX" >/dev/null 2>&1 || true
-
mkdir -p "$PREFIX"
-
mount -t tmpfs tmpfs "$PREFIX" -o mode=0700,size=50M
-
mkdir -p "$PREFIX/.mnt"
-
-
# Try virtiofs first.
-
mount_virtiofs || mount_9p || fail "Couldn't mount virtiofs or 9p, failing."
-
-
# Copy the data.
-
cp -Ra "$PREFIX/.mnt/"* "$PREFIX"
-
-
# Unmount the temporary mount.
-
umount "$PREFIX/.mnt"
-
rmdir "$PREFIX/.mnt"
-
-
# Fix up permissions.
-
chown -R root:root "$PREFIX"
-
'';
-
in
-
{
-
options = {
-
virtualisation.lxd.agent.enable = lib.mkEnableOption "LXD agent";
-
};
-
-
config = lib.mkIf cfg.enable {
-
# https://github.com/lxc/distrobuilder/blob/f77300bf7d7d5707b08eaf8a434d647d1ba81b5d/generators/lxd-agent.go#L108-L125
-
systemd.services.lxd-agent = {
-
enable = true;
-
wantedBy = [ "multi-user.target" ];
-
before = [
-
"shutdown.target"
-
]
-
++ lib.optionals config.services.cloud-init.enable [
-
"cloud-init.target"
-
"cloud-init.service"
-
"cloud-init-local.service"
-
];
-
conflicts = [ "shutdown.target" ];
-
path = [
-
pkgs.kmod
-
pkgs.util-linux
-
-
# allow `incus exec` to find system binaries
-
"/run/current-system/sw"
-
];
-
-
preStart = preStartScript;
-
-
# avoid killing nixos-rebuild switch when executed through lxc exec
-
restartIfChanged = false;
-
stopIfChanged = false;
-
-
unitConfig = {
-
Description = "LXD - agent";
-
Documentation = "https://documentation.ubuntu.com/lxd/en/latest";
-
ConditionPathExists = "/dev/virtio-ports/org.linuxcontainers.lxd";
-
DefaultDependencies = "no";
-
StartLimitInterval = "60";
-
StartLimitBurst = "10";
-
};
-
-
serviceConfig = {
-
Type = "notify";
-
WorkingDirectory = "-/run/lxd_agent";
-
ExecStart = "/run/lxd_agent/lxd-agent";
-
Restart = "on-failure";
-
RestartSec = "5s";
-
};
-
};
-
-
systemd.paths.lxd-agent = {
-
enable = true;
-
wantedBy = [ "multi-user.target" ];
-
pathConfig.PathExists = "/dev/virtio-ports/org.linuxcontainers.lxd";
-
};
-
};
-
}
-283
nixos/modules/virtualisation/lxd.nix
···
-
# Systemd services for lxd.
-
-
{
-
config,
-
lib,
-
pkgs,
-
...
-
}:
-
-
let
-
cfg = config.virtualisation.lxd;
-
preseedFormat = pkgs.formats.yaml { };
-
in
-
{
-
imports = [
-
(lib.mkRemovedOptionModule [
-
"virtualisation"
-
"lxd"
-
"zfsPackage"
-
] "Override zfs in an overlay instead to override it globally")
-
];
-
-
options = {
-
virtualisation.lxd = {
-
enable = lib.mkOption {
-
type = lib.types.bool;
-
default = false;
-
description = ''
-
This option enables lxd, a daemon that manages
-
containers. Users in the "lxd" group can interact with
-
the daemon (e.g. to start or stop containers) using the
-
{command}`lxc` command line tool, among others.
-
-
Most of the time, you'll also want to start lxcfs, so
-
that containers can "see" the limits:
-
```
-
virtualisation.lxc.lxcfs.enable = true;
-
```
-
'';
-
};
-
-
package = lib.mkPackageOption pkgs "lxd-lts" { };
-
-
lxcPackage = lib.mkOption {
-
type = lib.types.package;
-
default = config.virtualisation.lxc.package;
-
defaultText = lib.literalExpression "config.virtualisation.lxc.package";
-
description = "The lxc package to use.";
-
};
-
-
zfsSupport = lib.mkOption {
-
type = lib.types.bool;
-
default = config.boot.zfs.enabled;
-
defaultText = lib.literalExpression "config.boot.zfs.enabled";
-
description = ''
-
Enables lxd to use zfs as a storage for containers.
-
-
This option is enabled by default if a zfs pool is configured
-
with nixos.
-
'';
-
};
-
-
recommendedSysctlSettings = lib.mkOption {
-
type = lib.types.bool;
-
default = false;
-
description = ''
-
Enables various settings to avoid common pitfalls when
-
running containers requiring many file operations.
-
Fixes errors like "Too many open files" or
-
"neighbour: ndisc_cache: neighbor table overflow!".
-
See <https://lxd.readthedocs.io/en/latest/production-setup/>
-
for details.
-
'';
-
};
-
-
preseed = lib.mkOption {
-
type = lib.types.nullOr (
-
lib.types.submodule {
-
freeformType = preseedFormat.type;
-
}
-
);
-
-
default = null;
-
-
description = ''
-
Configuration for LXD preseed, see
-
<https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#initialize-preseed>
-
for supported values.
-
-
Changes to this will be re-applied to LXD which will overwrite existing entities or create missing ones,
-
but entities will *not* be removed by preseed.
-
'';
-
-
example = lib.literalExpression ''
-
{
-
networks = [
-
{
-
name = "lxdbr0";
-
type = "bridge";
-
config = {
-
"ipv4.address" = "10.0.100.1/24";
-
"ipv4.nat" = "true";
-
};
-
}
-
];
-
profiles = [
-
{
-
name = "default";
-
devices = {
-
eth0 = {
-
name = "eth0";
-
network = "lxdbr0";
-
type = "nic";
-
};
-
root = {
-
path = "/";
-
pool = "default";
-
size = "35GiB";
-
type = "disk";
-
};
-
};
-
}
-
];
-
storage_pools = [
-
{
-
name = "default";
-
driver = "dir";
-
config = {
-
source = "/var/lib/lxd/storage-pools/default";
-
};
-
}
-
];
-
}
-
'';
-
};
-
-
startTimeout = lib.mkOption {
-
type = lib.types.int;
-
default = 600;
-
apply = toString;
-
description = ''
-
Time to wait (in seconds) for LXD to become ready to process requests.
-
If LXD does not reply within the configured time, lxd.service will be
-
considered failed and systemd will attempt to restart it.
-
'';
-
};
-
-
ui = {
-
enable = lib.mkEnableOption "(experimental) LXD UI";
-
-
package = lib.mkPackageOption pkgs [ "lxd-ui" ] { };
-
};
-
};
-
};
-
-
###### implementation
-
config = lib.mkIf cfg.enable {
-
environment.systemPackages = [ cfg.package ];
-
-
# Note: the following options are also declared in virtualisation.lxc, but
-
# the latter can't be simply enabled to reuse the formers, because it
-
# does a bunch of unrelated things.
-
systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
-
-
security.apparmor = {
-
packages = [ cfg.lxcPackage ];
-
policies = {
-
"bin.lxc-start".profile = ''
-
include ${cfg.lxcPackage}/etc/apparmor.d/usr.bin.lxc-start
-
'';
-
"lxc-containers".profile = ''
-
include ${cfg.lxcPackage}/etc/apparmor.d/lxc-containers
-
'';
-
};
-
};
-
-
systemd.sockets.lxd = {
-
description = "LXD UNIX socket";
-
wantedBy = [ "sockets.target" ];
-
-
socketConfig = {
-
ListenStream = "/var/lib/lxd/unix.socket";
-
SocketMode = "0660";
-
SocketGroup = "lxd";
-
Service = "lxd.service";
-
};
-
};
-
-
systemd.services.lxd = {
-
description = "LXD Container Management Daemon";
-
-
wantedBy = [ "multi-user.target" ];
-
after = [
-
"network-online.target"
-
(lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
-
];
-
requires = [
-
"network-online.target"
-
"lxd.socket"
-
(lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
-
];
-
documentation = [ "man:lxd(1)" ];
-
-
path = [ pkgs.util-linux ] ++ lib.optional cfg.zfsSupport config.boot.zfs.package;
-
-
environment = lib.mkIf (cfg.ui.enable) {
-
"LXD_UI" = cfg.ui.package;
-
};
-
-
serviceConfig = {
-
ExecStart = "@${cfg.package}/bin/lxd lxd --group lxd";
-
ExecStartPost = "${cfg.package}/bin/lxd waitready --timeout=${cfg.startTimeout}";
-
ExecStop = "${cfg.package}/bin/lxd shutdown";
-
-
KillMode = "process"; # when stopping, leave the containers alone
-
LimitMEMLOCK = "infinity";
-
LimitNOFILE = "1048576";
-
LimitNPROC = "infinity";
-
TasksMax = "infinity";
-
Delegate = true; # LXD needs to manage cgroups in its subtree
-
-
# By default, `lxd` loads configuration files from hard-coded
-
# `/usr/share/lxc/config` - since this is a no-go for us, we have to
-
# explicitly tell it where the actual configuration files are
-
Environment = lib.mkIf (config.virtualisation.lxc.lxcfs.enable) "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
-
};
-
-
unitConfig.ConditionPathExists = "!/var/lib/incus/.migrated-from-lxd";
-
};
-
-
systemd.services.lxd-preseed = lib.mkIf (cfg.preseed != null) {
-
description = "LXD initialization with preseed file";
-
wantedBy = [ "multi-user.target" ];
-
requires = [ "lxd.service" ];
-
after = [ "lxd.service" ];
-
-
script = ''
-
${pkgs.coreutils}/bin/cat ${preseedFormat.generate "lxd-preseed.yaml" cfg.preseed} | ${cfg.package}/bin/lxd init --preseed
-
'';
-
-
serviceConfig = {
-
Type = "oneshot";
-
};
-
};
-
-
users.groups.lxd = { };
-
-
users.users.root = {
-
subUidRanges = [
-
{
-
startUid = 1000000;
-
count = 65536;
-
}
-
];
-
subGidRanges = [
-
{
-
startGid = 1000000;
-
count = 65536;
-
}
-
];
-
};
-
-
boot.kernel.sysctl = lib.mkIf cfg.recommendedSysctlSettings {
-
"fs.inotify.max_queued_events" = 1048576;
-
"fs.inotify.max_user_instances" = 1048576;
-
"fs.inotify.max_user_watches" = 1048576;
-
"vm.max_map_count" = 262144; # TODO: Default vm.max_map_count has been increased system-wide
-
"kernel.dmesg_restrict" = 1;
-
"net.ipv4.neigh.default.gc_thresh3" = 8192;
-
"net.ipv6.neigh.default.gc_thresh3" = 8192;
-
"kernel.keys.maxkeys" = 2000;
-
};
-
-
boot.kernelModules = [
-
"veth"
-
"xt_comment"
-
"xt_CHECKSUM"
-
"xt_MASQUERADE"
-
"vhost_vsock"
-
]
-
++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
-
};
-
}
-1
nixos/tests/all-tests.nix
···
luks = runTest ./luks.nix;
lvm2 = handleTest ./lvm2 { };
lxc = handleTest ./lxc { };
-
lxd = pkgs.recurseIntoAttrs (handleTest ./lxd { inherit handleTestOn; });
lxd-image-server = runTest ./lxd-image-server.nix;
lxqt = runTest ./lxqt.nix;
ly = runTest ./ly.nix;
-4
nixos/tests/incus/default.nix
···
storageLvm = true;
};
-
lxd-to-incus = import ./lxd-to-incus.nix {
-
inherit lts pkgs system;
-
};
-
openvswitch = incusTest {
inherit lts pkgs system;
networkOvs = true;
-119
nixos/tests/incus/lxd-to-incus.nix
···
-
import ../make-test-python.nix (
-
-
{
-
pkgs,
-
lib,
-
lts ? true,
-
...
-
}:
-
-
let
-
releases = import ../../release.nix { configuration.documentation.enable = lib.mkForce false; };
-
-
container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
-
container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
-
in
-
{
-
name = "lxd-to-incus";
-
-
meta = {
-
maintainers = lib.teams.lxc.members;
-
};
-
-
nodes.machine =
-
{ ... }:
-
{
-
virtualisation = {
-
diskSize = 6144;
-
cores = 2;
-
memorySize = 2048;
-
-
lxd.enable = true;
-
lxd.preseed = {
-
networks = [
-
{
-
name = "nixostestbr0";
-
type = "bridge";
-
config = {
-
"ipv4.address" = "10.0.100.1/24";
-
"ipv4.nat" = "true";
-
};
-
}
-
];
-
profiles = [
-
{
-
name = "default";
-
devices = {
-
eth0 = {
-
name = "eth0";
-
network = "nixostestbr0";
-
type = "nic";
-
};
-
root = {
-
path = "/";
-
pool = "nixostest_pool";
-
size = "35GiB";
-
type = "disk";
-
};
-
};
-
}
-
{
-
name = "nixos_notdefault";
-
devices = { };
-
}
-
];
-
storage_pools = [
-
{
-
name = "nixostest_pool";
-
driver = "dir";
-
}
-
];
-
};
-
-
incus = {
-
enable = true;
-
package = if lts then pkgs.incus-lts else pkgs.incus;
-
};
-
};
-
networking.nftables.enable = true;
-
};
-
-
testScript = ''
-
def lxd_wait_for_preseed(_) -> bool:
-
_, output = machine.systemctl("is-active lxd-preseed.service")
-
return ("inactive" in output)
-
-
def lxd_instance_is_up(_) -> bool:
-
status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
-
return status == 0
-
-
def incus_instance_is_up(_) -> bool:
-
status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
-
return status == 0
-
-
with machine.nested("initialize lxd and resources"):
-
machine.wait_for_unit("sockets.target")
-
machine.wait_for_unit("lxd.service")
-
retry(lxd_wait_for_preseed)
-
-
machine.succeed("lxc image import ${container-image-metadata}/*/*.tar.xz ${container-image-rootfs}/*/*.tar.xz --alias nixos")
-
machine.succeed("lxc launch nixos container")
-
retry(lxd_instance_is_up)
-
-
machine.wait_for_unit("incus.service")
-
-
with machine.nested("run migration"):
-
machine.succeed("${pkgs.incus}/bin/lxd-to-incus --yes")
-
-
with machine.nested("verify resources migrated to incus"):
-
machine.succeed("incus config show container")
-
retry(incus_instance_is_up)
-
machine.succeed("incus exec container -- true")
-
machine.succeed("incus profile show default | grep nixostestbr0")
-
machine.succeed("incus profile show default | grep nixostest_pool")
-
machine.succeed("incus profile show nixos_notdefault")
-
machine.succeed("incus storage show nixostest_pool")
-
machine.succeed("incus network show nixostestbr0")
-
'';
-
}
-
)
+2 -2
nixos/tests/lxc/default.nix
···
};
};
-
lxc-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
-
lxc-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+
lxc-image-metadata = releases.incusContainerMeta.${pkgs.stdenv.hostPlatform.system};
+
lxc-image-rootfs = releases.incusContainerImage.${pkgs.stdenv.hostPlatform.system};
in
{
-133
nixos/tests/lxd/container.nix
···
-
import ../make-test-python.nix (
-
{ pkgs, lib, ... }:
-
-
let
-
releases = import ../../release.nix {
-
configuration = {
-
# Building documentation makes the test unnecessarily take a longer time:
-
documentation.enable = lib.mkForce false;
-
-
# Our tests require `grep` & friends:
-
environment.systemPackages = with pkgs; [ busybox ];
-
};
-
};
-
-
lxd-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
-
lxd-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
-
lxd-image-rootfs-squashfs = releases.lxdContainerImageSquashfs.${pkgs.stdenv.hostPlatform.system};
-
-
in
-
{
-
name = "lxd-container";
-
-
nodes.machine =
-
{ lib, ... }:
-
{
-
virtualisation = {
-
diskSize = 6144;
-
-
# Since we're testing `limits.cpu`, we've gotta have a known number of
-
# cores to lean on
-
cores = 2;
-
-
# Ditto, for `limits.memory`
-
memorySize = 512;
-
-
lxc.lxcfs.enable = true;
-
lxd.enable = true;
-
};
-
};
-
-
testScript = ''
-
def instance_is_up(_) -> bool:
-
status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
-
return status == 0
-
-
machine.wait_for_unit("sockets.target")
-
machine.wait_for_unit("lxd.service")
-
machine.wait_for_file("/var/lib/lxd/unix.socket")
-
-
# Wait for lxd to settle
-
machine.succeed("lxd waitready")
-
-
# no preseed should mean no service
-
machine.fail("systemctl status lxd-preseed.service")
-
-
machine.succeed("lxd init --minimal")
-
-
machine.succeed(
-
"lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
-
)
-
-
with subtest("Container can be managed"):
-
machine.succeed("lxc launch nixos container")
-
with machine.nested("Waiting for instance to start and be usable"):
-
retry(instance_is_up)
-
machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
-
machine.succeed("lxc delete -f container")
-
-
with subtest("Squashfs image is functional"):
-
machine.succeed(
-
"lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs-squashfs}/nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}.squashfs --alias nixos-squashfs"
-
)
-
machine.succeed("lxc launch nixos-squashfs container")
-
with machine.nested("Waiting for instance to start and be usable"):
-
retry(instance_is_up)
-
machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
-
machine.succeed("lxc delete -f container")
-
-
with subtest("Container is mounted with lxcfs inside"):
-
machine.succeed("lxc launch nixos container")
-
with machine.nested("Waiting for instance to start and be usable"):
-
retry(instance_is_up)
-
-
## ---------- ##
-
## limits.cpu ##
-
-
machine.succeed("lxc config set container limits.cpu 1")
-
machine.succeed("lxc restart container")
-
with machine.nested("Waiting for instance to start and be usable"):
-
retry(instance_is_up)
-
-
assert (
-
"1"
-
== machine.succeed("lxc exec container grep -- -c ^processor /proc/cpuinfo").strip()
-
)
-
-
machine.succeed("lxc config set container limits.cpu 2")
-
machine.succeed("lxc restart container")
-
with machine.nested("Waiting for instance to start and be usable"):
-
retry(instance_is_up)
-
-
assert (
-
"2"
-
== machine.succeed("lxc exec container grep -- -c ^processor /proc/cpuinfo").strip()
-
)
-
-
## ------------- ##
-
## limits.memory ##
-
-
machine.succeed("lxc config set container limits.memory 64MB")
-
machine.succeed("lxc restart container")
-
with machine.nested("Waiting for instance to start and be usable"):
-
retry(instance_is_up)
-
-
assert (
-
"MemTotal: 62500 kB"
-
== machine.succeed("lxc exec container grep -- MemTotal /proc/meminfo").strip()
-
)
-
-
machine.succeed("lxc config set container limits.memory 128MB")
-
machine.succeed("lxc restart container")
-
with machine.nested("Waiting for instance to start and be usable"):
-
retry(instance_is_up)
-
-
assert (
-
"MemTotal: 125000 kB"
-
== machine.succeed("lxc exec container grep -- MemTotal /proc/meminfo").strip()
-
)
-
-
machine.succeed("lxc delete -f container")
-
'';
-
}
-
)
-13
nixos/tests/lxd/default.nix
···
-
{
-
system ? builtins.currentSystem,
-
config ? { },
-
pkgs ? import ../../.. { inherit system config; },
-
handleTestOn,
-
}:
-
{
-
container = import ./container.nix { inherit system pkgs; };
-
nftables = import ./nftables.nix { inherit system pkgs; };
-
preseed = import ./preseed.nix { inherit system pkgs; };
-
ui = import ./ui.nix { inherit system pkgs; };
-
virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix { inherit system pkgs; };
-
}
-51
nixos/tests/lxd/nftables.nix
···
-
# This test makes sure that lxd stops implicitly depending on iptables when
-
# user enabled nftables.
-
#
-
# It has been extracted from `lxd.nix` for clarity, and because switching from
-
# iptables to nftables requires a full reboot, which is a bit hard inside NixOS
-
# tests.
-
-
import ../make-test-python.nix (
-
{ pkgs, lib, ... }:
-
{
-
name = "lxd-nftables";
-
-
nodes.machine =
-
{ lib, ... }:
-
{
-
virtualisation = {
-
lxd.enable = true;
-
};
-
-
networking = {
-
firewall.enable = false;
-
nftables.enable = true;
-
nftables.tables."filter".family = "inet";
-
nftables.tables."filter".content = ''
-
chain incoming {
-
type filter hook input priority 0;
-
policy accept;
-
}
-
-
chain forward {
-
type filter hook forward priority 0;
-
policy accept;
-
}
-
-
chain output {
-
type filter hook output priority 0;
-
policy accept;
-
}
-
'';
-
};
-
};
-
-
testScript = ''
-
machine.wait_for_unit("network.target")
-
-
with subtest("When nftables are enabled, lxd doesn't depend on iptables anymore"):
-
machine.succeed("lsmod | grep nf_tables")
-
machine.fail("lsmod | grep ip_tables")
-
'';
-
}
-
)
-71
nixos/tests/lxd/preseed.nix
···
-
import ../make-test-python.nix (
-
{ pkgs, lib, ... }:
-
-
{
-
name = "lxd-preseed";
-
-
nodes.machine =
-
{ lib, ... }:
-
{
-
virtualisation = {
-
diskSize = 4096;
-
-
lxc.lxcfs.enable = true;
-
lxd.enable = true;
-
-
lxd.preseed = {
-
networks = [
-
{
-
name = "nixostestbr0";
-
type = "bridge";
-
config = {
-
"ipv4.address" = "10.0.100.1/24";
-
"ipv4.nat" = "true";
-
};
-
}
-
];
-
profiles = [
-
{
-
name = "nixostest_default";
-
devices = {
-
eth0 = {
-
name = "eth0";
-
network = "nixostestbr0";
-
type = "nic";
-
};
-
root = {
-
path = "/";
-
pool = "default";
-
size = "35GiB";
-
type = "disk";
-
};
-
};
-
}
-
];
-
storage_pools = [
-
{
-
name = "nixostest_pool";
-
driver = "dir";
-
}
-
];
-
};
-
};
-
};
-
-
testScript = ''
-
def wait_for_preseed(_) -> bool:
-
_, output = machine.systemctl("is-active lxd-preseed.service")
-
return ("inactive" in output)
-
-
machine.wait_for_unit("sockets.target")
-
machine.wait_for_unit("lxd.service")
-
with machine.nested("Waiting for preseed to complete"):
-
retry(wait_for_preseed)
-
-
with subtest("Verify preseed resources created"):
-
machine.succeed("lxc profile show nixostest_default")
-
machine.succeed("lxc network info nixostestbr0")
-
machine.succeed("lxc storage show nixostest_pool")
-
'';
-
}
-
)
-74
nixos/tests/lxd/ui.nix
···
-
import ../make-test-python.nix (
-
{ pkgs, ... }:
-
{
-
name = "lxd-ui";
-
-
nodes.machine =
-
{ lib, ... }:
-
{
-
virtualisation = {
-
lxd.enable = true;
-
lxd.ui.enable = true;
-
};
-
-
environment.systemPackages =
-
let
-
seleniumScript =
-
pkgs.writers.writePython3Bin "selenium-script"
-
{
-
libraries = with pkgs.python3Packages; [ selenium ];
-
}
-
''
-
from selenium import webdriver
-
from selenium.webdriver.common.by import By
-
from selenium.webdriver.firefox.options import Options
-
from selenium.webdriver.support.ui import WebDriverWait
-
-
options = Options()
-
options.add_argument("--headless")
-
service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}") # noqa: E501
-
-
driver = webdriver.Firefox(options=options, service=service)
-
driver.implicitly_wait(10)
-
driver.get("https://localhost:8443/ui")
-
-
wait = WebDriverWait(driver, 60)
-
-
assert len(driver.find_elements(By.CLASS_NAME, "l-application")) > 0
-
assert len(driver.find_elements(By.CLASS_NAME, "l-navigation__drawer")) > 0
-
-
driver.close()
-
'';
-
in
-
with pkgs;
-
[
-
curl
-
firefox-unwrapped
-
geckodriver
-
seleniumScript
-
];
-
};
-
-
testScript = ''
-
machine.wait_for_unit("sockets.target")
-
machine.wait_for_unit("lxd.service")
-
machine.wait_for_file("/var/lib/lxd/unix.socket")
-
-
# Wait for lxd to settle
-
machine.succeed("lxd waitready")
-
-
# Configure LXC listen address
-
machine.succeed("lxc config set core.https_address :8443")
-
machine.succeed("systemctl restart lxd")
-
-
# Check that the LXD_UI environment variable is populated in the systemd unit
-
machine.succeed("cat /etc/systemd/system/lxd.service | grep 'LXD_UI'")
-
-
# Ensure the endpoint returns an HTML page with 'LXD UI' in the title
-
machine.succeed("curl -kLs https://localhost:8443/ui | grep '<title>LXD UI</title>'")
-
-
# Ensure the application is actually rendered by the Javascript
-
machine.succeed("PYTHONUNBUFFERED=1 selenium-script")
-
'';
-
}
-
)
-65
nixos/tests/lxd/virtual-machine.nix
···
-
import ../make-test-python.nix (
-
{ pkgs, lib, ... }:
-
-
let
-
releases = import ../../release.nix {
-
configuration = {
-
# Building documentation makes the test unnecessarily take a longer time:
-
documentation.enable = lib.mkForce false;
-
-
# Our tests require `grep` & friends:
-
environment.systemPackages = with pkgs; [ busybox ];
-
};
-
};
-
-
lxd-image-metadata = releases.lxdVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
-
lxd-image-disk = releases.lxdVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
-
-
instance-name = "instance1";
-
in
-
{
-
name = "lxd-virtual-machine";
-
-
nodes.machine =
-
{ lib, ... }:
-
{
-
virtualisation = {
-
diskSize = 4096;
-
-
cores = 2;
-
-
# Ensure we have enough memory for the nested virtual machine
-
memorySize = 1024;
-
-
lxc.lxcfs.enable = true;
-
lxd.enable = true;
-
};
-
};
-
-
testScript = ''
-
def instance_is_up(_) -> bool:
-
status, _ = machine.execute("lxc exec ${instance-name} --disable-stdin --force-interactive /run/current-system/sw/bin/true")
-
return status == 0
-
-
machine.wait_for_unit("sockets.target")
-
machine.wait_for_unit("lxd.service")
-
machine.wait_for_file("/var/lib/lxd/unix.socket")
-
-
# Wait for lxd to settle
-
machine.succeed("lxd waitready")
-
-
machine.succeed("lxd init --minimal")
-
-
with subtest("virtual-machine image can be imported"):
-
machine.succeed("lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-disk}/nixos.qcow2 --alias nixos")
-
-
with subtest("virtual-machine can be launched and become available"):
-
machine.succeed("lxc launch nixos ${instance-name} --vm --config limits.memory=512MB --config security.secureboot=false")
-
with machine.nested("Waiting for instance to start and be usable"):
-
retry(instance_is_up)
-
-
with subtest("lxd-agent is started"):
-
machine.succeed("lxc exec ${instance-name} systemctl is-active lxd-agent")
-
'';
-
}
-
)
-148
pkgs/by-name/lx/lxd-lts/package.nix
···
-
{
-
lib,
-
lxd-unwrapped-lts,
-
linkFarm,
-
makeWrapper,
-
stdenv,
-
symlinkJoin,
-
writeShellScriptBin,
-
acl,
-
apparmor-parser,
-
apparmor-profiles,
-
attr,
-
bash,
-
btrfs-progs,
-
cdrkit,
-
criu,
-
dnsmasq,
-
e2fsprogs,
-
getent,
-
gnutar,
-
gptfdisk,
-
gzip,
-
iproute2,
-
iptables,
-
kmod,
-
lvm2,
-
minio,
-
nftables,
-
OVMF,
-
qemu_kvm,
-
qemu-utils,
-
rsync,
-
spice-gtk,
-
squashfsTools,
-
thin-provisioning-tools,
-
util-linux,
-
virtiofsd,
-
xz,
-
}:
-
let
-
binPath = lib.makeBinPath [
-
acl
-
attr
-
bash
-
btrfs-progs
-
cdrkit
-
criu
-
dnsmasq
-
e2fsprogs
-
getent
-
gnutar
-
gptfdisk
-
gzip
-
iproute2
-
iptables
-
kmod
-
lvm2
-
minio
-
nftables
-
qemu_kvm
-
qemu-utils
-
rsync
-
squashfsTools
-
thin-provisioning-tools
-
util-linux
-
virtiofsd
-
xz
-
-
(writeShellScriptBin "apparmor_parser" ''
-
exec '${apparmor-parser}/bin/apparmor_parser' -I '${apparmor-profiles}/etc/apparmor.d' "$@"
-
'')
-
];
-
-
clientBinPath = [ spice-gtk ];
-
-
ovmf-2mb = OVMF.override {
-
secureBoot = true;
-
fdSize2MB = true;
-
};
-
-
ovmf-4mb = OVMF.override {
-
secureBoot = true;
-
fdSize4MB = true;
-
};
-
-
ovmf-prefix = if stdenv.hostPlatform.isAarch64 then "AAVMF" else "OVMF";
-
-
# mimic ovmf from https://github.com/canonical/lxd-pkg-snap/blob/3abebe1dfeb20f9b7729556960c7e9fe6ad5e17c/snapcraft.yaml#L378
-
# also found in /snap/lxd/current/share/qemu/ on a snap install
-
ovmf = linkFarm "lxd-ovmf" [
-
{
-
name = "OVMF_CODE.2MB.fd";
-
path = "${ovmf-2mb.fd}/FV/${ovmf-prefix}_CODE.fd";
-
}
-
{
-
name = "OVMF_CODE.4MB.fd";
-
path = "${ovmf-4mb.fd}/FV/${ovmf-prefix}_CODE.fd";
-
}
-
{
-
name = "OVMF_CODE.fd";
-
path = "${ovmf-2mb.fd}/FV/${ovmf-prefix}_CODE.fd";
-
}
-
-
{
-
name = "OVMF_VARS.2MB.fd";
-
path = "${ovmf-2mb.fd}/FV/${ovmf-prefix}_VARS.fd";
-
}
-
{
-
name = "OVMF_VARS.2MB.ms.fd";
-
path = "${ovmf-2mb.fd}/FV/${ovmf-prefix}_VARS.fd";
-
}
-
{
-
name = "OVMF_VARS.4MB.fd";
-
path = "${ovmf-4mb.fd}/FV/${ovmf-prefix}_VARS.fd";
-
}
-
{
-
name = "OVMF_VARS.4MB.ms.fd";
-
path = "${ovmf-4mb.fd}/FV/${ovmf-prefix}_VARS.fd";
-
}
-
{
-
name = "OVMF_VARS.fd";
-
path = "${ovmf-2mb.fd}/FV/${ovmf-prefix}_VARS.fd";
-
}
-
{
-
name = "OVMF_VARS.ms.fd";
-
path = "${ovmf-2mb.fd}/FV/${ovmf-prefix}_VARS.fd";
-
}
-
];
-
in
-
symlinkJoin {
-
name = "lxd-${lxd-unwrapped-lts.version}";
-
-
paths = [ lxd-unwrapped-lts ];
-
-
nativeBuildInputs = [ makeWrapper ];
-
-
postBuild = ''
-
wrapProgram $out/bin/lxd --prefix PATH : ${lib.escapeShellArg binPath}:${qemu_kvm}/libexec:$out/bin --set LXD_OVMF_PATH ${ovmf}
-
-
wrapProgram $out/bin/lxc --prefix PATH : ${lib.makeBinPath clientBinPath}
-
'';
-
-
passthru = {
-
inherit (lxd-unwrapped-lts) tests;
-
};
-
-
inherit (lxd-unwrapped-lts) meta pname version;
-
}
-74
pkgs/by-name/lx/lxd-ui/package.nix
···
-
{
-
lib,
-
stdenv,
-
fetchFromGitHub,
-
fetchYarnDeps,
-
nodejs,
-
fixup-yarn-lock,
-
yarn,
-
nixosTests,
-
nix-update-script,
-
}:
-
-
stdenv.mkDerivation rec {
-
pname = "lxd-ui";
-
version = "0.16";
-
-
src = fetchFromGitHub {
-
owner = "canonical";
-
repo = "lxd-ui";
-
tag = version;
-
hash = "sha256-JVozXgAu0rTjO9aNzKMzzoGYL09lRzNI5qcjDfRaMnE=";
-
};
-
-
offlineCache = fetchYarnDeps {
-
yarnLock = "${src}/yarn.lock";
-
hash = "sha256-Z/C0QgqxBWob6KIWuU8PACkTKuAhTrJzod9WNXTO8Zs=";
-
};
-
-
nativeBuildInputs = [
-
nodejs
-
fixup-yarn-lock
-
yarn
-
];
-
-
configurePhase = ''
-
runHook preConfigure
-
-
export HOME=$(mktemp -d)
-
yarn config --offline set yarn-offline-mirror "$offlineCache"
-
fixup-yarn-lock yarn.lock
-
yarn --offline --frozen-lockfile --ignore-platform --ignore-scripts --no-progress --non-interactive install
-
patchShebangs node_modules
-
-
runHook postConfigure
-
'';
-
-
buildPhase = ''
-
runHook preBuild
-
-
yarn --offline build
-
-
runHook postBuild
-
'';
-
-
installPhase = ''
-
runHook preInstall
-
-
cp -r build/ui/ $out
-
-
runHook postInstall
-
'';
-
-
passthru.tests.default = nixosTests.lxd.ui;
-
passthru.updateScript = nix-update-script { };
-
-
meta = {
-
description = "Web user interface for LXD";
-
homepage = "https://github.com/canonical/lxd-ui";
-
changelog = "https://github.com/canonical/lxd-ui/releases/tag/${version}";
-
license = lib.licenses.gpl3;
-
maintainers = [ ];
-
platforms = lib.platforms.linux;
-
};
-
}
-114
pkgs/by-name/lx/lxd-unwrapped-lts/package.nix
···
-
{
-
lib,
-
hwdata,
-
pkg-config,
-
lxc,
-
buildGoModule,
-
fetchFromGitHub,
-
acl,
-
libcap,
-
dqlite,
-
raft-canonical,
-
sqlite,
-
udev,
-
installShellFiles,
-
nixosTests,
-
nix-update-script,
-
}:
-
-
buildGoModule rec {
-
pname = "lxd-unwrapped-lts";
-
# major/minor are used in updateScript to pin to LTS
-
version = "5.21.2";
-
-
src = fetchFromGitHub {
-
owner = "canonical";
-
repo = "lxd";
-
tag = "lxd-${version}";
-
hash = "sha256-3C5pLvO7oABWFhFiWtBr5ohFFWm20Gg36WBfVVJgKdc=";
-
};
-
-
vendorHash = "sha256-W7+Z2o5cw3u0DbTZA+a3pRXt9zRggUUjFTUTtQ7B22A=";
-
-
postPatch = ''
-
substituteInPlace shared/usbid/load.go \
-
--replace "/usr/share/misc/usb.ids" "${hwdata}/share/hwdata/usb.ids"
-
'';
-
-
excludedPackages = [
-
"test"
-
"lxd/db/generate"
-
"lxd-agent"
-
"lxd-migrate"
-
];
-
-
nativeBuildInputs = [
-
installShellFiles
-
pkg-config
-
];
-
buildInputs = [
-
lxc
-
acl
-
libcap
-
dqlite.dev
-
raft-canonical.dev
-
sqlite
-
udev.dev
-
];
-
-
ldflags = [
-
"-s"
-
"-w"
-
];
-
tags = [ "libsqlite3" ];
-
-
preBuild = ''
-
# required for go-dqlite. See: https://github.com/canonical/lxd/pull/8939
-
export CGO_LDFLAGS_ALLOW="(-Wl,-wrap,pthread_create)|(-Wl,-z,now)"
-
'';
-
-
# build static binaries: https://github.com/canonical/lxd/blob/6fd175c45e65cd475d198db69d6528e489733e19/Makefile#L43-L51
-
postBuild = ''
-
make lxd-agent lxd-migrate
-
'';
-
-
checkFlags =
-
let
-
skippedTests = [
-
"TestValidateConfig"
-
"TestConvertNetworkConfig"
-
"TestConvertStorageConfig"
-
"TestSnapshotCommon"
-
"TestContainerTestSuite"
-
];
-
in
-
[ "-skip=^${builtins.concatStringsSep "$|^" skippedTests}$" ];
-
-
postInstall = ''
-
installShellCompletion --bash --name lxd ./scripts/bash/lxd-client
-
'';
-
-
passthru = {
-
tests.lxd = nixosTests.lxd;
-
tests.lxd-to-incus = nixosTests.incus.lxd-to-incus;
-
-
updateScript = nix-update-script {
-
extraArgs = [
-
"--version-regex"
-
"lxd-(5.21.*)"
-
];
-
};
-
};
-
-
meta = {
-
description = "Daemon based on liblxc offering a REST API to manage containers";
-
homepage = "https://ubuntu.com/lxd";
-
changelog = "https://github.com/canonical/lxd/releases/tag/lxd-${version}";
-
license = with lib.licenses; [
-
asl20
-
agpl3Plus
-
];
-
maintainers = with lib.maintainers; [ ];
-
platforms = lib.platforms.linux;
-
};
-
}
+25 -2
pkgs/top-level/aliases.nix
···
luci-go = throw "luci-go has been removed since it was unused and failing to build for 5 months"; # Added 2025-08-27
lumail = throw "'lumail' has been removed since its upstream is unavailable"; # Added 2025-05-07
lv_img_conv = throw "'lv_img_conv' has been removed from nixpkgs as it is broken"; # Added 2024-06-18
-
lxd = lib.warnOnInstantiate "lxd has been renamed to lxd-lts" lxd-lts; # Added 2024-04-01
+
lxd = throw ''
+
LXD has been removed from NixOS due to lack of Nixpkgs maintenance.
+
Consider migrating or switching to Incus, or remove from your configuration.
+
https://linuxcontainers.org/incus/docs/main/howto/server_migrate_lxd/
+
''; # Added 2025-09-05
+
lxd-lts = throw ''
+
LXD has been removed from NixOS due to lack of Nixpkgs maintenance.
+
Consider migrating or switching to Incus, or remove from your configuration.
+
https://linuxcontainers.org/incus/docs/main/howto/server_migrate_lxd/
+
''; # Added 2025-09-05
+
lxd-ui = throw ''
+
LXD has been removed from NixOS due to lack of Nixpkgs maintenance.
+
Consider migrating or switching to Incus, or remove from your configuration.
+
https://linuxcontainers.org/incus/docs/main/howto/server_migrate_lxd/
+
''; # Added 2025-09-05
+
lxd-unwrapped = throw ''
+
LXD has been removed from NixOS due to lack of Nixpkgs maintenance.
+
Consider migrating or switching to Incus, or remove from your configuration.
+
https://linuxcontainers.org/incus/docs/main/howto/server_migrate_lxd/
+
''; # Added 2025-09-05
+
lxd-unwrapped-lts = throw ''
+
LXD has been removed from NixOS due to lack of Nixpkgs maintenance.
+
Consider migrating or switching to Incus, or remove from your configuration.
+
https://linuxcontainers.org/incus/docs/main/howto/server_migrate_lxd/
+
''; # Added 2025-09-05
lxde = {
gtk2-x11 = throw "'lxde.gtk2-x11' has been removed. Use 'gtk2-x11' directly.";
···
lxtask = throw "'lxtask' has been moved to top-level. Use 'lxtask' directly"; # added 2025-08-31
};
-
lxd-unwrapped = lib.warnOnInstantiate "lxd-unwrapped has been renamed to lxd-unwrapped-lts" lxd-unwrapped-lts; # Added 2024-04-01
lxdvdrip = throw "'lxdvdrip' has been removed due to lack of upstream maintenance."; # Added 2025-06-09
lzma = throw "'lzma' has been renamed to/replaced by 'xz'"; # Converted to throw 2024-10-17
lzwolf = throw "'lzwolf' has been removed because it's no longer maintained upstream. Consider using 'ecwolf'"; # Added 2025-03-02