nixos/nebula: don't run as root; support relays

Changed files: +66 -26

- nixos/doc/manual/from_md/release-notes/rl-2305.section.xml
- nixos/doc/manual/release-notes/rl-2305.section.md
- nixos/modules/services/networking/nebula.nix
- nixos/tests/nebula.nix
nixos/doc/manual/from_md/release-notes/rl-2305.section.xml (+22)
···
</listitem>
<listitem>
<para>
+ Nebula now runs as a system user and group created for each
+ nebula network, using the <literal>CAP_NET_ADMIN</literal>
+ ambient capability on launch rather than starting as root.
+ Ensure that any files each Nebula instance needs to access are
+ owned by the correct user and group, by default
+ <literal>nebula-${networkName}</literal>.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
In <literal>mastodon</literal> it is now necessary to specify
location of file with <literal>PostgreSQL</literal> database
password. In
···
<link xlink:href="options.html#opt-services.garage.package">services.garage.package</link>
or upgrade accordingly
<link xlink:href="options.html#opt-system.stateVersion">system.stateVersion</link>.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ Nebula now supports the
+ <literal>services.nebula.networks.&lt;name&gt;.isRelay</literal>
+ and
+ <literal>services.nebula.networks.&lt;name&gt;.relays</literal>
+ configuration options for setting up or allowing traffic
+ relaying. See the
+ <link xlink:href="https://www.defined.net/blog/announcing-relay-support-in-nebula/">announcement</link>
+ for more details about relays.
</para>
</listitem>
<listitem>
nixos/doc/manual/release-notes/rl-2305.section.md (+4)
···
- The [services.wordpress.sites.<name>.plugins](#opt-services.wordpress.sites._name_.plugins) and [services.wordpress.sites.<name>.themes](#opt-services.wordpress.sites._name_.themes) options have been converted from sets to attribute sets to allow for consumers to specify explicit install paths via attribute name.
+ - Nebula now runs as a system user and group created for each nebula network, using the `CAP_NET_ADMIN` ambient capability on launch rather than starting as root. Ensure that any files each Nebula instance needs to access are owned by the correct user and group, by default `nebula-${networkName}`.
+
- In `mastodon` it is now necessary to specify location of file with `PostgreSQL` database password. In `services.mastodon.database.passwordFile` parameter default value `/var/lib/mastodon/secrets/db-password` has been changed to `null`.
- The `--target-host` and `--build-host` options of `nixos-rebuild` no longer treat the `localhost` value specially – to build on/deploy to local machine, omit the relevant flag.
···
- Increased the minimum length of a response that will be gzipped.
- [Garage](https://garagehq.deuxfleurs.fr/) version is based on [system.stateVersion](options.html#opt-system.stateVersion), existing installations will keep using version 0.7. New installations will use version 0.8. In order to upgrade a Garage cluster, please follow [upstream instructions](https://garagehq.deuxfleurs.fr/documentation/cookbook/upgrading/) and force [services.garage.package](options.html#opt-services.garage.package) or upgrade accordingly [system.stateVersion](options.html#opt-system.stateVersion).
+
+ - Nebula now supports the `services.nebula.networks.<name>.isRelay` and `services.nebula.networks.<name>.relays` configuration options for setting up or allowing traffic relaying. See the [announcement](https://www.defined.net/blog/announcing-relay-support-in-nebula/) for more details about relays.
- `hip` has been separated into `hip`, `hip-common` and `hipcc`.
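The ownership requirement in the first note is worth spelling out: since the service now runs as an unprivileged per-network user, any CA, certificate, or key files referenced by a network definition must be readable by that user. A minimal sketch, assuming a network named `mynet` and key material under `/etc/nebula` (both hypothetical), using `systemd.tmpfiles` `z` rules to hand the files to `nebula-mynet`:

```nix
{
  # Hypothetical network name and file locations; adjust to your setup.
  services.nebula.networks.mynet = {
    enable = true;
    ca = "/etc/nebula/ca.crt";
    cert = "/etc/nebula/host.crt";
    key = "/etc/nebula/host.key";
    lighthouses = [ "192.168.100.1" ];
  };

  # The unit now runs as nebula-mynet:nebula-mynet, so the files above must
  # be readable by that user instead of only by root. A "z" tmpfiles rule
  # adjusts ownership/permissions of existing files at activation time.
  systemd.tmpfiles.rules = [
    "z /etc/nebula/ca.crt   0644 nebula-mynet nebula-mynet -"
    "z /etc/nebula/host.crt 0644 nebula-mynet nebula-mynet -"
    "z /etc/nebula/host.key 0640 nebula-mynet nebula-mynet -"
  ];
}
```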
nixos/modules/services/networking/nebula.nix (+31 -18)
···
description = lib.mdDoc "Whether this node is a lighthouse.";
};
+ isRelay = mkOption {
+ type = types.bool;
+ default = false;
+ description = lib.mdDoc "Whether this node is a relay.";
+ };
+
lighthouses = mkOption {
type = types.listOf types.str;
default = [];
description = lib.mdDoc ''
List of IPs of lighthouse hosts this node should report to and query from. This should be empty on lighthouse
nodes. The IPs should be the lighthouse's Nebula IPs, not their external IPs.
'';
example = [ "192.168.100.1" ];
};
+
+ relays = mkOption {
+ type = types.listOf types.str;
+ default = [];
+ description = lib.mdDoc ''
+ List of IPs of relays that this node should allow traffic from.
+ '';
+ example = [ "192.168.100.1" ];
+ };
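To make the intent of the two new options concrete, here is a hedged sketch of the two sides of a relayed connection; the network name `mynet`, the node names, and the `192.168.100.x` address are illustrative only:

```nix
{
  # Node that forwards traffic on behalf of others.
  relayNode = { ... }: {
    services.nebula.networks.mynet.isRelay = true;
  };

  # Node (e.g. behind NAT) that may be reached through that relay.
  # As with `lighthouses`, the address is the relay's Nebula IP,
  # not its public IP.
  clientNode = { ... }: {
    services.nebula.networks.mynet.relays = [ "192.168.100.1" ];
  };
}
```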
···
am_lighthouse = netCfg.isLighthouse;
hosts = netCfg.lighthouses;
};
+ relay = {
+ am_relay = netCfg.isRelay;
+ relays = netCfg.relays;
+ };
listen = {
host = netCfg.listen.host;
port = netCfg.listen.port;
···
configFile = format.generate "nebula-config-${netName}.yml" settings;
in
{
- # Create systemd service for Nebula.
+ # Create the systemd service for Nebula.
"nebula@${netName}" = {
description = "Nebula VPN service for ${netName}";
wants = [ "basic.target" ];
after = [ "basic.target" "network.target" ];
before = [ "sshd.service" ];
wantedBy = [ "multi-user.target" ];
- serviceConfig = mkMerge [
- {
- Type = "simple";
- Restart = "always";
- ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
- }
- # The service needs to launch as root to access the tun device, if it's enabled.
- (mkIf netCfg.tun.disable {
- User = networkId;
- Group = networkId;
- })
- ];
+ serviceConfig = {
+ Type = "simple";
+ Restart = "always";
+ ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
+ CapabilityBoundingSet = "CAP_NET_ADMIN";
+ AmbientCapabilities = "CAP_NET_ADMIN";
+ User = networkId;
+ Group = networkId;
+ };
unitConfig.StartLimitIntervalSec = 0; # ensure Restart=always is always honoured (networks can go down for arbitrarily long)
};
}) enabledNetworks);
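The `networkId` assigned to `User` and `Group` comes from the module's `nameToId` helper; per the release note it defaults to `nebula-${networkName}`. A tiny illustration of that naming scheme (the helper body shown here is an assumption based on the documented default, not copied from the module):

```nix
let
  # Assumed shape of the helper referenced above: prefix the network name.
  nameToId = netName: "nebula-${netName}";
in
nameToId "mynet"   # evaluates to "nebula-mynet"
```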
···
# Create the service users and groups.
users.users = mkMerge (mapAttrsToList (netName: netCfg:
- mkIf netCfg.tun.disable {
+ {
${nameToId netName} = {
group = nameToId netName;
description = "Nebula service user for network ${netName}";
···
};
}) enabledNetworks);
- users.groups = mkMerge (mapAttrsToList (netName: netCfg:
- mkIf netCfg.tun.disable {
- ${nameToId netName} = {};
- }) enabledNetworks);
+ users.groups = mkMerge (mapAttrsToList (netName: netCfg: { ${nameToId netName} = {}; }) enabledNetworks);
};
}
nixos/tests/nebula.nix (+9 -8)
···
testScript = let
setUpPrivateKey = name: ''
- ${name}.succeed(
- "mkdir -p /root/.ssh",
- "chown 700 /root/.ssh",
- "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
- "chown 600 /root/.ssh/id_snakeoil",
- )
+ ${name}.start()
+ ${name}.succeed(
+ "mkdir -p /root/.ssh",
+ "chown 700 /root/.ssh",
+ "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
+ "chown 600 /root/.ssh/id_snakeoil",
+ )
'';
# From what I can tell, StrictHostKeyChecking=no is necessary for ssh to work between machines.
···
${name}.succeed(
"scp ${sshOpts} 192.168.1.1:/tmp/${name}.crt /etc/nebula/${name}.crt",
"scp ${sshOpts} 192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt",
+ '(id nebula-smoke >/dev/null && chown -R nebula-smoke:nebula-smoke /etc/nebula) || true'
)
'';
in ''
- start_all()
-
# Create the certificate and sign the lighthouse's keys.
${setUpPrivateKey "lighthouse"}
lighthouse.succeed(
"mkdir -p /etc/nebula",
'nebula-cert ca -name "Smoke Test" -out-crt /etc/nebula/ca.crt -out-key /etc/nebula/ca.key',
'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "lighthouse" -groups "lighthouse" -ip "10.0.100.1/24" -out-crt /etc/nebula/lighthouse.crt -out-key /etc/nebula/lighthouse.key',
+ 'chown -R nebula-smoke:nebula-smoke /etc/nebula'
)
# Reboot the lighthouse and verify that the nebula service comes up on boot.
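A natural follow-up assertion, not part of this change, would be to check that the unit really does run unprivileged. Sketched as an extra test-script line, assuming the unit is named `nebula@smoke` after the test's network and that the per-network user is the `nebula-smoke` the test already creates:

```nix
# Hypothetical extra check for testScript (not in this PR).
''
  lighthouse.succeed("systemctl show -p User nebula@smoke.service | grep -q '^User=nebula-smoke$'")
''
```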