nixos/firezone: init modules

oddlama 6c888f49 2fcd77b3

Changed files
+3009
nixos
+2
nixos/doc/manual/release-notes/rl-2505.section.md
···
- [Schroot](https://codeberg.org/shelter/reschroot), a lightweight virtualisation tool. Securely enter a chroot and run a command or login shell. Available as [programs.schroot](#opt-programs.schroot.enable).
+
- [Firezone](https://firezone.dev), an enterprise-ready zero-trust access platform built on WireGuard. This includes the server stack as [services.firezone.server.enable](#opt-services.firezone.server.enable), a TURN/STUN relay service as [services.firezone.relay.enable](#opt-services.firezone.relay.enable), a gateway service as [services.firezone.gateway.enable](#opt-services.firezone.gateway.enable), a headless client as [services.firezone.headless-client.enable](#opt-services.firezone.headless-client.enable) and a GUI client as [services.firezone.gui-client.enable](#opt-services.firezone.gui-client.enable).
+
- [crab-hole](https://github.com/LuckyTurtleDev/crab-hole), a cross platform Pi-hole clone written in Rust using hickory-dns/trust-dns. Available as [services.crab-hole](#opt-services.crab-hole.enable).
- [zwave-js-ui](https://zwave-js.github.io/zwave-js-ui/), a full featured Z-Wave Control Panel and MQTT Gateway. Available as [services.zwave-js-ui](#opt-services.zwave-js-ui.enable).
+5
nixos/modules/module-list.nix
···
./services/networking/firewall.nix
./services/networking/firewall-iptables.nix
./services/networking/firewall-nftables.nix
+
./services/networking/firezone/gateway.nix
+
./services/networking/firezone/gui-client.nix
+
./services/networking/firezone/headless-client.nix
+
./services/networking/firezone/relay.nix
+
./services/networking/firezone/server.nix
./services/networking/flannel.nix
./services/networking/freenet.nix
./services/networking/freeradius.nix
+159
nixos/modules/services/networking/firezone/gateway.nix
···
+
# NixOS module for the Firezone gateway, the data-plane component that
# forwards traffic between clients and protected resources.
{
  lib,
  pkgs,
  config,
  ...
}:
let
  inherit (lib)
    boolToString
    getExe
    mkEnableOption
    mkIf
    mkOption
    mkPackageOption
    types
    ;

  cfg = config.services.firezone.gateway;
in
{
  options = {
    services.firezone.gateway = {
      enable = mkOption {
        default = false;
        example = true;
        description = ''
          Whether to enable the firezone gateway.

          You have to manually masquerade and forward traffic from the
          tun-firezone interface to your resource! Refer to the
          [upstream setup script](https://github.com/firezone/firezone/blob/8c7c0a9e8e33ae790aeb75fdb5a15432c2870b79/scripts/gateway-systemd-install.sh#L154-L168)
          for a list of iptables commands.

          See the firezone nixos test in this repository for an nftables based example.
        '';
        # `types` is already inherited from lib above; use it here for
        # consistency with every other option in this module.
        type = types.bool;
      };
      package = mkPackageOption pkgs "firezone-gateway" { };

      name = mkOption {
        type = types.str;
        description = "The name of this gateway as shown in firezone";
      };

      apiUrl = mkOption {
        type = types.strMatching "^wss://.+/$";
        example = "wss://firezone.example.com/api/";
        description = ''
          The URL of your firezone server's API. This should be the same
          as your server's setting for {option}`services.firezone.server.settings.api.externalUrl`,
          but with `wss://` instead of `https://`.
        '';
      };

      tokenFile = mkOption {
        type = types.path;
        example = "/run/secrets/firezone-gateway-token";
        description = ''
          A file containing the firezone gateway token. Do not use a nix-store path here
          as it will make the token publicly readable!

          This file will be passed via systemd credentials, it should only be accessible
          by the root user.
        '';
      };

      logLevel = mkOption {
        type = types.str;
        default = "info";
        description = ''
          The log level for the firezone application. See
          [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging)
          for the format.
        '';
      };

      enableTelemetry = mkEnableOption "telemetry";
    };
  };

  config = mkIf cfg.enable {
    systemd.services.firezone-gateway = {
      description = "Gateway service for the Firezone zero-trust access platform";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      # uuidgen (from util-linux) is needed to generate a persistent gateway id.
      path = [ pkgs.util-linux ];
      script = ''
        # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid.
        if [[ -z "''${FIREZONE_ID:-}" ]]; then
          if [[ ! -e gateway_id ]]; then
            uuidgen -r > gateway_id
          fi
          export FIREZONE_ID=$(< gateway_id)
        fi

        export FIREZONE_TOKEN=$(< "$CREDENTIALS_DIRECTORY/firezone-token")
        exec ${getExe cfg.package}
      '';

      environment = {
        FIREZONE_API_URL = cfg.apiUrl;
        FIREZONE_NAME = cfg.name;
        FIREZONE_NO_TELEMETRY = boolToString (!cfg.enableTelemetry);
        RUST_LOG = cfg.logLevel;
      };

      serviceConfig = {
        Type = "exec";
        DynamicUser = true;
        User = "firezone-gateway";
        # Pass the token file via systemd credentials so the (dynamic) service
        # user can read it without the file being world readable.
        LoadCredential = [ "firezone-token:${cfg.tokenFile}" ];

        # The gateway creates and manages its own tun interface.
        DeviceAllow = "/dev/net/tun";
        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
        CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];

        StateDirectory = "firezone-gateway";
        WorkingDirectory = "/var/lib/firezone-gateway";

        Restart = "on-failure";
        RestartSec = 10;

        # Hardening
        LockPersonality = true;
        MemoryDenyWriteExecute = true;
        NoNewPrivileges = true;
        PrivateMounts = true;
        PrivateTmp = true;
        PrivateUsers = false;
        ProcSubset = "pid";
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        ProtectSystem = "strict";
        RestrictAddressFamilies = [
          "AF_INET"
          "AF_INET6"
          "AF_NETLINK"
        ];
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        SystemCallArchitectures = "native";
        SystemCallFilter = "@system-service";
        UMask = "077";
      };
    };
  };

  meta.maintainers = with lib.maintainers; [
    oddlama
    patrickdag
  ];
}
+138
nixos/modules/services/networking/firezone/gui-client.nix
···
+
# NixOS module for the Firezone GUI client: installs the desktop
# application and runs the privileged IPC service it talks to.
{
  lib,
  pkgs,
  config,
  ...
}:
let
  # NOTE: `boolToString` was previously inherited here but never used in
  # this module (the GUI client has no telemetry option); it was removed.
  inherit (lib)
    getExe'
    mkEnableOption
    mkIf
    mkOption
    mkPackageOption
    types
    ;

  cfg = config.services.firezone.gui-client;
in
{
  options = {
    services.firezone.gui-client = {
      enable = mkEnableOption "the firezone gui client";
      package = mkPackageOption pkgs "firezone-gui-client" { };

      allowedUsers = mkOption {
        type = types.listOf types.str;
        default = [ ];
        description = ''
          All listed users will become part of the `firezone-client` group so
          they can control the IPC service. This is a convenience option.
        '';
      };

      name = mkOption {
        type = types.str;
        description = "The name of this client as shown in firezone";
      };

      logLevel = mkOption {
        type = types.str;
        default = "info";
        description = ''
          The log level for the firezone application. See
          [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging)
          for the format.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    users.groups.firezone-client.members = cfg.allowedUsers;

    # Required for deep-link mimetype registration
    environment.systemPackages = [ cfg.package ];

    # Required for the token store in the gui application
    services.gnome.gnome-keyring.enable = true;

    systemd.services.firezone-ipc-service = {
      description = "GUI IPC service for the Firezone zero-trust access platform";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      # uuidgen (from util-linux) is needed to generate a persistent client id.
      path = [ pkgs.util-linux ];
      script = ''
        # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid.
        if [[ -z "''${FIREZONE_ID:-}" ]]; then
          if [[ ! -e client_id ]]; then
            uuidgen -r > client_id
          fi
          export FIREZONE_ID=$(< client_id)
        fi

        exec ${getExe' cfg.package "firezone-client-ipc"} run
      '';

      environment = {
        FIREZONE_NAME = cfg.name;
        LOG_DIR = "%L/dev.firezone.client";
        RUST_LOG = cfg.logLevel;
      };

      serviceConfig = {
        Type = "notify";

        # The IPC service creates and manages the tun interface.
        DeviceAllow = "/dev/net/tun";
        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
        CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];

        # This block contains hardcoded values in the client, we cannot change these :(
        Group = "firezone-client";
        RuntimeDirectory = "dev.firezone.client";
        StateDirectory = "dev.firezone.client";
        WorkingDirectory = "/var/lib/dev.firezone.client";
        LogsDirectory = "dev.firezone.client";

        Restart = "on-failure";
        RestartSec = 10;

        # Hardening
        LockPersonality = true;
        MemoryDenyWriteExecute = true;
        NoNewPrivileges = true;
        PrivateMounts = true;
        PrivateTmp = true;
        PrivateUsers = false;
        ProcSubset = "pid";
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        ProtectSystem = "strict";
        # AF_UNIX is required for the IPC socket to the GUI.
        RestrictAddressFamilies = [
          "AF_INET"
          "AF_INET6"
          "AF_NETLINK"
          "AF_UNIX"
        ];
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        SystemCallArchitectures = "native";
        SystemCallFilter = "@system-service";
        UMask = "077";
      };
    };
  };

  meta.maintainers = with lib.maintainers; [
    oddlama
    patrickdag
  ];
}
+148
nixos/modules/services/networking/firezone/headless-client.nix
···
+
# NixOS module for the Firezone headless client, a daemon that connects
# this machine to a Firezone account using a service-account token.
{
  lib,
  pkgs,
  config,
  ...
}:
let
  cfg = config.services.firezone.headless-client;
in
{
  options.services.firezone.headless-client = {
    enable = lib.mkEnableOption "the firezone headless client";
    package = lib.mkPackageOption pkgs "firezone-headless-client" { };

    name = lib.mkOption {
      type = lib.types.str;
      description = "The name of this client as shown in firezone";
    };

    apiUrl = lib.mkOption {
      type = lib.types.strMatching "^wss://.+/$";
      example = "wss://firezone.example.com/api/";
      description = ''
        The URL of your firezone server's API. This should be the same
        as your server's setting for {option}`services.firezone.server.settings.api.externalUrl`,
        but with `wss://` instead of `https://`.
      '';
    };

    tokenFile = lib.mkOption {
      type = lib.types.path;
      example = "/run/secrets/firezone-client-token";
      description = ''
        A file containing the firezone client token. Do not use a nix-store path here
        as it will make the token publicly readable!

        This file will be passed via systemd credentials, it should only be accessible
        by the root user.
      '';
    };

    logLevel = lib.mkOption {
      type = lib.types.str;
      default = "info";
      description = ''
        The log level for the firezone application. See
        [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging)
        for the format.
      '';
    };

    enableTelemetry = lib.mkEnableOption "telemetry";
  };

  config = lib.mkIf cfg.enable {
    systemd.services.firezone-headless-client = {
      description = "headless client service for the Firezone zero-trust access platform";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      # uuidgen (from util-linux) is needed to generate a persistent client id.
      path = [ pkgs.util-linux ];
      script = ''
        # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid.
        if [[ -z "''${FIREZONE_ID:-}" ]]; then
          if [[ ! -e client_id ]]; then
            uuidgen -r > client_id
          fi
          export FIREZONE_ID=$(< client_id)
        fi

        exec ${lib.getExe cfg.package}
      '';

      environment = {
        FIREZONE_API_URL = cfg.apiUrl;
        FIREZONE_NAME = cfg.name;
        FIREZONE_NO_TELEMETRY = lib.boolToString (!cfg.enableTelemetry);
        # %d expands to the credentials directory of the unit.
        FIREZONE_TOKEN_PATH = "%d/firezone-token";
        LOG_DIR = "%L/dev.firezone.client";
        RUST_LOG = cfg.logLevel;
      };

      serviceConfig = {
        Type = "exec";
        # The token is exposed to the service via systemd credentials.
        LoadCredential = [ "firezone-token:${cfg.tokenFile}" ];

        # The client creates and manages the tun interface.
        DeviceAllow = "/dev/net/tun";
        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
        CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];

        # Hardcoded values in the client :(
        RuntimeDirectory = "dev.firezone.client";
        StateDirectory = "dev.firezone.client";
        WorkingDirectory = "/var/lib/dev.firezone.client";
        LogsDirectory = "dev.firezone.client";

        Restart = "on-failure";
        RestartSec = 10;

        # Hardening
        LockPersonality = true;
        MemoryDenyWriteExecute = true;
        NoNewPrivileges = true;
        PrivateMounts = true;
        PrivateTmp = true;
        PrivateUsers = false;
        ProcSubset = "pid";
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        ProtectSystem = "strict";
        RestrictAddressFamilies = [
          "AF_INET"
          "AF_INET6"
          "AF_NETLINK"
          "AF_UNIX"
        ];
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        SystemCallArchitectures = "native";
        SystemCallFilter = "@system-service";
        UMask = "077";
      };
    };
  };

  meta.maintainers = with lib.maintainers; [
    oddlama
    patrickdag
  ];
}
+709
nixos/modules/services/networking/firezone/provision.exs
···
+
defmodule Provision do
+
alias Domain.{Repo, Accounts, Auth, Actors, Resources, Tokens, Gateways, Relays, Policies}
+
require Logger
+
+
# UUID Mapping handling: persists a mapping from stable external ids
# (account slugs, entity keys) to the UUIDs firezone generated for them,
# so re-provisioning can find previously created entities. The mapping is
# cached in the process dictionary under :uuid_mappings.
defmodule UuidMapping do
  @mapping_file "provision-uuids.json"

  # Returns the cached mapping, loading it from disk on first access.
  defp current do
    Process.get(:uuid_mappings) || load()
  end

  # Caches the given mapping in the process dictionary and returns it.
  defp remember(mapping) do
    Process.put(:uuid_mappings, mapping)
    mapping
  end

  # Loads the mapping from file, falling back to an empty mapping when the
  # file is missing or contains invalid JSON.
  def load do
    mappings =
      case File.read(@mapping_file) do
        {:ok, content} ->
          case Jason.decode(content) do
            {:ok, mapping} -> mapping
            _ -> %{"accounts" => %{}}
          end

        _ ->
          %{"accounts" => %{}}
      end

    remember(mappings)
  end

  # Saves the current mapping (defaulting to the one in the process dictionary)
  def save(mapping \\ Process.get(:uuid_mappings)) do
    File.write!(@mapping_file, Jason.encode!(mapping))
  end

  # Retrieves the account-level mapping from a given mapping (or from Process)
  def get_account(mapping \\ Process.get(:uuid_mappings), account_slug) do
    get_in(mapping, ["accounts", account_slug]) || %{}
  end

  # Retrieves the entity mapping for a specific account and type
  def get_entities(mapping \\ Process.get(:uuid_mappings), account_slug, type) do
    get_in(mapping, ["accounts", account_slug, type]) || %{}
  end

  # Retrieves an entity mapping for a specific account, type and external_id
  def get_entity(mapping \\ Process.get(:uuid_mappings), account_slug, type, external_id) do
    get_in(mapping, ["accounts", account_slug, type, external_id])
  end

  # Updates (or creates) the account UUID mapping and caches the result.
  def update_account(account_slug, uuid) do
    current()
    |> ensure_account_exists(account_slug)
    |> put_in(["accounts", account_slug, "id"], uuid)
    |> remember()
  end

  # Ensures that the given account exists in the mapping.
  def ensure_account_exists(mapping, account_slug) do
    if Map.has_key?(mapping["accounts"], account_slug) do
      mapping
    else
      put_in(mapping, ["accounts", account_slug], %{})
    end
  end

  # Merges new_entries into the mapping for entities of the given type.
  def update_entities(account_slug, type, new_entries) do
    mapping = ensure_account_exists(current(), account_slug)
    merged = Map.merge(get_entities(mapping, account_slug, type), new_entries)
    remember(put_in(mapping, ["accounts", account_slug, type], merged))
  end

  # Removes an entire account from the mapping.
  def remove_account(account_slug) do
    current()
    |> update_in(["accounts"], &Map.delete(&1, account_slug))
    |> remember()
  end

  # Removes a specific entity mapping for the account.
  def remove_entity(account_slug, type, key) do
    current()
    |> update_in(["accounts", account_slug, type], &Map.delete(&1 || %{}, key))
    |> remember()
  end
end
+
+
# Recursively walks maps, lists and strings, replacing every {env:VAR}
# placeholder in string values with the value of the environment variable
# VAR. Raises when a referenced variable is not set.
defp resolve_references(value) when is_map(value) do
  Map.new(value, fn {key, val} -> {key, resolve_references(val)} end)
end

defp resolve_references(value) when is_list(value) do
  for item <- value, do: resolve_references(item)
end

defp resolve_references(value) when is_binary(value) do
  Regex.replace(~r/\{env:([^}]+)\}/, value, fn _full_match, var ->
    System.get_env(var) || raise "Environment variable #{var} not set"
  end)
end

# Any other value (numbers, booleans, nil, ...) passes through unchanged.
defp resolve_references(value), do: value
+
+
# Converts the string keys of a map (recursively, for nested map values)
# into atoms. Non-string keys and non-map values are left untouched.
# NOTE(review): uses String.to_atom on config-controlled keys — fine for a
# provisioning script with a bounded key set.
defp atomize_keys(map) when is_map(map) do
  Map.new(map, fn {key, val} ->
    atom_key = if is_binary(key), do: String.to_atom(key), else: key
    new_val = if is_map(val), do: atomize_keys(val), else: val
    {atom_key, new_val}
  end)
end
+
+
# Soft-deletes the account with the given uuid by setting deleted_at,
# unless it is already deleted or cannot be fetched.
defp cleanup_account(uuid) do
  with {:ok, %{deleted_at: nil} = account} <- Accounts.fetch_account_by_id_or_slug(uuid) do
    Logger.info("Deleting removed account #{account.slug}")

    account
    |> Ecto.Changeset.change(%{deleted_at: DateTime.utc_now()})
    |> Repo.update!()
  else
    _ -> :ok
  end
end
+
+
# Generic deletion helper shared by the cleanup_* functions below: fetches
# an entity via fetch_fn and deletes it via delete_fn, logging the given
# label. Entities that can no longer be fetched are silently skipped.
defp cleanup_via(fetch_fn, delete_fn, label, uuid, subject) do
  case fetch_fn.(uuid, subject) do
    {:ok, value} ->
      Logger.info("Deleting removed #{label} #{value.name}")
      {:ok, _} = delete_fn.(value, subject)

    _ ->
      :ok
  end
end

defp cleanup_actor(uuid, subject),
  do: cleanup_via(&Actors.fetch_actor_by_id/2, &Actors.delete_actor/2, "actor", uuid, subject)

defp cleanup_provider(uuid, subject),
  do: cleanup_via(&Auth.fetch_provider_by_id/2, &Auth.delete_provider/2, "provider", uuid, subject)

defp cleanup_gateway_group(uuid, subject),
  do: cleanup_via(&Gateways.fetch_group_by_id/2, &Gateways.delete_group/2, "gateway group", uuid, subject)

defp cleanup_relay_group(uuid, subject),
  do: cleanup_via(&Relays.fetch_group_by_id/2, &Relays.delete_group/2, "relay group", uuid, subject)

defp cleanup_actor_group(uuid, subject),
  do: cleanup_via(&Actors.fetch_group_by_id/2, &Actors.delete_group/2, "actor group", uuid, subject)
+
+
# Fetch resource by uuid, but follow the chain of replacements if any
defp fetch_resource(uuid, subject) do
  result = Resources.fetch_resource_by_id(uuid, subject)

  case result do
    {:ok, %{replaced_by_resource_id: next_id}} when next_id != nil ->
      fetch_resource(next_id, subject)

    other ->
      other
  end
end

# Deletes the (possibly replaced) resource with the given uuid, unless it
# is already deleted or cannot be fetched.
defp cleanup_resource(uuid, subject) do
  with {:ok, %{deleted_at: nil} = resource} <- fetch_resource(uuid, subject) do
    Logger.info("Deleting removed resource #{resource.name}")
    {:ok, _} = Resources.delete_resource(resource, subject)
  else
    _ -> :ok
  end
end
+
+
# Fetch policy by uuid, but follow the chain of replacements if any
defp fetch_policy(uuid, subject) do
  result = Policies.fetch_policy_by_id(uuid, subject)

  case result do
    {:ok, %{replaced_by_policy_id: next_id}} when next_id != nil ->
      fetch_policy(next_id, subject)

    other ->
      other
  end
end

# Deletes the (possibly replaced) policy with the given uuid, unless it is
# already deleted or cannot be fetched.
defp cleanup_policy(uuid, subject) do
  with {:ok, %{deleted_at: nil} = policy} <- fetch_policy(uuid, subject) do
    Logger.info("Deleting removed policy #{policy.description}")
    {:ok, _} = Policies.delete_policy(policy, subject)
  else
    _ -> :ok
  end
end
+
+
# Deletes, for one entity type of one account, every previously provisioned
# entity whose external id no longer appears in the desired state (read from
# the :current_entities process dictionary entry set up by the caller).
defp cleanup_entity_type(account_slug, entity_type, cleanup_fn, temp_admin_subject) do
  existing_entities = UuidMapping.get_entities(account_slug, entity_type)
  current_entities = Process.get(:current_entities)
  kept_ids = current_entities[entity_type] || []

  existing_entities
  |> Map.keys()
  |> Kernel.--(kept_ids)
  |> Enum.each(fn entity_id ->
    case existing_entities[entity_id] do
      nil ->
        :ok

      uuid ->
        # Delete the stale entity, then drop it from the UUID mapping.
        cleanup_fn.(uuid, temp_admin_subject)
        UuidMapping.remove_entity(account_slug, entity_type, entity_id)
    end
  end)
end
+
+
# Collects, per entity type, the external ids present in the desired account
# state. "everyone" is always part of actor_groups because it is created
# implicitly for every account.
defp collect_current_entities(account_data) do
  keys_of = fn field -> Map.keys(account_data[field] || %{}) end

  %{
    "actors" => keys_of.("actors"),
    "providers" => keys_of.("auth"),
    "gateway_groups" => keys_of.("gatewayGroups"),
    "relay_groups" => keys_of.("relayGroups"),
    "actor_groups" => keys_of.("groups") ++ ["everyone"],
    "resources" => keys_of.("resources"),
    "policies" => keys_of.("policies")
  }
end
+
+
# Normalizes a fetch result: collapses nil, :not_found errors and
# soft-deleted entities to nil; any other value passes through unchanged.
defp nil_if_deleted_or_not_found(fetch_result) do
  case fetch_result do
    nil -> nil
    {:error, :not_found} -> nil
    # Soft-deleted entities are treated the same as missing ones.
    {:ok, entity} when entity.deleted_at != nil -> nil
    other -> other
  end
end
+
+
# Creates a short-lived admin actor (with email identity and a one-hour
# token) used as the authorization subject for all provisioning operations.
# Returns {subject, actor, identity, token}; see cleanup_temp_admin/4.
defp create_temp_admin(account, email_provider) do
  admin_email = "firezone-provision@localhost.local"

  auth_context = %Auth.Context{
    type: :browser,
    user_agent: "Unspecified/0.0",
    remote_ip: {127, 0, 0, 1},
    remote_ip_location_region: "N/A",
    remote_ip_location_city: "N/A",
    remote_ip_location_lat: 0.0,
    remote_ip_location_lon: 0.0
  }

  {:ok, admin_actor} =
    Actors.create_actor(account, %{
      type: :account_admin_user,
      name: "Provisioning"
    })

  {:ok, admin_identity} =
    Auth.create_identity(admin_actor, email_provider, %{
      provider_identifier: admin_email,
      provider_identifier_confirmation: admin_email
    })

  # The token only needs to survive one provisioning run.
  expires_at = DateTime.utc_now() |> DateTime.add(1, :hour)

  {:ok, admin_token} =
    Auth.create_token(admin_identity, auth_context, "temporarynonce", expires_at)

  {:ok, admin_subject} = Auth.build_subject(admin_token, auth_context)

  {admin_subject, admin_actor, admin_identity, admin_token}
end
+
+
# Tears down the temporary admin created by create_temp_admin/2: deletes
# its token, email identity and actor, in that order.
defp cleanup_temp_admin(actor, email_identity, token, subject) do
  Logger.info("Cleaning up temporary admin actor")

  {:ok, _} = Tokens.delete_token(token, subject)
  {:ok, _} = Auth.delete_identity(email_identity, subject)
  {:ok, _} = Actors.delete_actor(actor, subject)
end
+
+
def provision() do
+
Logger.info("Starting provisioning")
+
+
# Load desired state
+
json_file = "provision-state.json"
+
{:ok, raw_json} = File.read(json_file)
+
{:ok, %{"accounts" => accounts}} = Jason.decode(raw_json)
+
accounts = resolve_references(accounts)
+
+
# Load existing UUID mappings into the process dictionary.
+
UuidMapping.load()
+
+
# Clean up removed accounts first
+
current_account_slugs = Map.keys(accounts)
+
existing_accounts = Map.keys(Process.get(:uuid_mappings)["accounts"])
+
removed_accounts = existing_accounts -- current_account_slugs
+
+
Enum.each(removed_accounts, fn slug ->
+
if uuid = get_in(Process.get(:uuid_mappings), ["accounts", slug, "id"]) do
+
cleanup_account(uuid)
+
# Remove the account from the UUID mapping.
+
UuidMapping.remove_account(slug)
+
end
+
end)
+
+
multi = Enum.reduce(accounts, Ecto.Multi.new(), fn {slug, account_data}, multi ->
+
account_attrs = atomize_keys(%{
+
name: account_data["name"],
+
slug: slug,
+
features: Map.get(account_data, "features", %{}),
+
metadata: Map.get(account_data, "metadata", %{}),
+
limits: Map.get(account_data, "limits", %{})
+
})
+
+
multi = multi
+
|> Ecto.Multi.run({:account, slug}, fn repo, _changes ->
+
case Accounts.fetch_account_by_id_or_slug(slug) do
+
{:ok, acc} ->
+
Logger.info("Updating existing account #{slug}")
+
updated_acc = acc |> Ecto.Changeset.change(account_attrs) |> repo.update!()
+
{:ok, {:existing, updated_acc}}
+
_ ->
+
Logger.info("Creating new account #{slug}")
+
{:ok, account} = Accounts.create_account(account_attrs)
+
+
Logger.info("Creating internet gateway group")
+
{:ok, internet_site} = Gateways.create_internet_group(account)
+
+
Logger.info("Creating internet resource")
+
{:ok, _internet_resource} = Resources.create_internet_resource(account, internet_site)
+
+
# Store mapping of slug to UUID
+
UuidMapping.update_account(slug, account.id)
+
{:ok, {:new, account}}
+
end
+
end)
+
|> Ecto.Multi.run({:everyone_group, slug}, fn _repo, changes ->
+
case Map.get(changes, {:account, slug}) do
+
{:new, account} ->
+
Logger.info("Creating everyone group for new account")
+
{:ok, actor_group} = Actors.create_managed_group(account, %{name: "Everyone", membership_rules: [%{operator: true}]})
+
UuidMapping.update_entities(slug, "actor_groups", %{"everyone" => actor_group.id})
+
{:ok, actor_group}
+
{:existing, _account} ->
+
{:ok, :skipped}
+
end
+
end)
+
|> Ecto.Multi.run({:email_provider, slug}, fn _repo, changes ->
+
case Map.get(changes, {:account, slug}) do
+
{:new, account} ->
+
Logger.info("Creating default email provider for new account")
+
Auth.create_provider(account, %{name: "Email", adapter: :email, adapter_config: %{}})
+
{:existing, account} ->
+
Auth.Provider.Query.not_disabled()
+
|> Auth.Provider.Query.by_adapter(:email)
+
|> Auth.Provider.Query.by_account_id(account.id)
+
|> Repo.fetch(Auth.Provider.Query, [])
+
end
+
end)
+
|> Ecto.Multi.run({:temp_admin, slug}, fn _repo, changes ->
+
{_, account} = changes[{:account, slug}]
+
email_provider = changes[{:email_provider, slug}]
+
{:ok, create_temp_admin(account, email_provider)}
+
end)
+
+
# Clean up removed entities for this account after we have an admin subject
+
multi = multi
+
|> Ecto.Multi.run({:cleanup_entities, slug}, fn _repo, changes ->
+
{temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
+
+
# Store current entities in process dictionary for our helper function
+
current_entities = collect_current_entities(account_data)
+
Process.put(:current_entities, current_entities)
+
+
# Define entity types and their cleanup functions
+
entity_types = [
+
{"actors", &cleanup_actor/2},
+
{"providers", &cleanup_provider/2},
+
{"gateway_groups", &cleanup_gateway_group/2},
+
{"relay_groups", &cleanup_relay_group/2},
+
{"actor_groups", &cleanup_actor_group/2},
+
{"resources", &cleanup_resource/2},
+
{"policies", &cleanup_policy/2}
+
]
+
+
# Clean up each entity type
+
Enum.each(entity_types, fn {entity_type, cleanup_fn} ->
+
cleanup_entity_type(slug, entity_type, cleanup_fn, temp_admin_subject)
+
end)
+
+
{:ok, :cleaned}
+
end)
+
+
# Create or update actors
+
multi = Enum.reduce(account_data["actors"] || %{}, multi, fn {external_id, actor_data}, multi ->
+
actor_attrs = atomize_keys(%{
+
name: actor_data["name"],
+
type: String.to_atom(actor_data["type"])
+
})
+
+
Ecto.Multi.run(multi, {:actor, slug, external_id}, fn _repo, changes ->
+
{_, account} = changes[{:account, slug}]
+
{temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
+
uuid = UuidMapping.get_entity(slug, "actors", external_id)
+
case uuid && Actors.fetch_actor_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
+
nil ->
+
Logger.info("Creating new actor #{actor_data["name"]}")
+
{:ok, actor} = Actors.create_actor(account, actor_attrs)
+
# Update the mapping without manually handling Process.get/put.
+
UuidMapping.update_entities(slug, "actors", %{external_id => actor.id})
+
{:ok, {:new, actor}}
+
{:ok, existing_actor} ->
+
Logger.info("Updating existing actor #{actor_data["name"]}")
+
{:ok, updated_act} = Actors.update_actor(existing_actor, actor_attrs, temp_admin_subject)
+
{:ok, {:existing, updated_act}}
+
end
+
end)
+
|> Ecto.Multi.run({:actor_identity, slug, external_id}, fn repo, changes ->
+
email_provider = changes[{:email_provider, slug}]
+
case Map.get(changes, {:actor, slug, external_id}) do
+
{:new, actor} ->
+
Logger.info("Creating actor email identity")
+
Auth.create_identity(actor, email_provider, %{
+
provider_identifier: actor_data["email"],
+
provider_identifier_confirmation: actor_data["email"]
+
})
+
{:existing, actor} ->
+
Logger.info("Updating actor email identity")
+
{:ok, identity} = Auth.Identity.Query.not_deleted()
+
|> Auth.Identity.Query.by_actor_id(actor.id)
+
|> Auth.Identity.Query.by_provider_id(email_provider.id)
+
|> Repo.fetch(Auth.Identity.Query, [])
+
+
{:ok, identity |> Ecto.Changeset.change(%{
+
provider_identifier: actor_data["email"]
+
}) |> repo.update!()}
+
end
+
end)
+
end)
+
+
# Create or update providers
+
multi = Enum.reduce(account_data["auth"] || %{}, multi, fn {external_id, provider_data}, multi ->
+
Ecto.Multi.run(multi, {:provider, slug, external_id}, fn repo, changes ->
+
provider_attrs = %{
+
name: provider_data["name"],
+
adapter: String.to_atom(provider_data["adapter"]),
+
adapter_config: provider_data["adapter_config"]
+
}
+
+
{_, account} = changes[{:account, slug}]
+
{temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
+
uuid = UuidMapping.get_entity(slug, "providers", external_id)
+
case uuid && Auth.fetch_provider_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
+
nil ->
+
Logger.info("Creating new provider #{provider_data["name"]}")
+
{:ok, provider} = Auth.create_provider(account, provider_attrs)
+
UuidMapping.update_entities(slug, "providers", %{external_id => provider.id})
+
{:ok, provider}
+
{:ok, existing} ->
+
Logger.info("Updating existing provider #{provider_data["name"]}")
+
{:ok, existing |> Ecto.Changeset.change(provider_attrs) |> repo.update!()}
+
end
+
end)
+
end)
+
+
# Create or update gateway_groups
+
multi = Enum.reduce(account_data["gatewayGroups"] || %{}, multi, fn {external_id, gateway_group_data}, multi ->
+
Ecto.Multi.run(multi, {:gateway_group, slug, external_id}, fn _repo, changes ->
+
gateway_group_attrs = %{
+
name: gateway_group_data["name"],
+
tokens: [%{}]
+
}
+
+
{_, account} = changes[{:account, slug}]
+
{temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
+
uuid = UuidMapping.get_entity(slug, "gateway_groups", external_id)
+
case uuid && Gateways.fetch_group_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
+
nil ->
+
Logger.info("Creating new gateway group #{gateway_group_data["name"]}")
+
gateway_group = account
+
|> Gateways.Group.Changeset.create(gateway_group_attrs, temp_admin_subject)
+
|> Repo.insert!()
+
UuidMapping.update_entities(slug, "gateway_groups", %{external_id => gateway_group.id})
+
{:ok, gateway_group}
+
{:ok, existing} ->
+
# Nothing to update
+
{:ok, existing}
+
end
+
end)
+
end)
+
+
# Create or update relay_groups
+
multi = Enum.reduce(account_data["relayGroups"] || %{}, multi, fn {external_id, relay_group_data}, multi ->
+
Ecto.Multi.run(multi, {:relay_group, slug, external_id}, fn _repo, changes ->
+
relay_group_attrs = %{
+
name: relay_group_data["name"]
+
}
+
+
{temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
+
uuid = UuidMapping.get_entity(slug, "relay_groups", external_id)
+
existing_relay_group = uuid && Relays.fetch_group_by_id(uuid, temp_admin_subject)
+
case existing_relay_group do
+
v when v in [nil, {:error, :not_found}] ->
+
Logger.info("Creating new relay group #{relay_group_data["name"]}")
+
{:ok, relay_group} = Relays.create_group(relay_group_attrs, temp_admin_subject)
+
UuidMapping.update_entities(slug, "relay_groups", %{external_id => relay_group.id})
+
{:ok, relay_group}
+
{:ok, existing} ->
+
# Nothing to update
+
{:ok, existing}
+
end
+
end)
+
end)
+
+
# Create or update actor_groups
+
multi = Enum.reduce(account_data["groups"] || %{}, multi, fn {external_id, actor_group_data}, multi ->
+
Ecto.Multi.run(multi, {:actor_group, slug, external_id}, fn _repo, changes ->
+
actor_group_attrs = %{
+
name: actor_group_data["name"],
+
type: :static
+
}
+
+
{temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
+
uuid = UuidMapping.get_entity(slug, "actor_groups", external_id)
+
case uuid && Actors.fetch_group_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
+
nil ->
+
Logger.info("Creating new actor group #{actor_group_data["name"]}")
+
{:ok, actor_group} = Actors.create_group(actor_group_attrs, temp_admin_subject)
+
UuidMapping.update_entities(slug, "actor_groups", %{external_id => actor_group.id})
+
{:ok, actor_group}
+
{:ok, existing} ->
+
# Nothing to update
+
{:ok, existing}
+
end
+
end)
+
|> Ecto.Multi.run({:actor_group_members, slug, external_id}, fn repo, changes ->
+
{_, account} = changes[{:account, slug}]
+
group_uuid = UuidMapping.get_entity(slug, "actor_groups", external_id)
+
+
memberships =
+
Actors.Membership.Query.all()
+
|> Actors.Membership.Query.by_group_id(group_uuid)
+
|> Actors.Membership.Query.returning_all()
+
|> Repo.all()
+
+
existing_members = Enum.map(memberships, fn membership -> membership.actor_id end)
+
desired_members = Enum.map(actor_group_data["members"] || [], fn member ->
+
uuid = UuidMapping.get_entity(slug, "actors", member)
+
if uuid == nil do
+
raise "Cannot find provisioned actor #{member} to add to group"
+
end
+
uuid
+
end)
+
+
missing_members = desired_members -- existing_members
+
untracked_members = existing_members -- desired_members
+
+
Logger.info("Updating members for actor group #{external_id}")
+
Enum.each(missing_members || [], fn actor_uuid ->
+
Logger.info("Adding member #{external_id}")
+
Actors.Membership.Changeset.upsert(account.id, %Actors.Membership{}, %{
+
group_id: group_uuid,
+
actor_id: actor_uuid
+
})
+
|> repo.insert!()
+
end)
+
+
if actor_group_data["forceMembers"] == true do
+
# Remove untracked members
+
to_delete = Enum.map(untracked_members, fn actor_uuid -> {group_uuid, actor_uuid} end)
+
if to_delete != [] do
+
Actors.Membership.Query.by_group_id_and_actor_id({:in, to_delete})
+
|> repo.delete_all()
+
end
+
end
+
+
{:ok, nil}
+
end)
+
end)
+
+
# Create or update resources
+
multi = Enum.reduce(account_data["resources"] || %{}, multi, fn {external_id, resource_data}, multi ->
+
Ecto.Multi.run(multi, {:resource, slug, external_id}, fn _repo, changes ->
+
resource_attrs = %{
+
type: String.to_atom(resource_data["type"]),
+
name: resource_data["name"],
+
address: resource_data["address"],
+
address_description: resource_data["address_description"],
+
connections: Enum.map(resource_data["gatewayGroups"] || [], fn group ->
+
%{gateway_group_id: UuidMapping.get_entity(slug, "gateway_groups", group)}
+
end),
+
filters: Enum.map(resource_data["filters"] || [], fn filter ->
+
%{
+
ports: filter["ports"] || [],
+
protocol: String.to_atom(filter["protocol"])
+
}
+
end)
+
}
+
+
{temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
+
uuid = UuidMapping.get_entity(slug, "resources", external_id)
+
case uuid && fetch_resource(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
+
nil ->
+
Logger.info("Creating new resource #{resource_data["name"]}")
+
{:ok, resource} = Resources.create_resource(resource_attrs, temp_admin_subject)
+
UuidMapping.update_entities(slug, "resources", %{external_id => resource.id})
+
{:ok, resource}
+
{:ok, existing} ->
+
existing = Repo.preload(existing, :connections)
+
Logger.info("Updating existing resource #{resource_data["name"]}")
+
only_updated_attrs = resource_attrs
+
|> Enum.reject(fn {key, value} ->
+
case key do
+
# Compare connections by gateway_group_id only
+
:connections -> value == Enum.map(existing.connections || [], fn conn -> Map.take(conn, [:gateway_group_id]) end)
+
# Compare filters by ports and protocol only
+
:filters -> value == Enum.map(existing.filters || [], fn filter -> Map.take(filter, [:ports, :protocol]) end)
+
_ -> Map.get(existing, key) == value
+
end
+
end)
+
|> Enum.into(%{})
+
+
if only_updated_attrs == %{} do
+
{:ok, existing}
+
else
+
resource = case existing |> Resources.update_resource(resource_attrs, temp_admin_subject) do
+
{:replaced, _old, new} ->
+
UuidMapping.update_entities(slug, "resources", %{external_id => new.id})
+
new
+
{:updated, value} -> value
+
x -> x
+
end
+
+
{:ok, resource}
+
end
+
end
+
end)
+
end)
+
+
# Create or update policies
+
multi = Enum.reduce(account_data["policies"] || %{}, multi, fn {external_id, policy_data}, multi ->
+
Ecto.Multi.run(multi, {:policy, slug, external_id}, fn _repo, changes ->
+
policy_attrs = %{
+
description: policy_data["description"],
+
actor_group_id: UuidMapping.get_entity(slug, "actor_groups", policy_data["group"]),
+
resource_id: UuidMapping.get_entity(slug, "resources", policy_data["resource"])
+
}
+
+
{temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
+
uuid = UuidMapping.get_entity(slug, "policies", external_id)
+
case uuid && fetch_policy(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
+
nil ->
+
Logger.info("Creating new policy #{policy_data["name"]}")
+
{:ok, policy} = Policies.create_policy(policy_attrs, temp_admin_subject)
+
UuidMapping.update_entities(slug, "policies", %{external_id => policy.id})
+
{:ok, policy}
+
{:ok, existing} ->
+
Logger.info("Updating existing policy #{policy_data["name"]}")
+
only_updated_attrs = policy_attrs
+
|> Enum.reject(fn {key, value} -> Map.get(existing, key) == value end)
+
|> Enum.into(%{})
+
+
if only_updated_attrs == %{} do
+
{:ok, existing}
+
else
+
policy = case existing |> Policies.update_policy(policy_attrs, temp_admin_subject) do
+
{:replaced, _old, new} ->
+
UuidMapping.update_entities(slug, "policies", %{external_id => new.id})
+
new
+
{:updated, value} -> value
+
x -> x
+
end
+
+
{:ok, policy}
+
end
+
end
+
end)
+
end)
+
+
# Clean up temporary admin after all operations
+
multi |> Ecto.Multi.run({:cleanup_temp_admin, slug}, fn _repo, changes ->
+
{temp_admin_subject, temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token} =
+
changes[{:temp_admin, slug}]
+
+
cleanup_temp_admin(temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token, temp_admin_subject)
+
{:ok, :cleaned}
+
end)
+
end)
+
|> Ecto.Multi.run({:save_state}, fn _repo, _changes ->
+
# Save all UUID mappings to disk.
+
UuidMapping.save()
+
{:ok, :saved}
+
end)
+
+
case Repo.transaction(multi) do
+
{:ok, _result} ->
+
Logger.info("Provisioning completed successfully")
+
{:error, step, reason, _changes} ->
+
Logger.error("Provisioning failed at step #{inspect(step)}, no changes were applied: #{inspect(reason)}")
+
end
+
end
+
end
+
+
Provision.provision()
+202
nixos/modules/services/networking/firezone/relay.nix
···
+
{
+
lib,
+
pkgs,
+
config,
+
...
+
}:
+
let
+
inherit (lib)
+
boolToString
+
getExe
+
mkEnableOption
+
mkIf
+
mkOption
+
mkPackageOption
+
types
+
;
+
+
cfg = config.services.firezone.relay;
+
in
+
{
+
options = {
+
services.firezone.relay = {
+
enable = mkEnableOption "the firezone relay server";
+
package = mkPackageOption pkgs "firezone-relay" { };
+
+
name = mkOption {
+
type = types.str;
+
example = "My relay";
+
description = "The name of this gateway as shown in firezone";
+
};
+
+
publicIpv4 = mkOption {
+
type = types.nullOr types.str;
+
default = null;
+
description = "The public ipv4 address of this relay";
+
};
+
+
publicIpv6 = mkOption {
+
type = types.nullOr types.str;
+
default = null;
+
description = "The public ipv6 address of this relay";
+
};
+
+
openFirewall = mkOption {
+
type = types.bool;
+
default = true;
+
description = "Opens up the main STUN port and the TURN allocation range.";
+
};
+
+
port = mkOption {
+
type = types.port;
+
default = 3478;
+
description = "The port to listen on for STUN messages";
+
};
+
+
lowestPort = mkOption {
+
type = types.port;
+
default = 49152;
+
description = "The lowest port to use in TURN allocation";
+
};
+
+
highestPort = mkOption {
+
type = types.port;
+
default = 65535;
+
description = "The highest port to use in TURN allocation";
+
};
+
+
apiUrl = mkOption {
+
type = types.strMatching "^wss://.+/$";
+
example = "wss://firezone.example.com/api/";
+
description = ''
+
The URL of your firezone server's API. This should be the same
+
as your server's setting for {option}`services.firezone.server.settings.api.externalUrl`,
+
but with `wss://` instead of `https://`.
+
'';
+
};
+
+
tokenFile = mkOption {
+
type = types.path;
+
example = "/run/secrets/firezone-relay-token";
+
description = ''
+
A file containing the firezone relay token. Do not use a nix-store path here
+
as it will make the token publicly readable!
+
+
This file will be passed via systemd credentials, it should only be accessible
+
by the root user.
+
'';
+
};
+
+
logLevel = mkOption {
+
type = types.str;
+
default = "info";
+
description = ''
+
The log level for the firezone application. See
+
[RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging)
+
for the format.
+
'';
+
};
+
+
enableTelemetry = mkEnableOption "telemetry";
+
};
+
};
+
+
config = mkIf cfg.enable {
+
assertions = [
+
{
+
assertion = cfg.publicIpv4 != null || cfg.publicIpv6 != null;
+
message = "At least one of `services.firezone.relay.publicIpv4` and `services.firezone.relay.publicIpv6` must be set";
+
}
+
];
+
+
networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
networking.firewall.allowedUDPPortRanges = mkIf cfg.openFirewall [
+
{
+
from = cfg.lowestPort;
+
to = cfg.highestPort;
+
}
+
];
+
+
systemd.services.firezone-relay = {
+
description = "relay service for the Firezone zero-trust access platform";
+
after = [ "network.target" ];
+
wantedBy = [ "multi-user.target" ];
+
+
path = [ pkgs.util-linux ];
+
script = ''
+
# If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid.
+
if [[ -z "''${FIREZONE_ID:-}" ]]; then
+
if [[ ! -e relay_id ]]; then
+
uuidgen -r > relay_id
+
fi
+
export FIREZONE_ID=$(< relay_id)
+
fi
+
+
export FIREZONE_TOKEN=$(< "$CREDENTIALS_DIRECTORY/firezone-token")
+
exec ${getExe cfg.package}
+
'';
+
+
environment = {
+
FIREZONE_API_URL = cfg.apiUrl;
+
FIREZONE_NAME = cfg.name;
+
FIREZONE_TELEMETRY = boolToString cfg.enableTelemetry;
+
+
PUBLIC_IP4_ADDR = cfg.publicIpv4;
+
PUBLIC_IP6_ADDR = cfg.publicIpv6;
+
+
LISTEN_PORT = toString cfg.port;
+
LOWEST_PORT = toString cfg.lowestPort;
+
HIGHEST_PORT = toString cfg.highestPort;
+
+
RUST_LOG = cfg.logLevel;
+
LOG_FORMAT = "human";
+
};
+
+
serviceConfig = {
+
Type = "exec";
+
DynamicUser = true;
+
User = "firezone-relay";
+
LoadCredential = [ "firezone-token:${cfg.tokenFile}" ];
+
+
StateDirectory = "firezone-relay";
+
WorkingDirectory = "/var/lib/firezone-relay";
+
+
Restart = "on-failure";
+
RestartSec = 10;
+
+
LockPersonality = true;
+
MemoryDenyWriteExecute = true;
+
NoNewPrivileges = true;
+
PrivateMounts = true;
+
PrivateTmp = true;
+
PrivateUsers = false;
+
ProcSubset = "pid";
+
ProtectClock = true;
+
ProtectControlGroups = true;
+
ProtectHome = true;
+
ProtectHostname = true;
+
ProtectKernelLogs = true;
+
ProtectKernelModules = true;
+
ProtectKernelTunables = true;
+
ProtectProc = "invisible";
+
ProtectSystem = "strict";
+
RestrictAddressFamilies = [
+
"AF_INET"
+
"AF_INET6"
+
"AF_NETLINK"
+
];
+
RestrictNamespaces = true;
+
RestrictRealtime = true;
+
RestrictSUIDSGID = true;
+
SystemCallArchitectures = "native";
+
SystemCallFilter = "@system-service";
+
UMask = "077";
+
};
+
};
+
};
+
+
meta.maintainers = with lib.maintainers; [
+
oddlama
+
patrickdag
+
];
+
}
+1210
nixos/modules/services/networking/firezone/server.nix
···
+
{
+
lib,
+
pkgs,
+
config,
+
...
+
}:
+
let
+
inherit (lib)
+
attrNames
+
boolToString
+
concatLines
+
concatLists
+
concatMapAttrs
+
concatStringsSep
+
filterAttrs
+
filterAttrsRecursive
+
flip
+
forEach
+
getExe
+
isBool
+
mapAttrs
+
mapAttrsToList
+
mkDefault
+
mkEnableOption
+
mkIf
+
mkMerge
+
mkOption
+
mkPackageOption
+
optionalAttrs
+
optionalString
+
recursiveUpdate
+
subtractLists
+
toUpper
+
types
+
;
+
+
cfg = config.services.firezone.server;
+
jsonFormat = pkgs.formats.json { };
+
availableAuthAdapters = [
+
"email"
+
"openid_connect"
+
"userpass"
+
"token"
+
"google_workspace"
+
"microsoft_entra"
+
"okta"
+
"jumpcloud"
+
];
+
+
typePortRange =
+
types.coercedTo types.port
+
(x: {
+
from = x;
+
to = x;
+
})
+
(
+
types.submodule {
+
options = {
+
from = mkOption {
+
type = types.port;
+
description = "The start of the port range, inclusive.";
+
};
+
+
to = mkOption {
+
type = types.port;
+
description = "The end of the port range, inclusive.";
+
};
+
};
+
}
+
);
+
+
# All non-secret environment variables or the given component
+
collectEnvironment =
+
component:
+
mapAttrs (_: v: if isBool v then boolToString v else toString v) (
+
cfg.settings // cfg.${component}.settings
+
);
+
+
# All mandatory secrets which were not explicitly provided by the user will
+
# have to be generated, if they do not yet exist.
+
generateSecrets =
+
let
+
requiredSecrets = filterAttrs (_: v: v == null) cfg.settingsSecret;
+
in
+
''
+
mkdir -p secrets
+
chmod 700 secrets
+
''
+
+ concatLines (
+
forEach (attrNames requiredSecrets) (secret: ''
+
if [[ ! -e secrets/${secret} ]]; then
+
echo "Generating ${secret}"
+
# Some secrets like TOKENS_KEY_BASE require a value >=64 bytes.
+
head -c 64 /dev/urandom | base64 -w 0 > secrets/${secret}
+
chmod 600 secrets/${secret}
+
fi
+
'')
+
);
+
+
# All secrets given in `cfg.settingsSecret` must be loaded from a file and
+
# exported into the environment. Also exclude any variables that were
+
# overwritten by the local component settings.
+
loadSecretEnvironment =
+
component:
+
let
+
relevantSecrets = subtractLists (attrNames cfg.${component}.settings) (
+
attrNames cfg.settingsSecret
+
);
+
in
+
concatLines (
+
forEach relevantSecrets (
+
secret:
+
''export ${secret}=$(< ${
+
if cfg.settingsSecret.${secret} == null then
+
"secrets/${secret}"
+
else
+
"\"$CREDENTIALS_DIRECTORY/${secret}\""
+
})''
+
)
+
);
+
+
provisionStateJson =
+
let
+
# Convert clientSecretFile options into the real counterpart
+
augmentedAccounts = flip mapAttrs cfg.provision.accounts (
+
accountName: account:
+
account
+
// {
+
auth = flip mapAttrs account.auth (
+
authName: auth:
+
recursiveUpdate auth (
+
optionalAttrs (auth.adapter_config.clientSecretFile != null) {
+
adapter_config.client_secret = "{env:AUTH_CLIENT_SECRET_${toUpper accountName}_${toUpper authName}}";
+
}
+
)
+
);
+
}
+
);
+
in
+
jsonFormat.generate "provision-state.json" {
+
# Do not include any clientSecretFile attributes in the resulting json
+
accounts = filterAttrsRecursive (k: _: k != "clientSecretFile") augmentedAccounts;
+
};
+
+
commonServiceConfig = {
+
AmbientCapablities = [ ];
+
CapabilityBoundingSet = [ ];
+
LockPersonality = true;
+
MemoryDenyWriteExecute = true;
+
NoNewPrivileges = true;
+
PrivateMounts = true;
+
PrivateTmp = true;
+
PrivateUsers = false;
+
ProcSubset = "pid";
+
ProtectClock = true;
+
ProtectControlGroups = true;
+
ProtectHome = true;
+
ProtectHostname = true;
+
ProtectKernelLogs = true;
+
ProtectKernelModules = true;
+
ProtectKernelTunables = true;
+
ProtectProc = "invisible";
+
ProtectSystem = "strict";
+
RestrictAddressFamilies = [
+
"AF_INET"
+
"AF_INET6"
+
"AF_NETLINK"
+
"AF_UNIX"
+
];
+
RestrictNamespaces = true;
+
RestrictRealtime = true;
+
RestrictSUIDSGID = true;
+
SystemCallArchitectures = "native";
+
SystemCallFilter = "@system-service";
+
UMask = "077";
+
+
DynamicUser = true;
+
User = "firezone";
+
+
Slice = "system-firezone.slice";
+
StateDirectory = "firezone";
+
WorkingDirectory = "/var/lib/firezone";
+
+
LoadCredential = mapAttrsToList (secretName: secretFile: "${secretName}:${secretFile}") (
+
filterAttrs (_: v: v != null) cfg.settingsSecret
+
);
+
Type = "exec";
+
Restart = "on-failure";
+
RestartSec = 10;
+
};
+
+
componentOptions = component: {
+
enable = mkEnableOption "the Firezone ${component} server";
+
package = mkPackageOption pkgs "firezone-server-${component}" { };
+
+
settings = mkOption {
+
description = ''
+
Environment variables for this component of the Firezone server. For a
+
list of available variables, please refer to the [upstream definitions](https://github.com/firezone/firezone/blob/main/elixir/apps/domain/lib/domain/config/definitions.ex).
+
Some variables like `OUTBOUND_EMAIL_ADAPTER_OPTS` require json values
+
for which you can use `VAR = builtins.toJSON { /* ... */ }`.
+
+
This component will automatically inherit all variables defined via
+
{option}`services.firezone.server.settings` and
+
{option}`services.firezone.server.settingsSecret`, but which can be
+
overwritten by this option.
+
'';
+
default = { };
+
type = types.submodule {
+
freeformType = types.attrsOf (
+
types.oneOf [
+
types.bool
+
types.float
+
types.int
+
types.str
+
types.path
+
types.package
+
]
+
);
+
};
+
};
+
};
+
in
+
{
+
options.services.firezone.server = {
+
enable = mkEnableOption "all Firezone components";
+
enableLocalDB = mkEnableOption "a local postgresql database for Firezone";
+
nginx.enable = mkEnableOption "nginx virtualhost definition";
+
+
openClusterFirewall = mkOption {
+
type = types.bool;
+
default = false;
+
description = ''
+
Opens up the erlang distribution port of all enabled components to
+
allow reaching the server cluster from the internet. You only need to
+
set this if you are actually distributing your cluster across multiple
+
machines.
+
'';
+
};
+
+
clusterHosts = mkOption {
+
type = types.listOf types.str;
+
default = [
+
"api@localhost.localdomain"
+
"web@localhost.localdomain"
+
"domain@localhost.localdomain"
+
];
+
description = ''
+
A list of components and their hosts that are part of this cluster. For
+
a single-machine setup, the default value will be sufficient. This
+
value will automatically set `ERLANG_CLUSTER_ADAPTER_CONFIG`.
+
+
The format is `<COMPONENT_NAME>@<HOSTNAME>`.
+
'';
+
};
+
+
settingsSecret = mkOption {
+
default = { };
+
description = ''
+
This is a convenience option which allows you to set secret values for
+
environment variables by specifying a file which will contain the value
+
at runtime. Before starting the server, the content of each file will
+
be loaded into the respective environment variable.
+
+
Otherwise, this option is equivalent to
+
{option}`services.firezone.server.settings`. Refer to the settings
+
option for more information regarding the actual variables and how
+
filtering rules are applied for each component.
+
'';
+
type = types.submodule {
+
freeformType = types.attrsOf types.path;
+
options = {
+
RELEASE_COOKIE = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = ''
+
A file containing a unique secret identifier for the Erlang
+
cluster. All Firezone components in your cluster must use the
+
same value.
+
+
If this is `null`, a shared value will automatically be generated
+
on startup and used for all components on this machine. You do
+
not need to set this except when you spread your cluster over
+
multiple hosts.
+
'';
+
};
+
+
TOKENS_KEY_BASE = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = ''
+
A file containing a unique base64 encoded secret for the
+
`TOKENS_KEY_BASE`. All Firezone components in your cluster must
+
use the same value.
+
+
If this is `null`, a shared value will automatically be generated
+
on startup and used for all components on this machine. You do
+
not need to set this except when you spread your cluster over
+
multiple hosts.
+
'';
+
};
+
+
SECRET_KEY_BASE = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = ''
+
A file containing a unique base64 encoded secret for the
+
`SECRET_KEY_BASE`. All Firezone components in your cluster must
+
use the same value.
+
+
If this is `null`, a shared value will automatically be generated
+
on startup and used for all components on this machine. You do
+
not need to set this except when you spread your cluster over
+
multiple hosts.
+
'';
+
};
+
+
TOKENS_SALT = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = ''
+
A file containing a unique base64 encoded secret for the
+
`TOKENS_SALT`. All Firezone components in your cluster must
+
use the same value.
+
+
If this is `null`, a shared value will automatically be generated
+
on startup and used for all components on this machine. You do
+
not need to set this except when you spread your cluster over
+
multiple hosts.
+
'';
+
};
+
+
LIVE_VIEW_SIGNING_SALT = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = ''
+
A file containing a unique base64 encoded secret for the
+
`LIVE_VIEW_SIGNING_SALT`. All Firezone components in your cluster must
+
use the same value.
+
+
If this is `null`, a shared value will automatically be generated
+
on startup and used for all components on this machine. You do
+
not need to set this except when you spread your cluster over
+
multiple hosts.
+
'';
+
};
+
+
COOKIE_SIGNING_SALT = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = ''
+
A file containing a unique base64 encoded secret for the
+
`COOKIE_SIGNING_SALT`. All Firezone components in your cluster must
+
use the same value.
+
+
If this is `null`, a shared value will automatically be generated
+
on startup and used for all components on this machine. You do
+
not need to set this except when you spread your cluster over
+
multiple hosts.
+
'';
+
};
+
+
COOKIE_ENCRYPTION_SALT = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = ''
+
A file containing a unique base64 encoded secret for the
+
`COOKIE_ENCRYPTION_SALT`. All Firezone components in your cluster must
+
use the same value.
+
+
If this is `null`, a shared value will automatically be generated
+
on startup and used for all components on this machine. You do
+
not need to set this except when you spread your cluster over
+
multiple hosts.
+
'';
+
};
+
};
+
};
+
};
+
+
settings = mkOption {
+
description = ''
+
Environment variables for the Firezone server. For a list of available
+
variables, please refer to the [upstream definitions](https://github.com/firezone/firezone/blob/main/elixir/apps/domain/lib/domain/config/definitions.ex).
+
Some variables like `OUTBOUND_EMAIL_ADAPTER_OPTS` require json values
+
for which you can use `VAR = builtins.toJSON { /* ... */ }`.
+
+
Each component has an additional `settings` option which allows you to
+
override specific variables passed to that component.
+
'';
+
default = { };
+
type = types.submodule {
+
freeformType = types.attrsOf (
+
types.oneOf [
+
types.bool
+
types.float
+
types.int
+
types.str
+
types.path
+
types.package
+
]
+
);
+
};
+
};
+
+
smtp = {
+
configureManually = mkOption {
+
type = types.bool;
+
default = false;
+
description = ''
+
Outbound email configuration is mandatory for Firezone and supports
+
many different delivery adapters. Yet, most users will only need an
+
SMTP relay to send emails, so this configuration enforced by default.
+
+
If you want to utilize an alternative way to send emails (e.g. via a
+
supportd API-based service), enable this option and define
+
`OUTBOUND_EMAIL_FROM`, `OUTBOUND_EMAIL_ADAPTER` and
+
`OUTBOUND_EMAIL_ADAPTER_OPTS` manually via
+
{option}`services.firezone.server.settings` and/or
+
{option}`services.firezone.server.settingsSecret`.
+
+
The Firezone documentation holds [a list of supported Swoosh adapters](https://github.com/firezone/firezone/blob/main/website/src/app/docs/reference/env-vars/readme.mdx#outbound-emails).
+
'';
+
};
+
+
from = mkOption {
+
type = types.str;
+
example = "firezone@example.com";
+
description = "Outbound SMTP FROM address";
+
};
+
+
host = mkOption {
+
type = types.str;
+
example = "mail.example.com";
+
description = "Outbound SMTP host";
+
};
+
+
port = mkOption {
+
type = types.port;
+
example = 465;
+
description = "Outbound SMTP port";
+
};
+
+
implicitTls = mkOption {
+
type = types.bool;
+
default = false;
+
description = "Whether to use implicit TLS instead of STARTTLS (usually port 465)";
+
};
+
+
username = mkOption {
+
type = types.str;
+
example = "firezone@example.com";
+
description = "Username to authenticate against the SMTP relay";
+
};
+
+
passwordFile = mkOption {
+
type = types.path;
+
example = "/run/secrets/smtp-password";
+
description = "File containing the password for the given username. Beware that a file in the nix store will be world readable.";
+
};
+
};
+
+
domain = componentOptions "domain";
+
+
web = componentOptions "web" // {
+
externalUrl = mkOption {
+
type = types.strMatching "^https://.+/$";
+
example = "https://firezone.example.com/";
+
description = ''
+
The external URL under which you will serve the web interface. You
+
need to setup a reverse proxy for TLS termination, either with
+
{option}`services.firezone.server.nginx.enable` or manually.
+
'';
+
};
+
+
address = mkOption {
+
type = types.str;
+
default = "127.0.0.1";
+
description = "The address to listen on";
+
};
+
+
port = mkOption {
+
type = types.port;
+
default = 8080;
+
description = "The port under which the web interface will be served locally";
+
};
+
+
trustedProxies = mkOption {
+
type = types.listOf types.str;
+
default = [ ];
+
description = "A list of trusted proxies";
+
};
+
};
+
+
api = componentOptions "api" // {
+
externalUrl = mkOption {
+
type = types.strMatching "^https://.+/$";
+
example = "https://firezone.example.com/api/";
+
description = ''
+
The external URL under which you will serve the api. You need to
+
setup a reverse proxy for TLS termination, either with
+
{option}`services.firezone.server.nginx.enable` or manually.
+
'';
+
};
+
+
address = mkOption {
+
type = types.str;
+
default = "127.0.0.1";
+
description = "The address to listen on";
+
};
+
+
port = mkOption {
+
type = types.port;
+
default = 8081;
+
description = "The port under which the api will be served locally";
+
};
+
+
trustedProxies = mkOption {
+
type = types.listOf types.str;
+
default = [ ];
+
description = "A list of trusted proxies";
+
};
+
};
+
+
provision = {
+
enable = mkEnableOption "provisioning of the Firezone domain server";
+
accounts = mkOption {
+
type = types.attrsOf (
+
types.submodule {
+
freeformType = jsonFormat.type;
+
options = {
+
name = mkOption {
+
type = types.str;
+
description = "The account name";
+
example = "My Organization";
+
};
+
+
features =
+
let
+
mkFeatureOption =
+
name: default:
+
mkOption {
+
type = types.bool;
+
inherit default;
+
description = "Whether to enable the `${name}` feature for this account.";
+
};
+
in
+
{
+
flow_activities = mkFeatureOption "flow_activities" true;
+
policy_conditions = mkFeatureOption "policy_conditions" true;
+
multi_site_resources = mkFeatureOption "multi_site_resources" true;
+
traffic_filters = mkFeatureOption "traffic_filters" true;
+
self_hosted_relays = mkFeatureOption "self_hosted_relays" true;
+
idp_sync = mkFeatureOption "idp_sync" true;
+
rest_api = mkFeatureOption "rest_api" true;
+
internet_resource = mkFeatureOption "internet_resource" true;
+
};
+
+
actors = mkOption {
+
type = types.attrsOf (
+
types.submodule {
+
options = {
+
type = mkOption {
+
type = types.enum [
+
"account_admin_user"
+
"account_user"
+
"service_account"
+
"api_client"
+
];
+
description = "The account type";
+
};
+
+
name = mkOption {
+
type = types.str;
+
description = "The name of this actor";
+
};
+
+
email = mkOption {
+
type = types.str;
+
description = "The email address used to authenticate as this account";
+
};
+
};
+
}
+
);
+
default = { };
+
example = {
+
admin = {
+
type = "account_admin_user";
+
name = "Admin";
+
email = "admin@myorg.example.com";
+
};
+
};
+
description = ''
+
All actors (users) to provision. The attribute name will only
+
be used to track the actor and does not have any significance
+
for Firezone.
+
'';
+
};
+
+
auth = mkOption {
+
type = types.attrsOf (
+
types.submodule {
+
freeformType = jsonFormat.type;
+
options = {
+
name = mkOption {
+
type = types.str;
+
description = "The name of this authentication provider";
+
};
+
+
adapter = mkOption {
+
type = types.enum availableAuthAdapters;
+
description = "The auth adapter type";
+
};
+
+
adapter_config.clientSecretFile = mkOption {
+
type = types.nullOr types.path;
+
default = null;
+
description = ''
+
A file containing a the client secret for an openid_connect adapter.
+
You only need to set this if this is an openid_connect provider.
+
'';
+
};
+
};
+
}
+
);
+
default = { };
+
example = {
+
myoidcprovider = {
+
adapter = "openid_connect";
+
adapter_config = {
+
client_id = "clientid";
+
clientSecretFile = "/run/secrets/oidc-client-secret";
+
response_type = "code";
+
scope = "openid email name";
+
discovery_document_uri = "https://auth.example.com/.well-known/openid-configuration";
+
};
+
};
+
};
+
description = ''
+
All authentication providers to provision. The attribute name
+
will only be used to track the provider and does not have any
+
significance for Firezone.
+
'';
+
};
+
+
resources = mkOption {
+
type = types.attrsOf (
+
types.submodule {
+
options = {
+
type = mkOption {
+
type = types.enum [
+
"dns"
+
"cidr"
+
"ip"
+
];
+
description = "The resource type";
+
};
+
+
name = mkOption {
+
type = types.str;
+
description = "The name of this resource";
+
};
+
+
address = mkOption {
+
type = types.str;
+
description = "The address of this resource. Depending on the resource type, this should be an ip, ip with cidr mask or a domain.";
+
};
+
+
addressDescription = mkOption {
+
type = types.nullOr types.str;
+
default = null;
+
description = "An optional description for resource address, usually a full link to the resource including a schema.";
+
};
+
+
gatewayGroups = mkOption {
+
type = types.nonEmptyListOf types.str;
+
description = "A list of gateway groups (sites) which can reach the resource and may be used to connect to it.";
+
};
+
+
filters = mkOption {
+
type = types.listOf (
+
types.submodule {
+
options = {
+
protocol = mkOption {
+
type = types.enum [
+
"icmp"
+
"tcp"
+
"udp"
+
];
+
description = "The protocol to allow";
+
};
+
+
ports = mkOption {
+
type = types.listOf typePortRange;
+
example = [
+
443
+
{
+
from = 8080;
+
to = 8100;
+
}
+
];
+
default = [ ];
+
apply =
+
xs: map (x: if x.from == x.to then toString x.from else "${toString x.from} - ${toString x.to}") xs;
+
description = "Either a single port or port range to allow. Both bounds are inclusive.";
+
};
+
};
+
}
+
);
+
default = [ ];
+
description = "A list of filter to restrict traffic. If no filters are given, all traffic is allowed.";
+
};
+
};
+
}
+
);
+
default = { };
+
example = {
+
vaultwarden = {
+
type = "dns";
+
name = "Vaultwarden";
+
address = "vault.example.com";
+
address_description = "https://vault.example.com";
+
gatewayGroups = [ "my-site" ];
+
filters = [
+
{ protocol = "icmp"; }
+
{
+
protocol = "tcp";
+
ports = [
+
80
+
443
+
];
+
}
+
];
+
};
+
};
+
description = ''
+
All resources to provision. The attribute name will only be used to
+
track the resource and does not have any significance for Firezone.
+
'';
+
};
+
+
policies = mkOption {
+
type = types.attrsOf (
+
types.submodule {
+
options = {
+
description = mkOption {
+
type = types.nullOr types.str;
+
description = "The description of this policy";
+
};
+
+
group = mkOption {
+
type = types.str;
+
description = "The group which should be allowed access to the given resource.";
+
};
+
+
resource = mkOption {
+
type = types.str;
+
description = "The resource to which access should be allowed.";
+
};
+
};
+
}
+
);
+
default = { };
+
example = {
+
access_vaultwarden = {
+
name = "Allow anyone to access vaultwarden";
+
group = "everyone";
+
resource = "vaultwarden";
+
};
+
};
+
description = ''
+
All policies to provision. The attribute name will only be used to
+
track the policy and does not have any significance for Firezone.
+
'';
+
};
+
+
groups = mkOption {
+
type = types.attrsOf (
+
types.submodule {
+
options = {
+
name = mkOption {
+
type = types.str;
+
description = "The name of this group";
+
};
+
+
members = mkOption {
+
type = types.listOf types.str;
+
default = [ ];
+
description = "The members of this group";
+
};
+
+
forceMembers = mkOption {
+
type = types.bool;
+
default = false;
+
description = "Ensure that only the given members are part of this group at every server start.";
+
};
+
};
+
}
+
);
+
default = { };
+
example = {
+
users = {
+
name = "Users";
+
};
+
};
+
description = ''
+
All groups to provision. The attribute name will only be used
+
to track the group and does not have any significance for
+
Firezone.
+
+
A group named `everyone` will automatically be managed by Firezone.
+
'';
+
};
+
+
relayGroups = mkOption {
+
type = types.attrsOf (
+
types.submodule {
+
options = {
+
name = mkOption {
+
type = types.str;
+
description = "The name of this relay group";
+
};
+
};
+
}
+
);
+
default = { };
+
example = {
+
my-relays = {
+
name = "My Relays";
+
};
+
};
+
description = ''
+
All relay groups to provision. The attribute name
+
will only be used to track the relay group and does not have any
+
significance for Firezone.
+
'';
+
};
+
+
gatewayGroups = mkOption {
+
type = types.attrsOf (
+
types.submodule {
+
options = {
+
name = mkOption {
+
type = types.str;
+
description = "The name of this gateway group";
+
};
+
};
+
}
+
);
+
default = { };
+
example = {
+
my-gateways = {
+
name = "My Gateways";
+
};
+
};
+
description = ''
+
All gateway groups (sites) to provision. The attribute name
+
will only be used to track the gateway group and does not have any
+
significance for Firezone.
+
'';
+
};
+
};
+
}
+
);
+
default = { };
+
example = {
+
main = {
+
name = "My Account / Organization";
+
metadata.stripe.billing_email = "org@myorg.example.com";
+
features.rest_api = false;
+
};
+
};
+
description = ''
+
All accounts to provision. The attribute name specified here will
+
become the account slug. By using `"{file:/path/to/file}"` as a
+
string value anywhere in these settings, the provisioning script will
+
replace that value with the content of the given file at runtime.
+
+
Please refer to the [Firezone source code](https://github.com/firezone/firezone/blob/main/elixir/apps/domain/lib/domain/accounts/account.ex)
+
for all available properties.
+
'';
+
};
+
};
+
};
+
+
config = mkMerge [
+
{
  # Static configuration sanity checks for the provisioning options.
  assertions =
    [
      {
        # Provisioning runs via RPC against the local domain server, so it
        # cannot be enabled on a machine that does not run that component.
        assertion = cfg.provision.enable -> cfg.domain.enable;
        message = "Provisioning must be done on a machine running the firezone domain server";
      }
    ]
    ++ concatLists (
      flip mapAttrsToList cfg.provision.accounts (
        accountName: accountCfg:
        [
          {
            # The account name becomes the URL slug; the regex accepts
            # lowercase letters, underscores and dashes.
            assertion = (builtins.match "^[[:lower:]_-]+$" accountName) != null;
            message = "An account name must contain only lowercase characters, underscores or dashes, as it will be used as the URL slug for this account.";
          }
        ]
        ++ flip mapAttrsToList accountCfg.auth (
          authName: _: {
            assertion = (builtins.match "^[[:alnum:]_-]+$" authName) != null;
            message = "The authentication provider attribute key must contain only letters, numbers, underscores or dashes.";
          }
        )
      )
    );
}
+
# Enable all components if the main server is enabled
+
(mkIf cfg.enable {
+
services.firezone.server.domain.enable = true;
+
services.firezone.server.web.enable = true;
+
services.firezone.server.api.enable = true;
+
})
+
# Create (and configure) a local database if desired
+
(mkIf cfg.enableLocalDB {
+
services.postgresql = {
+
enable = true;
+
ensureUsers = [
+
{
+
name = "firezone";
+
ensureDBOwnership = true;
+
}
+
];
+
ensureDatabases = [ "firezone" ];
+
};
+
+
services.firezone.server.settings = {
+
DATABASE_SOCKET_DIR = "/run/postgresql";
+
DATABASE_PORT = "5432";
+
DATABASE_NAME = "firezone";
+
DATABASE_USER = "firezone";
+
DATABASE_PASSWORD = "firezone";
+
};
+
})
+
# Create a local nginx reverse proxy
+
(mkIf cfg.nginx.enable {
+
services.nginx = mkMerge [
+
{
+
enable = true;
+
}
+
(
+
let
+
urlComponents = builtins.elemAt (builtins.split "https://([^/]*)(/?.*)" cfg.web.externalUrl) 1;
+
domain = builtins.elemAt urlComponents 0;
+
location = builtins.elemAt urlComponents 1;
+
in
+
{
+
virtualHosts.${domain} = {
+
forceSSL = mkDefault true;
+
locations.${location} = {
+
# The trailing slash is important to strip the location prefix from the request
+
proxyPass = "http://${cfg.web.address}:${toString cfg.web.port}/";
+
proxyWebsockets = true;
+
};
+
};
+
}
+
)
+
(
+
let
+
urlComponents = builtins.elemAt (builtins.split "https://([^/]*)(/?.*)" cfg.api.externalUrl) 1;
+
domain = builtins.elemAt urlComponents 0;
+
location = builtins.elemAt urlComponents 1;
+
in
+
{
+
virtualHosts.${domain} = {
+
forceSSL = mkDefault true;
+
locations.${location} = {
+
# The trailing slash is important to strip the location prefix from the request
+
proxyPass = "http://${cfg.api.address}:${toString cfg.api.port}/";
+
proxyWebsockets = true;
+
};
+
};
+
}
+
)
+
];
+
})
+
# Specify sensible defaults
+
{
+
services.firezone.server = {
+
settings = {
+
LOG_LEVEL = mkDefault "info";
+
RELEASE_HOSTNAME = mkDefault "localhost.localdomain";
+
+
ERLANG_CLUSTER_ADAPTER = mkDefault "Elixir.Cluster.Strategy.Epmd";
+
ERLANG_CLUSTER_ADAPTER_CONFIG = mkDefault (
+
builtins.toJSON {
+
hosts = cfg.clusterHosts;
+
}
+
);
+
+
TZDATA_DIR = mkDefault "/var/lib/firezone/tzdata";
+
TELEMETRY_ENABLED = mkDefault false;
+
+
# By default this will open nproc * 2 connections for each component,
+
# which can exceed the (default) maximum of 100 connections for
+
# postgresql on a 12 core +SMT machine. 16 connections will be
+
# sufficient for small to medium deployments
+
DATABASE_POOL_SIZE = "16";
+
+
AUTH_PROVIDER_ADAPTERS = mkDefault (concatStringsSep "," availableAuthAdapters);
+
+
FEATURE_FLOW_ACTIVITIES_ENABLED = mkDefault true;
+
FEATURE_POLICY_CONDITIONS_ENABLED = mkDefault true;
+
FEATURE_MULTI_SITE_RESOURCES_ENABLED = mkDefault true;
+
FEATURE_SELF_HOSTED_RELAYS_ENABLED = mkDefault true;
+
FEATURE_IDP_SYNC_ENABLED = mkDefault true;
+
FEATURE_REST_API_ENABLED = mkDefault true;
+
FEATURE_INTERNET_RESOURCE_ENABLED = mkDefault true;
+
FEATURE_TRAFFIC_FILTERS_ENABLED = mkDefault true;
+
+
FEATURE_SIGN_UP_ENABLED = mkDefault (!cfg.provision.enable);
+
+
WEB_EXTERNAL_URL = mkDefault cfg.web.externalUrl;
+
API_EXTERNAL_URL = mkDefault cfg.api.externalUrl;
+
};
+
+
domain.settings = {
+
ERLANG_DISTRIBUTION_PORT = mkDefault 9000;
+
HEALTHZ_PORT = mkDefault 4000;
+
BACKGROUND_JOBS_ENABLED = mkDefault true;
+
};
+
+
web.settings = {
+
ERLANG_DISTRIBUTION_PORT = mkDefault 9001;
+
HEALTHZ_PORT = mkDefault 4001;
+
BACKGROUND_JOBS_ENABLED = mkDefault false;
+
+
PHOENIX_LISTEN_ADDRESS = mkDefault cfg.web.address;
+
PHOENIX_EXTERNAL_TRUSTED_PROXIES = mkDefault (builtins.toJSON cfg.web.trustedProxies);
+
PHOENIX_HTTP_WEB_PORT = mkDefault cfg.web.port;
+
PHOENIX_HTTP_API_PORT = mkDefault cfg.api.port;
+
PHOENIX_SECURE_COOKIES = mkDefault true; # enforce HTTPS on cookies
+
};
+
+
api.settings = {
+
ERLANG_DISTRIBUTION_PORT = mkDefault 9002;
+
HEALTHZ_PORT = mkDefault 4002;
+
BACKGROUND_JOBS_ENABLED = mkDefault false;
+
+
PHOENIX_LISTEN_ADDRESS = mkDefault cfg.api.address;
+
PHOENIX_EXTERNAL_TRUSTED_PROXIES = mkDefault (builtins.toJSON cfg.api.trustedProxies);
+
PHOENIX_HTTP_WEB_PORT = mkDefault cfg.web.port;
+
PHOENIX_HTTP_API_PORT = mkDefault cfg.api.port;
+
PHOENIX_SECURE_COOKIES = mkDefault true; # enforce HTTPS on cookies
+
};
+
};
+
}
+
(mkIf (!cfg.smtp.configureManually) {
+
services.firezone.server.settings = {
+
OUTBOUND_EMAIL_ADAPTER = "Elixir.Swoosh.Adapters.Mua";
+
OUTBOUND_EMAIL_ADAPTER_OPTS = builtins.toJSON { };
+
OUTBOUND_EMAIL_FROM = cfg.smtp.from;
+
OUTBOUND_EMAIL_SMTP_HOST = cfg.smtp.host;
+
OUTBOUND_EMAIL_SMTP_PORT = toString cfg.smtp.port;
+
OUTBOUND_EMAIL_SMTP_PROTOCOL = if cfg.smtp.implicitTls then "ssl" else "tcp";
+
OUTBOUND_EMAIL_SMTP_USERNAME = cfg.smtp.username;
+
};
+
services.firezone.server.settingsSecret = {
+
OUTBOUND_EMAIL_SMTP_PASSWORD = cfg.smtp.passwordFile;
+
};
+
})
+
(mkIf cfg.provision.enable {
+
# Load client secrets from authentication providers
+
services.firezone.server.settingsSecret = flip concatMapAttrs cfg.provision.accounts (
+
accountName: accountCfg:
+
flip concatMapAttrs accountCfg.auth (
+
authName: authCfg:
+
optionalAttrs (authCfg.adapter_config.clientSecretFile != null) {
+
"AUTH_CLIENT_SECRET_${toUpper accountName}_${toUpper authName}" =
+
authCfg.adapter_config.clientSecretFile;
+
}
+
)
+
);
+
})
+
(mkIf (cfg.openClusterFirewall && cfg.domain.enable) {
+
networking.firewall.allowedTCPPorts = [
+
cfg.domain.settings.ERLANG_DISTRIBUTION_PORT
+
];
+
})
+
(mkIf (cfg.openClusterFirewall && cfg.web.enable) {
+
networking.firewall.allowedTCPPorts = [
+
cfg.web.settings.ERLANG_DISTRIBUTION_PORT
+
];
+
})
+
(mkIf (cfg.openClusterFirewall && cfg.api.enable) {
+
networking.firewall.allowedTCPPorts = [
+
cfg.api.settings.ERLANG_DISTRIBUTION_PORT
+
];
+
})
+
(mkIf (cfg.domain.enable || cfg.web.enable || cfg.api.enable) {
+
systemd.slices.system-firezone = {
+
description = "Firezone Slice";
+
};
+
+
systemd.targets.firezone = {
+
description = "Common target for all Firezone services.";
+
wantedBy = [ "multi-user.target" ];
+
};
+
+
systemd.services.firezone-initialize = {
+
description = "Backend initialization service for the Firezone zero-trust access platform";
+
+
after = mkIf cfg.enableLocalDB [ "postgresql.service" ];
+
requires = mkIf cfg.enableLocalDB [ "postgresql.service" ];
+
wantedBy = [ "firezone.target" ];
+
partOf = [ "firezone.target" ];
+
+
script = ''
+
mkdir -p "$TZDATA_DIR"
+
+
# Generate and load secrets
+
${generateSecrets}
+
${loadSecretEnvironment "domain"}
+
+
echo "Running migrations"
+
${getExe cfg.domain.package} eval Domain.Release.migrate
+
'';
+
+
# We use the domain environment to be able to run migrations
+
environment = collectEnvironment "domain";
+
serviceConfig = commonServiceConfig // {
+
Type = "oneshot";
+
RemainAfterExit = true;
+
};
+
};
+
+
systemd.services.firezone-server-domain = mkIf cfg.domain.enable {
  description = "Backend domain server for the Firezone zero-trust access platform";
  after = [ "firezone-initialize.service" ];
  bindsTo = [ "firezone-initialize.service" ];
  wantedBy = [ "firezone.target" ];
  partOf = [ "firezone.target" ];

  script = ''
    ${loadSecretEnvironment "domain"}
    exec ${getExe cfg.domain.package} start;
  '';

  path = [ pkgs.curl ];
  postStart =
    ''
      # Wait for the firezone server to come online before considering the
      # unit started (and before provisioning below).
      count=0
      while [[ "$(curl -s "http://localhost:${toString cfg.domain.settings.HEALTHZ_PORT}" 2>/dev/null || echo)" != '{"status":"ok"}' ]]
      do
        sleep 1
        if [[ "$count" -eq 30 ]]; then
          echo "Tried for at least 30 seconds, giving up..."
          exit 1
        fi
        # BUGFIX: `count=$((count++))` assigns the pre-increment value back
        # to count, so the counter never advanced and the 30-second bail-out
        # above was unreachable (the loop spun until the systemd timeout).
        count=$((count + 1))
      done
    ''
    + optionalString cfg.provision.enable ''
      # Wait for server to fully come up. Not ideal to use sleep, but at least it works.
      sleep 1

      ${loadSecretEnvironment "domain"}
      ln -sTf ${provisionStateJson} provision-state.json
      ${getExe cfg.domain.package} rpc 'Code.eval_file("${./provision.exs}")'
    '';

  # We reuse the domain environment so the RPC provisioning call can reach
  # the running release.
  environment = collectEnvironment "domain";
  serviceConfig = commonServiceConfig;
};
+
+
systemd.services.firezone-server-web = mkIf cfg.web.enable {
+
description = "Backend web server for the Firezone zero-trust access platform";
+
after = [ "firezone-initialize.service" ];
+
bindsTo = [ "firezone-initialize.service" ];
+
wantedBy = [ "firezone.target" ];
+
partOf = [ "firezone.target" ];
+
+
script = ''
+
${loadSecretEnvironment "web"}
+
exec ${getExe cfg.web.package} start;
+
'';
+
+
environment = collectEnvironment "web";
+
serviceConfig = commonServiceConfig;
+
};
+
+
systemd.services.firezone-server-api = mkIf cfg.api.enable {
+
description = "Backend api server for the Firezone zero-trust access platform";
+
after = [ "firezone-initialize.service" ];
+
bindsTo = [ "firezone-initialize.service" ];
+
wantedBy = [ "firezone.target" ];
+
partOf = [ "firezone.target" ];
+
+
script = ''
+
${loadSecretEnvironment "api"}
+
exec ${getExe cfg.api.package} start;
+
'';
+
+
environment = collectEnvironment "api";
+
serviceConfig = commonServiceConfig;
+
};
+
})
+
];
+
+
meta.maintainers = with lib.maintainers; [
+
oddlama
+
patrickdag
+
];
+
}
+1
nixos/tests/all-tests.nix
···
firewall = handleTest ./firewall.nix { nftables = false; };
firewall-nftables = handleTest ./firewall.nix { nftables = true; };
fish = runTest ./fish.nix;
+
firezone = handleTest ./firezone/firezone.nix {};
flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
flaresolverr = handleTest ./flaresolverr.nix {};
flood = handleTest ./flood.nix {};
+86
nixos/tests/firezone/create-tokens.exs
···
+
alias Domain.{Repo, Accounts, Auth, Actors, Tokens}
+
+
mappings = case File.read("provision-uuids.json") do
+
{:ok, content} ->
+
case Jason.decode(content) do
+
{:ok, mapping} -> mapping
+
_ -> %{"accounts" => %{}}
+
end
+
_ -> %{"accounts" => %{}}
+
end
+
+
IO.puts("INFO: Fetching account")
+
{:ok, account} = Accounts.fetch_account_by_id_or_slug("main")
+
+
IO.puts("INFO: Fetching email provider")
+
{:ok, email_provider} = Auth.Provider.Query.not_disabled()
+
|> Auth.Provider.Query.by_adapter(:email)
+
|> Auth.Provider.Query.by_account_id(account.id)
+
|> Repo.fetch(Auth.Provider.Query, [])
+
+
temp_admin_actor_email = "firezone-provision@localhost.local"
+
temp_admin_actor_context = %Auth.Context{
+
type: :browser,
+
user_agent: "Unspecified/0.0",
+
remote_ip: {127, 0, 0, 1},
+
remote_ip_location_region: "N/A",
+
remote_ip_location_city: "N/A",
+
remote_ip_location_lat: 0.0,
+
remote_ip_location_lon: 0.0
+
}
+
+
{:ok, temp_admin_actor} =
+
Actors.create_actor(account, %{
+
type: :account_admin_user,
+
name: "Token Provisioning"
+
})
+
+
{:ok, temp_admin_actor_email_identity} =
+
Auth.create_identity(temp_admin_actor, email_provider, %{
+
provider_identifier: temp_admin_actor_email,
+
provider_identifier_confirmation: temp_admin_actor_email
+
})
+
+
{:ok, temp_admin_actor_token} =
+
Auth.create_token(temp_admin_actor_email_identity, temp_admin_actor_context, "temporarynonce", DateTime.utc_now() |> DateTime.add(1, :hour))
+
+
{:ok, temp_admin_subject} =
+
Auth.build_subject(temp_admin_actor_token, temp_admin_actor_context)
+
+
{:ok, relay_group_token} =
+
Tokens.create_token(%{
+
"type" => :relay_group,
+
"expires_at" => DateTime.utc_now() |> DateTime.add(1, :hour),
+
"secret_fragment" => Domain.Crypto.random_token(32, encoder: :hex32),
+
"relay_group_id" => get_in(mappings, ["accounts", "main", "relay_groups", "my-relays"])
+
})
+
+
relay_group_encoded_token = Tokens.encode_fragment!(relay_group_token)
+
IO.puts("Created relay token: #{relay_group_encoded_token}")
+
File.write("relay_token.txt", relay_group_encoded_token)
+
+
{:ok, gateway_group_token} =
+
Tokens.create_token(%{
+
"type" => :gateway_group,
+
"expires_at" => DateTime.utc_now() |> DateTime.add(1, :hour),
+
"secret_fragment" => Domain.Crypto.random_token(32, encoder: :hex32),
+
"account_id" => get_in(mappings, ["accounts", "main", "id"]),
+
"gateway_group_id" => get_in(mappings, ["accounts", "main", "gateway_groups", "site"])
+
}, temp_admin_subject)
+
+
gateway_group_encoded_token = Tokens.encode_fragment!(gateway_group_token)
+
IO.puts("Created gateway group token: #{gateway_group_encoded_token}")
+
File.write("gateway_token.txt", gateway_group_encoded_token)
+
+
{:ok, service_account_actor_token} =
+
Tokens.create_token(%{
+
"type" => :client,
+
"expires_at" => DateTime.utc_now() |> DateTime.add(1, :hour),
+
"secret_fragment" => Domain.Crypto.random_token(32, encoder: :hex32),
+
"account_id" => get_in(mappings, ["accounts", "main", "id"]),
+
"actor_id" => get_in(mappings, ["accounts", "main", "actors", "client"])
+
})
+
+
service_account_actor_encoded_token = Tokens.encode_fragment!(service_account_actor_token)
+
IO.puts("Created service actor token: #{service_account_actor_encoded_token}")
+
File.write("client_token.txt", service_account_actor_encoded_token)
+349
nixos/tests/firezone/firezone.nix
···
+
import ../make-test-python.nix (
+
{ pkgs, ... }:
+
let
+
certs = import ../common/acme/server/snakeoil-certs.nix;
+
domain = certs.domain;
+
in
+
{
+
name = "firezone";
+
meta.maintainers = with pkgs.lib.maintainers; [ oddlama ];
+
+
nodes = {
+
server =
+
{
+
config,
+
lib,
+
pkgs,
+
...
+
}:
+
{
+
security.pki.certificateFiles = [ certs.ca.cert ];
+
+
networking.extraHosts = ''
+
${config.networking.primaryIPAddress} ${domain}
+
${config.networking.primaryIPv6Address} ${domain}
+
'';
+
+
networking.firewall.allowedTCPPorts = [
+
80
+
443
+
];
+
+
services.nginx = {
+
enable = true;
+
virtualHosts.${domain} = {
+
sslCertificate = certs.${domain}.cert;
+
sslCertificateKey = certs.${domain}.key;
+
};
+
};
+
+
services.firezone.server = {
+
enable = true;
+
enableLocalDB = true;
+
nginx.enable = true;
+
+
# Doesn't need to work for this test, but needs to be configured
+
# otherwise the server will not start.
+
smtp = {
+
from = "firezone@example.com";
+
host = "mail.localhost";
+
port = 465;
+
implicitTls = true;
+
username = "firezone@example.com";
+
passwordFile = pkgs.writeText "tmpmailpasswd" "supermailpassword";
+
};
+
+
provision = {
+
enable = true;
+
accounts.main = {
+
name = "My Account";
+
relayGroups.my-relays.name = "Relays";
+
gatewayGroups.site.name = "Site";
+
actors = {
+
admin = {
+
type = "account_admin_user";
+
name = "Admin";
+
email = "admin@example.com";
+
};
+
client = {
+
type = "service_account";
+
name = "A client";
+
email = "client@example.com";
+
};
+
};
+
resources.res1 = {
+
type = "dns";
+
name = "Dns Resource";
+
address = "resource.example.com";
+
gatewayGroups = [ "site" ];
+
filters = [
+
{ protocol = "icmp"; }
+
{
+
protocol = "tcp";
+
ports = [ 80 ];
+
}
+
];
+
};
+
resources.res2 = {
+
type = "ip";
+
name = "Ip Resource";
+
address = "172.20.2.1";
+
gatewayGroups = [ "site" ];
+
};
+
resources.res3 = {
+
type = "cidr";
+
name = "Cidr Resource";
+
address = "172.20.1.0/24";
+
gatewayGroups = [ "site" ];
+
};
+
policies.pol1 = {
+
description = "Allow anyone res1 access";
+
group = "everyone";
+
resource = "res1";
+
};
+
policies.pol2 = {
+
description = "Allow anyone res2 access";
+
group = "everyone";
+
resource = "res2";
+
};
+
policies.pol3 = {
+
description = "Allow anyone res3 access";
+
group = "everyone";
+
resource = "res3";
+
};
+
};
+
};
+
+
api.externalUrl = "https://${domain}/api/";
+
web.externalUrl = "https://${domain}/";
+
};
+
+
systemd.services.firezone-server-domain.postStart = lib.mkAfter ''
+
${lib.getExe config.services.firezone.server.domain.package} rpc 'Code.eval_file("${./create-tokens.exs}")'
+
'';
+
};
+
+
relay =
+
{
+
nodes,
+
config,
+
lib,
+
...
+
}:
+
{
+
security.pki.certificateFiles = [ certs.ca.cert ];
+
networking.extraHosts = ''
+
${nodes.server.networking.primaryIPAddress} ${domain}
+
${nodes.server.networking.primaryIPv6Address} ${domain}
+
'';
+
+
services.firezone.relay = {
+
enable = true;
+
logLevel = "debug";
+
name = "test-relay";
+
apiUrl = "wss://${domain}/api/";
+
tokenFile = "/tmp/shared/relay_token.txt";
+
publicIpv4 = config.networking.primaryIPAddress;
+
publicIpv6 = config.networking.primaryIPv6Address;
+
openFirewall = true;
+
};
+
+
# Don't auto-start so we can wait until the token was provisioned
+
systemd.services.firezone-relay.wantedBy = lib.mkForce [ ];
+
};
+
+
# A resource that is only connected to the gateway,
+
# allowing us to confirm the VPN works
+
resource = {
+
virtualisation.vlans = [
+
1
+
2
+
];
+
+
networking.interfaces.eth1.ipv4.addresses = [
+
{
+
address = "172.20.1.1";
+
prefixLength = 24;
+
}
+
];
+
+
networking.interfaces.eth2.ipv4.addresses = [
+
{
+
address = "172.20.2.1";
+
prefixLength = 24;
+
}
+
];
+
+
networking.firewall.allowedTCPPorts = [
+
80
+
];
+
+
services.nginx = {
+
enable = true;
+
virtualHosts = {
+
"localhost" = {
+
default = true;
+
locations."/".extraConfig = ''
+
return 200 'greetings from the resource';
+
add_header Content-Type text/plain;
+
'';
+
};
+
};
+
};
+
};
+
+
gateway =
+
{
+
nodes,
+
lib,
+
...
+
}:
+
{
+
virtualisation.vlans = [
+
1
+
2
+
];
+
+
networking = {
+
interfaces.eth1.ipv4.addresses = [
+
{
+
address = "172.20.1.2";
+
prefixLength = 24;
+
}
+
];
+
+
interfaces.eth2.ipv4.addresses = [
+
{
+
address = "172.20.2.2";
+
prefixLength = 24;
+
}
+
];
+
+
firewall.enable = false;
+
nftables.enable = true;
+
nftables.tables."filter".family = "inet";
+
nftables.tables."filter".content = ''
+
chain incoming {
+
type filter hook input priority 0; policy accept;
+
}
+
+
chain postrouting {
+
type nat hook postrouting priority srcnat; policy accept;
+
meta protocol ip iifname "tun-firezone" oifname { "eth1", "eth2" } masquerade random
+
}
+
+
chain forward {
+
type filter hook forward priority 0; policy drop;
+
iifname "tun-firezone" accept
+
oifname "tun-firezone" accept
+
}
+
+
chain output {
+
type filter hook output priority 0; policy accept;
+
}
+
'';
+
};
+
+
boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
+
# boot.kernel.sysctl."net.ipv4.conf.all.src_valid_mark" = "1";
+
boot.kernel.sysctl."net.ipv6.conf.default.forwarding" = "1";
+
boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = "1";
+
+
security.pki.certificateFiles = [ certs.ca.cert ];
+
networking.extraHosts = ''
+
${nodes.server.networking.primaryIPAddress} ${domain}
+
${nodes.server.networking.primaryIPv6Address} ${domain}
+
172.20.1.1 resource.example.com
+
'';
+
+
services.firezone.gateway = {
+
enable = true;
+
logLevel = "debug";
+
name = "test-gateway";
+
apiUrl = "wss://${domain}/api/";
+
tokenFile = "/tmp/shared/gateway_token.txt";
+
};
+
+
# Don't auto-start so we can wait until the token was provisioned
+
systemd.services.firezone-gateway.wantedBy = lib.mkForce [ ];
+
};
+
+
client =
+
{
+
nodes,
+
lib,
+
...
+
}:
+
{
+
security.pki.certificateFiles = [ certs.ca.cert ];
+
networking.useNetworkd = true;
+
networking.extraHosts = ''
+
${nodes.server.networking.primaryIPAddress} ${domain}
+
${nodes.server.networking.primaryIPv6Address} ${domain}
+
'';
+
+
services.firezone.headless-client = {
+
enable = true;
+
logLevel = "debug";
+
name = "test-client-somebody";
+
apiUrl = "wss://${domain}/api/";
+
tokenFile = "/tmp/shared/client_token.txt";
+
};
+
+
# Don't auto-start so we can wait until the token was provisioned
+
systemd.services.firezone-headless-client.wantedBy = lib.mkForce [ ];
+
};
+
};
+
+
testScript =
+
{ ... }:
+
''
+
start_all()
+
+
with subtest("Start server"):
+
server.wait_for_unit("firezone.target")
+
server.wait_until_succeeds("curl -Lsf https://${domain} | grep 'Welcome to Firezone'")
+
server.wait_until_succeeds("curl -Ls https://${domain}/api | grep 'Not Found'")
+
+
# Wait for tokens and copy them to shared folder
+
server.wait_for_file("/var/lib/private/firezone/relay_token.txt")
+
server.wait_for_file("/var/lib/private/firezone/gateway_token.txt")
+
server.wait_for_file("/var/lib/private/firezone/client_token.txt")
+
server.succeed("cp /var/lib/private/firezone/*_token.txt /tmp/shared")
+
+
with subtest("Connect relay"):
+
relay.succeed("systemctl start firezone-relay")
+
relay.wait_for_unit("firezone-relay.service")
+
relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Connected to portal.*${domain}'", timeout=30)
+
+
with subtest("Connect gateway"):
+
gateway.succeed("systemctl start firezone-gateway")
+
gateway.wait_for_unit("firezone-gateway.service")
+
gateway.wait_until_succeeds("journalctl --since -2m --unit firezone-gateway.service --grep 'Connected to portal.*${domain}'", timeout=30)
+
relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Created allocation.*IPv4'", timeout=30)
+
relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Created allocation.*IPv6'", timeout=30)
+
+
# Assert both relay ips are known
+
gateway.wait_until_succeeds("journalctl --since -2m --unit firezone-gateway.service --grep 'Updated allocation.*relay_ip4.*Some.*relay_ip6.*Some'", timeout=30)
+
+
with subtest("Connect headless-client"):
+
client.succeed("systemctl start firezone-headless-client")
+
client.wait_for_unit("firezone-headless-client.service")
+
client.wait_until_succeeds("journalctl --since -2m --unit firezone-headless-client.service --grep 'Connected to portal.*${domain}'", timeout=30)
+
client.wait_until_succeeds("journalctl --since -2m --unit firezone-headless-client.service --grep 'Tunnel ready'", timeout=30)
+
+
with subtest("Check DNS based access"):
+
# Check that we can access the resource through the VPN via DNS
+
client.wait_until_succeeds("curl -4 -Lsf http://resource.example.com | grep 'greetings from the resource'")
+
client.wait_until_succeeds("curl -6 -Lsf http://resource.example.com | grep 'greetings from the resource'")
+
+
with subtest("Check CIDR based access"):
+
# Check that we can access the resource through the VPN via CIDR
+
client.wait_until_succeeds("ping -c1 -W1 172.20.1.1")
+
+
with subtest("Check IP based access"):
+
# Check that we can access the resource through the VPN via IP
+
client.wait_until_succeeds("ping -c1 -W1 172.20.2.1")
+
'';
+
}
+
)