hosts/plover: try out host-specific module structure

This commit is contained in:
Gabriel Arazas 2023-12-11 16:30:00 +08:00
parent 396a25f797
commit 9762042848
GPG Key ID: ADE0C41DAB221FCC
24 changed files with 1695 additions and 1487 deletions
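The gist of the new structure: each service module under ./modules/services/ now declares its own enable flag in the hosts.plover.services namespace and guards its configuration behind it, with the pieces that depend on other services further gated on their flags. A minimal sketch of the shape most modules below follow (the "example" service name and the option bodies are illustrative, not taken from the commit):

{ config, lib, ... }:

let
  hostCfg = config.hosts.plover;
  cfg = hostCfg.services.example;
in
{
  options.hosts.plover.services.example.enable = lib.mkEnableOption "example service";

  config = lib.mkIf cfg.enable (lib.mkMerge [
    {
      # Unconditional part of the service configuration goes here.
    }

    # Only hook into the reverse proxy when that service is also enabled.
    (lib.mkIf hostCfg.services.reverse-proxy.enable {
      # e.g. services.nginx.virtualHosts."example.${config.networking.domain}" = ...;
    })
  ]);
}

The host's default.nix then just flips the flags it wants, as seen in the hosts.plover.services block below.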

View File

@ -388,27 +388,14 @@
# Don't create the user directories since they are assumed to
# be already created by a pre-installed system (which should
# already handle them).
xdg.userDirs.createDirectories = lib.mkDefault false;
# To be able to reuse as much of our config as possible, we want
# both to use the same overlays.
nixpkgs.overlays = overlays;
# Stallman-senpai will be disappointed. :/
nixpkgs.config.allowUnfree = lib.mkDefault true;
# Find Nix files with these! Even if nix-index is already enabled, it
# is better to make it explicit.
programs.nix-index.enable = lib.mkDefault true;
xdg.userDirs.createDirectories = lib.mkForce false;
# Setting the homely options.
home.username = lib.mkForce name;
home.homeDirectory = lib.mkForce metadata.home-directory or "/home/${config.home.username}";
# home-manager configurations are expected to be deployed on
# non-NixOS systems so it is safe to set this.
programs.home-manager.enable = lib.mkDefault true;
targets.genericLinux.enable = lib.mkDefault true;
programs.home-manager.enable = lib.mkForce true;
targets.genericLinux.enable = true;
})
userSharedConfig
nixSettingsSharedConfig

View File

@ -1,8 +1,5 @@
{ config, lib, pkgs, modulesPath, ... }:
let
inherit (import ./modules/hardware/networks.nix) interfaces;
in
{
imports = [
# Since this will be rarely configured, make sure to import the appropriate
@ -17,69 +14,34 @@ in
# Hardened profile from nixpkgs.
"${modulesPath}/profiles/hardened.nix"
# Of course, what is a server without a backup? A professionally-handled
# production system. However, we're not professionals so we do have
# backups.
./modules/services/borgbackup.nix
# The primary DNS server that is completely hidden.
./modules/services/bind.nix
# The reverse proxy of choice.
./modules/services/nginx.nix
# The single-sign on setup.
./modules/services/kanidm.nix
./modules/services/vouch-proxy.nix
# The monitoring stack.
./modules/services/prometheus.nix
./modules/services/grafana.nix
# The database of choice which is used by most self-managed services on
# this server.
./modules/services/postgresql.nix
# The application services for this server. They are modularized since
# configuring it here will make it too big.
./modules/services/atuin.nix
./modules/services/gitea.nix
./modules/services/vaultwarden.nix
./modules/services/wireguard.nix
./modules/services/wezterm-mux-server.nix
./modules
];
# Host-specific module structure.
hosts.plover.services = {
# The essential services.
backup.enable = true;
database.enable = true;
firewall.enable = true;
dns-server.enable = true;
idm.enable = true;
monitoring.enable = true;
reverse-proxy.enable = true;
fail2ban.enable = true;
# The self-hosted services.
atuin.enable = true;
gitea.enable = true;
grafana.enable = true;
vaultwarden.enable = true;
wireguard.enable = true;
};
# Automatic formatting and partitioning.
disko.devices = import ./disko.nix {
disks = [ "/dev/sda" ];
};
networking = {
nftables.enable = true;
domain = "foodogsquared.one";
firewall = {
enable = true;
allowedTCPPorts = [
22 # Secure Shells.
];
};
};
services.fail2ban = {
ignoreIP = [
# VPN clients.
"${interfaces.wireguard0.IPv4.address}/13"
"${interfaces.wireguard0.IPv6.address}/64"
];
# We're going to be unforgiving with this one since we only have key
# authentication and password authentication is disabled anyway.
jails.sshd.settings = {
enabled = true;
maxretry = 1;
};
};
# Offline SSH!?!
programs.mosh.enable = true;

View File

@ -0,0 +1,38 @@
# Take note only optional modules should be imported here.
{
imports = [
# Of course, what is a server without a backup? A professionally-handled
# production system. However, we're not professionals so we do have
# backups.
./services/backup.nix
# The database of choice which is used by most self-managed services on
# this server.
./services/database.nix
# The primary DNS server that is completely hidden.
./services/dns-server.nix
# The single-sign on setup.
./services/idm.nix
# The reverse proxy of choice.
./services/reverse-proxy.nix
# The firewall of choice.
./services/firewall.nix
# The VPN setup of choice.
./services/wireguard.nix
# The rest of the self-hosted applications.
./services/atuin.nix
./services/fail2ban.nix
./services/gitea.nix
./services/grafana.nix
./services/monitoring.nix
./services/vouch-proxy.nix
./services/vaultwarden.nix
./services/wezterm-mux-server.nix
];
}

View File

@ -4,36 +4,49 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.atuin;
inherit (import ../hardware/networks.nix) interfaces;
atuinInternalDomain = "atuin.${config.networking.fqdn}";
host = interfaces.lan.IPv4.address;
in
{
# Atuin sync server because why not.
services.atuin = {
enable = true;
openRegistration = true;
options.hosts.plover.services.atuin.enable = lib.mkEnableOption "Atuin sync server setup";
inherit host;
port = 8965;
};
config = lib.mkIf cfg.enable (lib.mkMerge [
{
# Atuin sync server because why not.
services.atuin = {
enable = true;
openRegistration = true;
# Putting a neat little script to create the appropriate schema since we're
# using the secure schema usage pattern as encouraged by the PostgreSQL
# documentation.
systemd.services.atuin = {
path = [ config.services.postgresql.package ];
preStart = ''
psql -tAc "SELECT 1 FROM information_schema.schemata WHERE schema_name='atuin';" \
| grep -q 1 || psql -tAc "CREATE SCHEMA IF NOT EXISTS atuin;"
'';
};
inherit host;
port = 8965;
};
}
# Putting it all together in the reverse proxy of choice.
services.nginx.virtualHosts."${atuinInternalDomain}" = {
locations."/" = {
proxyPass = "http://${host}:${toString config.services.atuin.port}";
};
};
(lib.mkIf hostCfg.services.reverse-proxy.enable {
# Putting it all together in the reverse proxy of choice.
services.nginx.virtualHosts."${atuinInternalDomain}" = {
locations."/" = {
proxyPass = "http://${host}:${toString config.services.atuin.port}";
};
};
})
(lib.mkIf hostCfg.services.database.enable {
# Putting a neat little script to create the appropriate schema since we're
# using the secure schema usage pattern as encouraged by the PostgreSQL
# documentation.
systemd.services.atuin = {
path = [ config.services.postgresql.package ];
preStart = ''
psql -tAc "SELECT 1 FROM information_schema.schemata WHERE schema_name='atuin';" \
| grep -q 1 || psql -tAc "CREATE SCHEMA IF NOT EXISTS atuin;"
'';
};
})
]);
}

View File

@ -0,0 +1,86 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.backup;
# The head of the Borgbase hostname.
hetzner-boxes-user = "u332477";
hetzner-boxes-server = "${hetzner-boxes-user}.your-storagebox.de";
borgRepo = path: "ssh://${hetzner-boxes-user}@${hetzner-boxes-server}:23/./borg/plover/${path}";
jobCommonSettings = { patternFiles ? [ ], patterns ? [ ], paths ? [ ], repo, passCommand }: {
inherit paths repo;
compression = "zstd,11";
dateFormat = "+%F-%H-%M-%S-%z";
doInit = true;
encryption = {
inherit passCommand;
mode = "repokey-blake2";
};
extraCreateArgs =
let
args = lib.flatten [
(builtins.map
(patternFile: "--patterns-from ${lib.escapeShellArg patternFile}")
patternFiles)
(builtins.map
(pattern: "--pattern ${lib.escapeShellArg pattern}")
patterns)
];
in
lib.concatStringsSep " " args;
extraInitArgs = "--make-parent-dirs";
persistentTimer = true;
preHook = ''
extraCreateArgs="$extraCreateArgs --stats"
'';
prune.keep = {
weekly = 4;
monthly = 12;
yearly = 6;
};
startAt = "monthly";
environment.BORG_RSH = "ssh -i ${config.sops.secrets."borg/ssh-key".path}";
};
in
{
options.hosts.plover.services.backup.enable = lib.mkEnableOption "backup service";
config = lib.mkIf cfg.enable {
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"borg/repos/host/patterns/keys" = { };
"borg/repos/host/password" = { };
"borg/repos/services/password" = { };
"borg/ssh-key" = { };
};
services.borgbackup.jobs = {
# Backup for host-specific files. They don't change much so it is
# acceptable for them to be backed up monthly.
host-backup = jobCommonSettings {
patternFiles = [
config.sops.secrets."borg/repos/host/patterns/keys".path
];
repo = borgRepo "host";
passCommand = "cat ${config.sops.secrets."borg/repos/host/password".path}";
};
# Backups for various services.
services-backup = jobCommonSettings
{
paths = [
# ACME accounts and TLS certificates
"/var/lib/acme"
];
repo = borgRepo "services";
passCommand = "cat ${config.sops.secrets."borg/repos/services/password".path}";
} // { startAt = "daily"; };
};
programs.ssh.extraConfig = ''
Host ${hetzner-boxes-server}
IdentityFile ${config.sops.secrets."borg/ssh-key".path}
'';
};
}
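For reference, the jobCommonSettings helper above just flattens the pattern arguments into one string of Borg flags. A rough sketch of what extraCreateArgs works out to, with hypothetical inputs (the paths are placeholders, not real secrets from this commit):

jobCommonSettings {
  patternFiles = [ "/run/secrets/borg-patterns" ];
  patterns = [ "- /var/cache" ];
  repo = borgRepo "example";
  passCommand = "cat /run/secrets/borg-password";
}
# extraCreateArgs then evaluates to:
#   --patterns-from '/run/secrets/borg-patterns' --pattern '- /var/cache'
# and preHook appends --stats to it at runtime.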

View File

@ -1,347 +0,0 @@
# The DNS server for my domains. Take note it uses a hidden master setup with
# the secondary nameservers of the service (as of 2023-10-05, we're using
# Hetzner's secondary nameservers).
{ config, lib, pkgs, ... }:
let
inherit (config.networking) domain fqdn;
inherit (import ../hardware/networks.nix) interfaces clientNetworks serverNetworks secondaryNameServers;
secondaryNameServersIPs = lib.foldl'
(total: addresses: total ++ addresses.IPv4 ++ addresses.IPv6)
[ ]
(lib.attrValues secondaryNameServers);
domainZone = pkgs.substituteAll {
src = ../../config/dns/${domain}.zone;
ploverWANIPv4 = interfaces.wan.IPv4.address;
ploverWANIPv6 = interfaces.wan.IPv6.address;
};
fqdnZone = pkgs.substituteAll {
src = ../../config/dns/${fqdn}.zone;
ploverLANIPv4 = interfaces.lan.IPv4.address;
ploverLANIPv6 = interfaces.lan.IPv6.address;
};
zonesDir = "/etc/bind/zones";
zoneFile = domain: "${zonesDir}/${domain}.zone";
dnsSubdomain = "ns1.${domain}";
dnsOverHTTPSPort = 8443;
in
{
sops.secrets =
let
dnsFileAttribute = {
owner = config.users.users.named.name;
group = config.users.users.named.group;
mode = "0400";
};
in
lib.getSecrets ../../secrets/secrets.yaml {
"dns/${domain}/mailbox-security-key" = dnsFileAttribute;
"dns/${domain}/mailbox-security-key-record" = dnsFileAttribute;
"dns/${domain}/keybase-verification-key" = dnsFileAttribute;
"dns/${domain}/rfc2136-key" = dnsFileAttribute // {
reloadUnits = [ "bind.service" ];
};
};
# Install the utilities.
environment.systemPackages = [ config.services.bind.package ];
services.bind = {
enable = true;
forward = "first";
cacheNetworks = [
"127.0.0.1"
"::1"
];
listenOn = [
"127.0.0.1"
interfaces.lan.IPv4.address
interfaces.wan.IPv4.address
];
listenOnIpv6 = [
"::1"
interfaces.lan.IPv6.address
interfaces.wan.IPv6.address
];
# Welp, since the template is pretty limited, we'll have to go with our
# own. This is partially based on the NixOS Bind module except without
# the template for filling in zones since we use views.
configFile =
let
cfg = config.services.bind;
certDir = path: "/run/credentials/bind.service/${path}";
listenInterfaces = lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOn;
listenInterfacesIpv6 = lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOnIpv6;
in
pkgs.writeText "named.conf" ''
include "/etc/bind/rndc.key";
include "${config.sops.secrets."dns/${domain}/rfc2136-key".path}";
controls {
inet 127.0.0.1 allow {localhost;} keys {"rndc-key";};
};
tls ${dnsSubdomain} {
key-file "${certDir "key.pem"}";
cert-file "${certDir "cert.pem"}";
dhparam-file "${config.security.dhparams.params.bind.path}";
ciphers "HIGH:!kRSA:!aNULL:!eNULL:!RC4:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS:!SHA1:!SHA256:!SHA384";
prefer-server-ciphers yes;
session-tickets no;
};
http ${dnsSubdomain} {
endpoints { "/dns-query"; };
};
acl trusted { ${lib.concatStringsSep "; " (clientNetworks ++ serverNetworks)}; localhost; };
acl cachenetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.cacheNetworks} };
acl badnetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.blockedNetworks} };
options {
# Native DNS.
listen-on { ${listenInterfaces} };
listen-on-v6 { ${listenInterfacesIpv6} };
# DNS-over-TLS.
listen-on tls ${dnsSubdomain} { ${listenInterfaces} };
listen-on-v6 tls ${dnsSubdomain} { ${listenInterfacesIpv6} };
# DNS-over-HTTPS.
https-port ${builtins.toString dnsOverHTTPSPort};
listen-on tls ${dnsSubdomain} http ${dnsSubdomain} { ${listenInterfaces} };
listen-on-v6 tls ${dnsSubdomain} http ${dnsSubdomain} { ${listenInterfacesIpv6} };
allow-query { cachenetworks; };
blackhole { badnetworks; };
forward ${cfg.forward};
forwarders { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
directory "${cfg.directory}";
pid-file "/run/named/named.pid";
};
view internal {
match-clients { trusted; };
allow-query { any; };
allow-recursion { any; };
// We'll use systemd-resolved as our forwarder.
forwarders { 127.0.0.53 port 53; };
zone "${fqdn}" {
type primary;
file "${zoneFile fqdn}";
};
zone "${domain}" {
type primary;
file "${zoneFile domain}";
allow-transfer { ${lib.concatStringsSep "; " secondaryNameServersIPs}; };
update-policy {
grant rfc2136key.${domain}. zonesub TXT;
};
};
};
view external {
match-clients { any; };
forwarders { };
empty-zones-enable yes;
allow-query { any; };
allow-recursion { none; };
zone "${domain}" {
in-view internal;
};
};
${cfg.extraConfig}
'';
};
systemd.services.bind = {
path = with pkgs; [ replace-secret ];
preStart =
let
domainZone' = zoneFile domain;
fqdnZone' = zoneFile fqdn;
secretPath = path: config.sops.secrets."dns/${path}".path;
rndc = lib.getExe' config.services.bind.package "rndc";
in
lib.mkAfter ''
# Install the domain zone.
{
install -Dm0600 '${domainZone}' '${domainZone'}'
replace-secret '#mailboxSecurityKey#' '${secretPath "${domain}/mailbox-security-key"}' '${domainZone'}'
replace-secret '#mailboxSecurityKeyRecord#' '${secretPath "${domain}/mailbox-security-key-record"}' '${domainZone'}'
#${rndc} sync "${domain}" IN external
}
# Install the internal DNS zones.
install -Dm0600 '${fqdnZone}' '${fqdnZone'}'
'';
serviceConfig = {
# Additional service hardening. You can see most of the options from
# systemd.exec(5) manual. Run it as an unprivileged user.
User = config.users.users.named.name;
Group = config.users.users.named.group;
UMask = "0037";
# Get the credentials into the service.
LoadCredential =
let
certDirectory = config.security.acme.certs."${dnsSubdomain}".directory;
certCredentialPath = path: "${path}:${certDirectory}/${path}";
in
[
(certCredentialPath "cert.pem")
(certCredentialPath "key.pem")
(certCredentialPath "fullchain.pem")
];
LogFilterPatterns = [
# systemd-resolved doesn't have DNS cookie support, it seems.
"~missing expected cookie from 127.0.0.53#53"
];
# Lock and protect various system components.
LockPersonality = true;
PrivateTmp = true;
NoNewPrivileges = true;
RestrictSUIDSGID = true;
ProtectHome = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
ProtectProc = "invisible";
# Make the filesystem invisible to the service.
ProtectSystem = "strict";
ReadWritePaths = [
config.services.bind.directory
"/etc/bind"
];
ReadOnlyPaths = [
config.security.dhparams.params.bind.path
config.security.acme.certs."${dnsSubdomain}".directory
];
# Set up writable directories.
RuntimeDirectory = "named";
RuntimeDirectoryMode = "0750";
CacheDirectory = "named";
CacheDirectoryMode = "0750";
ConfigurationDirectory = "bind";
ConfigurationDirectoryMode = "0755";
# Filtering system calls.
SystemCallFilter = [ "@system-service" ];
SystemCallErrorNumber = "EPERM";
SystemCallArchitectures = "native";
# Granting and restricting its capabilities. Take note we're not using
# syslog for this even if the application can, so no syslog capability.
# Additionally, we're omitting the program's ability to chroot and
# chown since the user and the directories are already configured.
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
# Restrict what address families it can access.
RestrictAddressFamilies = [
"AF_LOCAL"
"AF_NETLINK"
"AF_BRIDGE"
"AF_INET"
"AF_INET6"
];
# Restricting what namespaces it can create.
RestrictNamespaces = true;
};
};
# Set up the firewall. Take note of the ports and the transport layers
# accepted by Bind.
networking.firewall =
let
ports = [
53 # DNS
853 # DNS-over-TLS/DNS-over-QUIC
];
in
{
allowedUDPPorts = ports;
allowedTCPPorts = ports;
};
# Making this with nginx.
services.nginx.upstreams.local-dns = {
extraConfig = ''
zone dns 64k;
'';
servers = {
"127.0.0.1:${builtins.toString dnsOverHTTPSPort}" = { };
};
};
services.nginx.virtualHosts."${dnsSubdomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
extraConfig = ''
add_header Strict-Transport-Security max-age=31536000;
'';
kTLS = true;
locations = {
"/".return = "444";
"/dns-query".extraConfig = ''
grpc_pass grpcs://local-dns;
grpc_socket_keepalive on;
grpc_connect_timeout 10s;
grpc_ssl_verify off;
grpc_ssl_protocols TLSv1.3 TLSv1.2;
'';
};
};
services.nginx.streamConfig = ''
upstream dns_servers {
server localhost:53;
}
server {
listen 53 udp reuseport;
proxy_timeout 20s;
proxy_pass dns_servers;
}
'';
# Then generate a DH parameter for the application.
security.dhparams.params.bind.bits = 4096;
# Set up a fail2ban jail with the named-refused filter which is apparently already available in the package.
services.fail2ban.jails."named-refused".settings = {
enabled = true;
backend = "systemd";
filter = "named-refused[journalmatch='_SYSTEMD_UNIT=bind.service']";
maxretry = 3;
};
# Add the following to be backed up.
services.borgbackup.jobs.services-backup.paths = [ zonesDir ];
}

View File

@ -1,79 +0,0 @@
{ config, lib, pkgs, ... }:
let
# The head of the Borgbase hostname.
hetzner-boxes-user = "u332477";
hetzner-boxes-server = "${hetzner-boxes-user}.your-storagebox.de";
borgRepo = path: "ssh://${hetzner-boxes-user}@${hetzner-boxes-server}:23/./borg/plover/${path}";
jobCommonSettings = { patternFiles ? [ ], patterns ? [ ], paths ? [ ], repo, passCommand }: {
inherit paths repo;
compression = "zstd,11";
dateFormat = "+%F-%H-%M-%S-%z";
doInit = true;
encryption = {
inherit passCommand;
mode = "repokey-blake2";
};
extraCreateArgs =
let
args = lib.flatten [
(builtins.map
(patternFile: "--patterns-from ${lib.escapeShellArg patternFile}")
patternFiles)
(builtins.map
(pattern: "--pattern ${lib.escapeShellArg pattern}")
patterns)
];
in
lib.concatStringsSep " " args;
extraInitArgs = "--make-parent-dirs";
persistentTimer = true;
preHook = ''
extraCreateArgs="$extraCreateArgs --stats"
'';
prune.keep = {
weekly = 4;
monthly = 12;
yearly = 6;
};
startAt = "monthly";
environment.BORG_RSH = "ssh -i ${config.sops.secrets."borg/ssh-key".path}";
};
in
{
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"borg/repos/host/patterns/keys" = { };
"borg/repos/host/password" = { };
"borg/repos/services/password" = { };
"borg/ssh-key" = { };
};
services.borgbackup.jobs = {
# Backup for host-specific files. They don't change much so it is
# acceptable for them to be backed up monthly.
host-backup = jobCommonSettings {
patternFiles = [
config.sops.secrets."borg/repos/host/patterns/keys".path
];
repo = borgRepo "host";
passCommand = "cat ${config.sops.secrets."borg/repos/host/password".path}";
};
# Backups for various services.
services-backup = jobCommonSettings
{
paths = [
# ACME accounts and TLS certificates
"/var/lib/acme"
];
repo = borgRepo "services";
passCommand = "cat ${config.sops.secrets."borg/repos/services/password".path}";
} // { startAt = "daily"; };
};
programs.ssh.extraConfig = ''
Host ${hetzner-boxes-server}
IdentityFile ${config.sops.secrets."borg/ssh-key".path}
'';
}

View File

@ -0,0 +1,92 @@
# The database service of choice. Most services can use this so far
# (thankfully).
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.database;
postgresqlDomain = "postgres.${config.networking.domain}";
in
{
options.hosts.plover.services.database.enable = lib.mkEnableOption "preferred service SQL database";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
services.postgresql = {
enable = true;
package = pkgs.postgresql_16;
enableTCPIP = true;
# Create per-user schemas as documented in the PostgreSQL usage patterns. This
# is to make use of the secure schema usage pattern they encourage.
#
# Now, you just have to keep in mind about applications making use of them.
# Most of them should have the setting to set the schema to be used. If
# not, then screw them (or just file an issue and politely ask for the
# feature).
initialScript =
let
# This will be run once anyways so it is acceptable to create users
# "forcibly".
perUserSchemas = lib.lists.map
(user: ''
CREATE USER ${user.name};
CREATE SCHEMA AUTHORIZATION ${user.name};
'')
config.services.postgresql.ensureUsers;
in
pkgs.writeText "plover-initial-postgresql-script" ''
${lib.concatStringsSep "\n" perUserSchemas}
'';
settings =
let
credsDir = path: "/run/credentials/postgresql.service/${path}";
in
{
# Still doing the secure schema usage pattern.
search_path = ''"$user"'';
ssl_cert_file = credsDir "cert.pem";
ssl_key_file = credsDir "key.pem";
ssl_ca_file = credsDir "fullchain.pem";
};
};
# With a database comes a dumping.
services.postgresqlBackup = {
enable = true;
compression = "zstd";
compressionLevel = 11;
# Run every 3 days starting from the first day of the month.
startAt = "*-*-1/3";
};
# Setting this up for TLS.
systemd.services.postgresql = {
requires = [ "acme-finished-${postgresqlDomain}.target" ];
serviceConfig.LoadCredential =
let
certDirectory = config.security.acme.certs."${postgresqlDomain}".directory;
certCredentialPath = path: "${path}:${certDirectory}/${path}";
in
[
(certCredentialPath "cert.pem")
(certCredentialPath "key.pem")
(certCredentialPath "fullchain.pem")
];
};
security.acme.certs."${postgresqlDomain}".postRun = ''
systemctl restart postgresql.service
'';
}
(lib.mkIf hostCfg.services.backup.enable {
# Add the dumps to be backed up.
services.borgbackup.jobs.services-backup.paths = [ config.services.postgresqlBackup.location ];
})
]);
}
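To make the per-user schema generation above concrete, here is a small standalone sketch of what initialScript ends up containing (the gitea and grafana names are stand-ins for whatever lands in services.postgresql.ensureUsers):

let
  lib = (import <nixpkgs> { }).lib;
  # Hypothetical stand-in for config.services.postgresql.ensureUsers.
  ensureUsers = [ { name = "gitea"; } { name = "grafana"; } ];
  perUserSchemas = lib.lists.map
    (user: ''
      CREATE USER ${user.name};
      CREATE SCHEMA AUTHORIZATION ${user.name};
    '')
    ensureUsers;
in
# Joined together, this is the SQL handed to PostgreSQL on first start:
#   CREATE USER gitea;
#   CREATE SCHEMA AUTHORIZATION gitea;
#   CREATE USER grafana;
#   CREATE SCHEMA AUTHORIZATION grafana;
lib.concatStringsSep "\n" perUserSchemas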

View File

@ -0,0 +1,365 @@
# The DNS server for my domains. Take note it uses a hidden master setup with
# the secondary nameservers of the service (as of 2023-10-05, we're using
# Hetzner's secondary nameservers).
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.dns-server;
inherit (config.networking) domain fqdn;
inherit (import ../hardware/networks.nix) interfaces clientNetworks serverNetworks secondaryNameServers;
secondaryNameServersIPs = lib.foldl'
(total: addresses: total ++ addresses.IPv4 ++ addresses.IPv6)
[ ]
(lib.attrValues secondaryNameServers);
domainZone = pkgs.substituteAll {
src = ../../config/dns/${domain}.zone;
ploverWANIPv4 = interfaces.wan.IPv4.address;
ploverWANIPv6 = interfaces.wan.IPv6.address;
};
fqdnZone = pkgs.substituteAll {
src = ../../config/dns/${fqdn}.zone;
ploverLANIPv4 = interfaces.lan.IPv4.address;
ploverLANIPv6 = interfaces.lan.IPv6.address;
};
zonesDir = "/etc/bind/zones";
zoneFile = domain: "${zonesDir}/${domain}.zone";
dnsSubdomain = "ns1.${domain}";
dnsOverHTTPSPort = 8443;
in
{
options.hosts.plover.services.dns-server.enable = lib.mkEnableOption "preferred DNS server";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
sops.secrets =
let
dnsFileAttribute = {
owner = config.users.users.named.name;
group = config.users.users.named.group;
mode = "0400";
};
in
lib.getSecrets ../../secrets/secrets.yaml {
"dns/${domain}/mailbox-security-key" = dnsFileAttribute;
"dns/${domain}/mailbox-security-key-record" = dnsFileAttribute;
"dns/${domain}/keybase-verification-key" = dnsFileAttribute;
"dns/${domain}/rfc2136-key" = dnsFileAttribute // {
reloadUnits = [ "bind.service" ];
};
};
# Install the utilities.
environment.systemPackages = [ config.services.bind.package ];
services.bind = {
enable = true;
forward = "first";
cacheNetworks = [
"127.0.0.1"
"::1"
];
listenOn = [
"127.0.0.1"
interfaces.lan.IPv4.address
interfaces.wan.IPv4.address
];
listenOnIpv6 = [
"::1"
interfaces.lan.IPv6.address
interfaces.wan.IPv6.address
];
# Welp, since the template is pretty limited, we'll have to go with our
# own. This is partially based on the NixOS Bind module except without
# the template for filling in zones since we use views.
configFile =
let
cfg = config.services.bind;
certDir = path: "/run/credentials/bind.service/${path}";
listenInterfaces = lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOn;
listenInterfacesIpv6 = lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOnIpv6;
in
pkgs.writeText "named.conf" ''
include "/etc/bind/rndc.key";
include "${config.sops.secrets."dns/${domain}/rfc2136-key".path}";
controls {
inet 127.0.0.1 allow {localhost;} keys {"rndc-key";};
};
tls ${dnsSubdomain} {
key-file "${certDir "key.pem"}";
cert-file "${certDir "cert.pem"}";
dhparam-file "${config.security.dhparams.params.bind.path}";
ciphers "HIGH:!kRSA:!aNULL:!eNULL:!RC4:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS:!SHA1:!SHA256:!SHA384";
prefer-server-ciphers yes;
session-tickets no;
};
http ${dnsSubdomain} {
endpoints { "/dns-query"; };
};
acl trusted { ${lib.concatStringsSep "; " (clientNetworks ++ serverNetworks)}; localhost; };
acl cachenetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.cacheNetworks} };
acl badnetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.blockedNetworks} };
options {
# Native DNS.
listen-on { ${listenInterfaces} };
listen-on-v6 { ${listenInterfacesIpv6} };
# DNS-over-TLS.
listen-on tls ${dnsSubdomain} { ${listenInterfaces} };
listen-on-v6 tls ${dnsSubdomain} { ${listenInterfacesIpv6} };
# DNS-over-HTTPS.
https-port ${builtins.toString dnsOverHTTPSPort};
listen-on tls ${dnsSubdomain} http ${dnsSubdomain} { ${listenInterfaces} };
listen-on-v6 tls ${dnsSubdomain} http ${dnsSubdomain} { ${listenInterfacesIpv6} };
allow-query { cachenetworks; };
blackhole { badnetworks; };
forward ${cfg.forward};
forwarders { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
directory "${cfg.directory}";
pid-file "/run/named/named.pid";
};
view internal {
match-clients { trusted; };
allow-query { any; };
allow-recursion { any; };
// We'll use systemd-resolved as our forwarder.
forwarders { 127.0.0.53 port 53; };
zone "${fqdn}" {
type primary;
file "${zoneFile fqdn}";
};
zone "${domain}" {
type primary;
file "${zoneFile domain}";
allow-transfer { ${lib.concatStringsSep "; " secondaryNameServersIPs}; };
update-policy {
grant rfc2136key.${domain}. zonesub TXT;
};
};
};
view external {
match-clients { any; };
forwarders { };
empty-zones-enable yes;
allow-query { any; };
allow-recursion { none; };
zone "${domain}" {
in-view internal;
};
};
${cfg.extraConfig}
'';
};
systemd.services.bind = {
path = with pkgs; [ replace-secret ];
preStart =
let
domainZone' = zoneFile domain;
fqdnZone' = zoneFile fqdn;
secretPath = path: config.sops.secrets."dns/${path}".path;
rndc = lib.getExe' config.services.bind.package "rndc";
in
lib.mkAfter ''
# Install the domain zone.
{
install -Dm0600 '${domainZone}' '${domainZone'}'
replace-secret '#mailboxSecurityKey#' '${secretPath "${domain}/mailbox-security-key"}' '${domainZone'}'
replace-secret '#mailboxSecurityKeyRecord#' '${secretPath "${domain}/mailbox-security-key-record"}' '${domainZone'}'
#${rndc} sync "${domain}" IN external
}
# Install the internal DNS zones.
install -Dm0600 '${fqdnZone}' '${fqdnZone'}'
'';
serviceConfig = {
# Additional service hardening. You can see most of the options from
# systemd.exec(5) manual. Run it as an unprivileged user.
User = config.users.users.named.name;
Group = config.users.users.named.group;
UMask = "0037";
# Get the credentials into the service.
LoadCredential =
let
certDirectory = config.security.acme.certs."${dnsSubdomain}".directory;
certCredentialPath = path: "${path}:${certDirectory}/${path}";
in
[
(certCredentialPath "cert.pem")
(certCredentialPath "key.pem")
(certCredentialPath "fullchain.pem")
];
LogFilterPatterns = [
# systemd-resolved doesn't have DNS cookie support, it seems.
"~missing expected cookie from 127.0.0.53#53"
];
# Lock and protect various system components.
LockPersonality = true;
PrivateTmp = true;
NoNewPrivileges = true;
RestrictSUIDSGID = true;
ProtectHome = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
ProtectProc = "invisible";
# Make the filesystem invisible to the service.
ProtectSystem = "strict";
ReadWritePaths = [
config.services.bind.directory
"/etc/bind"
];
ReadOnlyPaths = [
config.security.dhparams.params.bind.path
config.security.acme.certs."${dnsSubdomain}".directory
];
# Set up writable directories.
RuntimeDirectory = "named";
RuntimeDirectoryMode = "0750";
CacheDirectory = "named";
CacheDirectoryMode = "0750";
ConfigurationDirectory = "bind";
ConfigurationDirectoryMode = "0755";
# Filtering system calls.
SystemCallFilter = [ "@system-service" ];
SystemCallErrorNumber = "EPERM";
SystemCallArchitectures = "native";
# Granting and restricting its capabilities. Take note we're not using
# syslog for this even if the application can, so no syslog capability.
# Additionally, we're omitting the program's ability to chroot and
# chown since the user and the directories are already configured.
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
# Restrict what address families it can access.
RestrictAddressFamilies = [
"AF_LOCAL"
"AF_NETLINK"
"AF_BRIDGE"
"AF_INET"
"AF_INET6"
];
# Restricting what namespaces it can create.
RestrictNamespaces = true;
};
};
# Then generate a DH parameter for the application.
security.dhparams.params.bind.bits = 4096;
}
(lib.mkIf hostCfg.services.reverse-proxy.enable {
# Making this with nginx.
services.nginx.upstreams.local-dns = {
extraConfig = ''
zone dns 64k;
'';
servers = {
"127.0.0.1:${builtins.toString dnsOverHTTPSPort}" = { };
};
};
services.nginx.virtualHosts."${dnsSubdomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
extraConfig = ''
add_header Strict-Transport-Security max-age=31536000;
'';
kTLS = true;
locations = {
"/".return = "444";
"/dns-query".extraConfig = ''
grpc_pass grpcs://local-dns;
grpc_socket_keepalive on;
grpc_connect_timeout 10s;
grpc_ssl_verify off;
grpc_ssl_protocols TLSv1.3 TLSv1.2;
'';
};
};
services.nginx.streamConfig = ''
upstream dns_servers {
server localhost:53;
}
server {
listen 53 udp reuseport;
proxy_timeout 20s;
proxy_pass dns_servers;
}
'';
})
# Set up the firewall. Take note of the ports and the transport layers
# accepted by Bind.
(lib.mkIf hostCfg.services.firewall.enable {
networking.firewall =
let
ports = [
53 # DNS
853 # DNS-over-TLS/DNS-over-QUIC
];
in
{
allowedUDPPorts = ports;
allowedTCPPorts = ports;
};
})
# Add the following to be backed up.
(lib.mkIf hostCfg.services.backup.enable {
services.borgbackup.jobs.services-backup.paths = [ zonesDir ];
})
# Set up a fail2ban jail with the named-refused filter which is apparently already available in the package.
(lib.mkIf hostCfg.services.fail2ban.enable {
services.fail2ban.jails."named-refused".settings = {
enabled = true;
backend = "systemd";
filter = "named-refused[journalmatch='_SYSTEMD_UNIT=bind.service']";
maxretry = 3;
};
})
]);
}

View File

@ -0,0 +1,29 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.fail2ban;
inherit (import ../hardware/networks.nix) interfaces;
in
{
options.hosts.plover.services.fail2ban.enable = lib.mkEnableOption "fail2ban monitoring";
config = lib.mkIf cfg.enable {
services.fail2ban = {
enable = true;
ignoreIP = [
# VPN clients.
"${interfaces.wireguard0.IPv4.address}/13"
"${interfaces.wireguard0.IPv6.address}/64"
];
# We're going to be unforgiving with this one since we only have key
# authentication and password authentication is disabled anyway.
jails.sshd.settings = {
enabled = true;
maxretry = 1;
};
};
};
}

View File

@ -0,0 +1,22 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.firewall;
in
{
options.hosts.plover.services.firewall.enable = lib.mkEnableOption "firewall setup";
config = lib.mkIf cfg.enable {
networking = {
nftables.enable = true;
domain = "foodogsquared.one";
firewall = {
enable = true;
allowedTCPPorts = [
22 # Secure Shells.
];
};
};
};
}

View File

@ -5,226 +5,242 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.gitea;
codeForgeDomain = "code.${config.networking.domain}";
giteaUser = config.users.users."${config.services.gitea.user}".name;
giteaDatabaseUser = config.services.gitea.user;
in
{
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"gitea/db/password".owner = giteaUser;
"gitea/smtp/password".owner = giteaUser;
};
options.hosts.plover.services.gitea.enable = lib.mkEnableOption "Gitea server for ${config.networking.domain}";
services.gitea = {
enable = true;
appName = "foodogsquared's code forge";
database = {
type = "postgres";
passwordFile = config.sops.secrets."gitea/db/password".path;
};
# Allow Gitea to take a dump.
dump = {
enable = true;
interval = "weekly";
};
# There are a lot of services on port 3000 so we'll change it.
lfs.enable = true;
mailerPasswordFile = config.sops.secrets."gitea/smtp/password".path;
# You can see the available configuration options at
# https://docs.gitea.io/en-us/config-cheat-sheet/.
settings = {
server = {
ROOT_URL = "https://${codeForgeDomain}";
HTTP_PORT = 8432;
DOMAIN = codeForgeDomain;
config = lib.mkIf cfg.enable (lib.mkMerge [
{
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"gitea/db/password".owner = giteaUser;
"gitea/smtp/password".owner = giteaUser;
};
"repository.pull_request" = {
WORK_IN_PROGRESS_PREFIXES = "WIP:,[WIP],DRAFT,[DRAFT]";
ADD_CO_COMMITTERS_TRAILERS = true;
services.gitea = {
enable = true;
appName = "foodogsquared's code forge";
database = {
type = "postgres";
passwordFile = config.sops.secrets."gitea/db/password".path;
};
# Allow Gitea to take a dump.
dump = {
enable = true;
interval = "weekly";
};
# There are a lot of services on port 3000 so we'll change it.
lfs.enable = true;
mailerPasswordFile = config.sops.secrets."gitea/smtp/password".path;
# You can see the available configuration options at
# https://docs.gitea.io/en-us/config-cheat-sheet/.
settings = {
server = {
ROOT_URL = "https://${codeForgeDomain}";
HTTP_PORT = 8432;
DOMAIN = codeForgeDomain;
};
"repository.pull_request" = {
WORK_IN_PROGRESS_PREFIXES = "WIP:,[WIP],DRAFT,[DRAFT]";
ADD_CO_COMMITTERS_TRAILERS = true;
};
ui = {
DEFAULT_THEME = "auto";
EXPLORE_PAGING_SUM = 15;
GRAPH_MAX_COMMIT_NUM = 200;
};
"ui.meta" = {
AUTHOR = "foodogsquared's code forge";
DESCRIPTION = "foodogsquared's personal projects and some archived and mirrored codebases.";
KEYWORDS = "foodogsquared,gitea,self-hosted";
};
# It's a personal instance so nah...
service.DISABLE_REGISTRATION = true;
repository = {
ENABLE_PUSH_CREATE_USER = true;
DEFAULT_PRIVATE = "public";
DEFAULT_PRIVATE_PUSH_CREATE = true;
};
"markup.asciidoc" = {
ENABLED = true;
NEED_POSTPROCESS = true;
FILE_EXTENSIONS = ".adoc,.asciidoc";
RENDER_COMMAND = "${pkgs.asciidoctor}/bin/asciidoctor --embedded --out-file=- -";
IS_INPUT_FILE = false;
};
# Mailer service.
mailer = {
ENABLED = true;
PROTOCOL = "smtp+starttls";
SMTP_ADDRESS = "smtp.sendgrid.net";
SMTP_PORT = 587;
USER = "apikey";
FROM = "bot+gitea@foodogsquared.one";
SEND_AS_PLAIN_TEXT = true;
SENDMAIL_PATH = "${pkgs.system-sendmail}/bin/sendmail";
};
# Reduce what the logs get filled with. Keep in mind this also has to be
# coordinated with the fail2ban configuration.
log.LEVEL = "Warn";
# Well, collaboration between forges is nice...
federation.ENABLED = true;
# Enable mirroring feature...
mirror.ENABLED = true;
# Session configuration.
session.COOKIE_SECURE = true;
# Some more database configuration.
database.SCHEMA = config.services.gitea.user;
# Run various periodic services.
"cron.update_mirrors".SCHEDULE = "@every 3h";
other = {
SHOW_FOOTER_VERSION = true;
ENABLE_SITEMAP = true;
ENABLE_FEED = true;
};
};
};
ui = {
DEFAULT_THEME = "auto";
EXPLORE_PAGING_SUM = 15;
GRAPH_MAX_COMMIT_NUM = 200;
# Disk space is always assumed to be limited so we're really only limited
# to 2 dumps.
systemd.services.gitea-dump.preStart = lib.mkAfter ''
${pkgs.findutils}/bin/find ${lib.escapeShellArg config.services.gitea.dump.backupDir} \
-maxdepth 1 -type f -iname '*.${config.services.gitea.dump.type}' -ctime 21 \
| tail -n -3 | xargs rm
'';
# Customizing Gitea, for which you can see more details at
# https://docs.gitea.io/en-us/customizing-gitea/. We're just using
# systemd-tmpfiles to make this work, which is pretty convenient.
systemd.tmpfiles.rules =
let
# To be used similarly to $GITEA_CUSTOM variable.
giteaCustomDir = config.services.gitea.customDir;
in
[
"L+ ${giteaCustomDir}/templates/home.tmpl - - - - ${../../files/gitea/home.tmpl}"
"L+ ${giteaCustomDir}/public/img/logo.svg - - - - ${../../files/gitea/logo.svg}"
"L+ ${giteaCustomDir}/public/img/logo.png - - - - ${../../files/gitea/logo.png}"
];
}
(lib.mkIf hostCfg.services.database.enable {
# Making sure this plays nicely with the database service of choice. Take
# note, we're mainly using the secure schema usage pattern here as described in
# the PostgreSQL documentation at
# https://www.postgresql.org/docs/15/ddl-schemas.html#DDL-SCHEMAS-PATTERNS.
services.postgresql = {
ensureUsers = [{
name = config.services.gitea.user;
ensurePermissions = {
"SCHEMA ${config.services.gitea.user}" = "ALL PRIVILEGES";
};
}];
};
"ui.meta" = {
AUTHOR = "foodogsquared's code forge";
DESCRIPTION = "foodogsquared's personal projects and some archived and mirrored codebases.";
KEYWORDS = "foodogsquared,gitea,self-hosted";
# Setting up Gitea for PostgreSQL secure schema usage.
systemd.services.gitea = {
# The Gitea service module will have to set up certain things first, which is
# why our schema setup has to run before it.
preStart =
let
gitea = lib.getExe' config.services.gitea.package "gitea";
giteaAdminUsername = lib.escapeShellArg "foodogsquared";
psql = lib.getExe' config.services.postgresql.package "psql";
in
lib.mkMerge [
(lib.mkBefore ''
# Setting up the appropriate schema for PostgreSQL secure schema usage.
${psql} -tAc "CREATE SCHEMA IF NOT EXISTS AUTHORIZATION ${giteaDatabaseUser};"
'')
(lib.mkAfter ''
# Setting up the administrator account automated.
${gitea} admin user list --admin | grep -q ${giteaAdminUsername} \
|| ${gitea} admin user create \
--username ${giteaAdminUsername} --email foodogsquared@${config.networking.domain} \
--random-password --random-password-length 76 --admin
'')
];
};
})
(lib.mkIf hostCfg.services.reverse-proxy.enable {
# Attaching it altogether with the reverse proxy of choice.
services.nginx.virtualHosts."${codeForgeDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations."/" = {
proxyPass = "http://gitea";
};
extraConfig = ''
proxy_cache ${config.services.nginx.proxyCachePath.apps.keysZoneName};
'';
};
# It's a personal instance so nah...
service.DISABLE_REGISTRATION = true;
services.nginx.upstreams."gitea" = {
extraConfig = ''
zone services;
'';
servers = {
"localhost:${builtins.toString config.services.gitea.settings.server.HTTP_PORT}" = { };
};
};
})
repository = {
ENABLE_PUSH_CREATE_USER = true;
DEFAULT_PRIVATE = "public";
DEFAULT_PRIVATE_PUSH_CREATE = true;
(lib.mkIf hostCfg.services.fail2ban.enable {
# Configuring fail2ban for this service which thankfully has a dedicated page
# at https://docs.gitea.io/en-us/fail2ban-setup/.
services.fail2ban.jails = {
gitea.settings = {
enabled = true;
backend = "systemd";
filter = "gitea[journalmatch='_SYSTEMD_UNIT=gitea.service + _COMM=gitea']";
maxretry = 8;
};
};
"markup.asciidoc" = {
ENABLED = true;
NEED_POSTPROCESS = true;
FILE_EXTENSIONS = ".adoc,.asciidoc";
RENDER_COMMAND = "${pkgs.asciidoctor}/bin/asciidoctor --embedded --out-file=- -";
IS_INPUT_FILE = false;
environment.etc = {
"fail2ban/filter.d/gitea.conf".text = ''
[Includes]
before = common.conf
# Thankfully, Gitea also has a dedicated page for configuring fail2ban
# for the service at https://docs.gitea.io/en-us/fail2ban-setup/
[Definition]
failregex = ^.*(Failed authentication attempt|invalid credentials|Attempted access of unknown user).* from <HOST>
ignoreregex =
'';
};
})
# Mailer service.
mailer = {
ENABLED = true;
PROTOCOL = "smtp+starttls";
SMTP_ADDRESS = "smtp.sendgrid.net";
SMTP_PORT = 587;
USER = "apikey";
FROM = "bot+gitea@foodogsquared.one";
SEND_AS_PLAIN_TEXT = true;
SENDMAIL_PATH = "${pkgs.system-sendmail}/bin/sendmail";
};
# Reduce what the logs get filled with. Keep in mind this also has to be
# coordinated with the fail2ban configuration.
log.LEVEL = "Warn";
# Well, collaboration between forges is nice...
federation.ENABLED = true;
# Enable mirroring feature...
mirror.ENABLED = true;
# Session configuration.
session.COOKIE_SECURE = true;
# Some more database configuration.
database.SCHEMA = config.services.gitea.user;
# Run various periodic services.
"cron.update_mirrors".SCHEDULE = "@every 3h";
other = {
SHOW_FOOTER_VERSION = true;
ENABLE_SITEMAP = true;
ENABLE_FEED = true;
};
};
};
# Disk space is always assumed to be limited so we're really only limited
# to 2 dumps.
systemd.services.gitea-dump.preStart = lib.mkAfter ''
${pkgs.findutils}/bin/find ${lib.escapeShellArg config.services.gitea.dump.backupDir} \
-maxdepth 1 -type f -iname '*.${config.services.gitea.dump.type}' -ctime 21 \
| tail -n -3 | xargs rm
'';
# Making sure this plays nicely with the database service of choice. Take
# note, we're mainly using the secure schema usage pattern here as described in
# the PostgreSQL documentation at
# https://www.postgresql.org/docs/15/ddl-schemas.html#DDL-SCHEMAS-PATTERNS.
services.postgresql = {
ensureUsers = [{
name = config.services.gitea.user;
ensurePermissions = {
"SCHEMA ${config.services.gitea.user}" = "ALL PRIVILEGES";
};
}];
};
# Setting up Gitea for PostgreSQL secure schema usage.
systemd.services.gitea = {
# The Gitea service module will have to set up certain things first, which is
# why our schema setup has to run before it.
preStart =
let
gitea = lib.getExe' config.services.gitea.package "gitea";
giteaAdminUsername = lib.escapeShellArg "foodogsquared";
psql = lib.getExe' config.services.postgresql.package "psql";
in
lib.mkMerge [
(lib.mkBefore ''
# Setting up the appropriate schema for PostgreSQL secure schema usage.
${psql} -tAc "CREATE SCHEMA IF NOT EXISTS AUTHORIZATION ${giteaDatabaseUser};"
'')
(lib.mkAfter ''
# Setting up the administrator account automated.
${gitea} admin user list --admin | grep -q ${giteaAdminUsername} \
|| ${gitea} admin user create \
--username ${giteaAdminUsername} --email foodogsquared@${config.networking.domain} \
--random-password --random-password-length 76 --admin
'')
];
};
# Attaching it altogether with the reverse proxy of choice.
services.nginx.virtualHosts."${codeForgeDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations."/" = {
proxyPass = "http://gitea";
};
extraConfig = ''
proxy_cache ${config.services.nginx.proxyCachePath.apps.keysZoneName};
'';
};
services.nginx.upstreams."gitea" = {
extraConfig = ''
zone services;
'';
servers = {
"localhost:${builtins.toString config.services.gitea.settings.server.HTTP_PORT}" = { };
};
};
# Configuring fail2ban for this service which thankfully has a dedicated page
# at https://docs.gitea.io/en-us/fail2ban-setup/.
services.fail2ban.jails = {
gitea.settings = {
enabled = true;
backend = "systemd";
filter = "gitea[journalmatch='_SYSTEMD_UNIT=gitea.service + _COMM=gitea']";
maxretry = 8;
};
};
environment.etc = {
"fail2ban/filter.d/gitea.conf".text = ''
[Includes]
before = common.conf
# Thankfully, Gitea also has a dedicated page for configuring fail2ban
# for the service at https://docs.gitea.io/en-us/fail2ban-setup/
[Definition]
failregex = ^.*(Failed authentication attempt|invalid credentials|Attempted access of unknown user).* from <HOST>
ignoreregex =
'';
};
# Customizing Gitea, for which you can see more details at
# https://docs.gitea.io/en-us/customizing-gitea/. We're just using
# systemd-tmpfiles to make this work, which is pretty convenient.
systemd.tmpfiles.rules =
let
# To be used similarly to $GITEA_CUSTOM variable.
giteaCustomDir = config.services.gitea.customDir;
in
[
"L+ ${giteaCustomDir}/templates/home.tmpl - - - - ${../../files/gitea/home.tmpl}"
"L+ ${giteaCustomDir}/public/img/logo.svg - - - - ${../../files/gitea/logo.svg}"
"L+ ${giteaCustomDir}/public/img/logo.png - - - - ${../../files/gitea/logo.png}"
];
# Add the following files to be backed up.
services.borgbackup.jobs.services-backup.paths = [ config.services.gitea.dump.backupDir ];
(lib.mkIf hostCfg.services.backup.enable {
# Add the following files to be backed up.
services.borgbackup.jobs.services-backup.paths = [ config.services.gitea.dump.backupDir ];
})
]);
}

View File

@ -1,6 +1,9 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.grafana;
monitoringDomain = "monitoring.${config.networking.domain}";
grafanaDatabaseUser = config.services.grafana.settings.database.user;
grafanaDatabaseName = config.services.grafana.settings.database.name;
@ -12,17 +15,157 @@ let
vouchSettings = config.services.vouch-proxy.instances."${vouchDomain}".settings;
in
{
services.grafana = {
enable = true;
options.hosts.plover.services.grafana.enable = lib.mkEnableOption "monitoring dashboard for ${config.networking.hostName}";
settings = {
auth = {
disable_login_form = true;
login_maximum_inactive_lifetime_duration = "3d";
login_maximum_lifetime_duration = "14d";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
sops.secrets =
let
grafanaFileAttributes = {
owner = config.users.users.grafana.name;
group = config.users.users.grafana.group;
mode = "0400";
};
in
lib.getSecrets ../../secrets/secrets.yaml {
"grafana/database/password" = grafanaFileAttributes;
"grafana/users/admin/password" = grafanaFileAttributes;
};
services.grafana = {
enable = true;
settings = {
auth = {
disable_login_form = true;
login_maximum_inactive_lifetime_duration = "3d";
login_maximum_lifetime_duration = "14d";
};
database = rec {
host = "127.0.0.1:${builtins.toString config.services.postgresql.port}";
password = "$__file{${config.sops.secrets."grafana/database/password".path}}";
type = "postgres";
name = "grafana";
user = name;
};
log = {
level = "warn";
mode = "syslog";
};
security = {
admin_email = config.security.acme.defaults.email;
admin_password = "$__file{${config.sops.secrets."grafana/users/admin/password".path}}";
cookie_secure = true;
csrf_trusted_origins = [
vouchDomain
"auth.${config.networking.domain}"
];
strict_transport_security = true;
strict_transport_security_subdomains = true;
};
users = {
default_theme = "system";
default_language = "detect";
};
server = {
enable_gzip = true;
enforce_domain = true;
http_addr = "127.0.0.1";
http_port = 3000;
root_url = "${monitoringDomain}/grafana";
serve_from_sub_path = true;
};
};
};
}
(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.virtualHosts."${monitoringDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
extraConfig = ''
auth_request /validate;
# If the user is not logged in, redirect them to Vouch's login URL
error_page 401 = @error401;
location @error401 {
return 302 http://${vouchDomain}/login?url=$scheme://$http_host$request_uri&vouch-failcount=$auth_resp_failcount&X-Vouch-Token=$auth_resp_jwt&error=$auth_resp_err;
}
'';
locations = {
"= /validate" = {
proxyPass = "http://${vouchSettings.vouch.listen}:${builtins.toString vouchSettings.vouch.port}";
extraConfig = ''
proxy_pass_request_body off;
# These will be passed to the @error401 call.
auth_request_set $auth_resp_x_vouch_user $upstream_http_x_vouch_user;
auth_request_set $auth_resp_jwt $upstream_http_x_vouch_jwt;
auth_request_set $auth_resp_err $upstream_http_x_vouch_err;
auth_request_set $auth_resp_failcount $upstream_http_x_vouch_failcount;
'';
};
# Make Grafana the default redirect target.
"= /".return = "301 /grafana";
# Serving Grafana with a subpath.
"/grafana" = {
proxyPass = "http://grafana";
extraConfig = ''
proxy_set_header X-Vouch-User $auth_resp_x_vouch_user;
'';
};
};
};
"auth.generic_oauth" = {
services.nginx.upstreams."grafana" = {
extraConfig = ''
zone services;
'';
servers = {
"localhost:${builtins.toString config.services.grafana.settings.server.http_port}" = { };
};
};
})
(lib.mkIf hostCfg.services.database.enable {
# Setting up with secure schema usage pattern.
systemd.services.grafana = {
preStart =
let
psql = lib.getExe' config.services.postgresql.package "psql";
in
lib.mkBefore ''
# Setting up the appropriate schema for PostgreSQL secure schema usage.
${psql} -tAc "CREATE SCHEMA IF NOT EXISTS AUTHORIZATION ${grafanaDatabaseUser};"
'';
};
# Setting up PostgreSQL with secure schema.
services.postgresql = {
ensureDatabases = [ grafanaDatabaseName ];
ensureUsers = [{
name = grafanaDatabaseName;
ensurePermissions = {
"DATABASE ${grafanaDatabaseName}" = "ALL PRIVILEGES";
"SCHEMA ${grafanaDatabaseUser}" = "ALL PRIVILEGES";
};
}];
};
})
(lib.mkIf hostCfg.services.vouch-proxy.enable {
services.grafana.settings."auth.generic_oauth" = {
api_url = authSubpath "oauth2/authorise";
client_id = "grafana";
client_secret = "$__file{${config.sops.secrets."vouch-proxy/client/secret".path}";
@ -32,133 +175,6 @@ in
scopes = lib.concatStringsSep " " [ "openid" "email" "profile" ];
token_url = authSubpath "oauth2/token";
};
database = rec {
host = "127.0.0.1:${builtins.toString config.services.postgresql.port}";
password = "$__file{${config.sops.secrets."grafana/database/password".path}}";
type = "postgres";
name = "grafana";
user = name;
};
log = {
level = "warn";
mode = "syslog";
};
security = {
admin_email = config.security.acme.defaults.email;
admin_password = "$__file{${config.sops.secrets."grafana/users/admin/password".path}}";
cookie_secure = true;
csrf_trusted_origins = [
vouchDomain
"auth.${config.networking.domain}"
];
strict_transport_security = true;
strict_transport_security_subdomains = true;
};
users = {
default_theme = "system";
default_language = "detect";
};
server = {
enable_gzip = true;
enforce_domain = true;
http_addr = "127.0.0.1";
http_port = 3000;
root_url = "${monitoringDomain}/grafana";
serve_from_sub_path = true;
};
};
};
services.nginx.virtualHosts."${monitoringDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
extraConfig = ''
auth_request /validate;
# If the user is not logged in, redirect them to Vouch's login URL
error_page 401 = @error401;
location @error401 {
return 302 http://${vouchDomain}/login?url=$scheme://$http_host$request_uri&vouch-failcount=$auth_resp_failcount&X-Vouch-Token=$auth_resp_jwt&error=$auth_resp_err;
}
'';
locations = {
"= /validate" = {
proxyPass = "http://${vouchSettings.vouch.listen}:${builtins.toString vouchSettings.vouch.port}";
extraConfig = ''
proxy_pass_request_body off;
# These will be passed to the @error401 call.
auth_request_set $auth_resp_x_vouch_user $upstream_http_x_vouch_user;
auth_request_set $auth_resp_jwt $upstream_http_x_vouch_jwt;
auth_request_set $auth_resp_err $upstream_http_x_vouch_err;
auth_request_set $auth_resp_failcount $upstream_http_x_vouch_failcount;
'';
};
# Make Grafana the default redirect target.
"= /".return = "301 /grafana";
# Serving Grafana with a subpath.
"/grafana" = {
proxyPass = "http://grafana";
extraConfig = ''
proxy_set_header X-Vouch-User $auth_resp_x_vouch_user;
'';
};
};
};
services.nginx.upstreams."grafana" = {
extraConfig = ''
zone services;
'';
servers = {
"localhost:${builtins.toString config.services.grafana.settings.server.http_port}" = { };
};
};
# Setting up with secure schema usage pattern.
systemd.services.grafana = {
preStart =
let
psql = lib.getExe' config.services.postgresql.package "psql";
in
lib.mkBefore ''
# Setting up the appropriate schema for PostgreSQL secure schema usage.
${psql} -tAc "CREATE SCHEMA IF NOT EXISTS AUTHORIZATION ${grafanaDatabaseUser};"
'';
};
# Setting up PostgreSQL with secure schema.
services.postgresql = {
ensureDatabases = [ grafanaDatabaseName ];
ensureUsers = [{
name = grafanaDatabaseName;
ensurePermissions = {
"DATABASE ${grafanaDatabaseName}" = "ALL PRIVILEGES";
"SCHEMA ${grafanaDatabaseUser}" = "ALL PRIVILEGES";
};
}];
};
sops.secrets =
let
grafanaFileAttributes = {
owner = config.users.users.grafana.name;
group = config.users.users.grafana.group;
mode = "0400";
};
in
lib.getSecrets ../../secrets/secrets.yaml {
"grafana/database/password" = grafanaFileAttributes;
"grafana/users/admin/password" = grafanaFileAttributes;
};
})
]);
}

View File

@ -0,0 +1,78 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.idm;
authDomain = "auth.${config.networking.domain}";
port = 9443;
certsDir = config.security.acme.certs."${authDomain}".directory;
backupsDir = "/var/lib/kanidm/backups";
in
{
options.hosts.plover.services.idm.enable = lib.mkEnableOption "preferred IDM server";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
hosts.plover.services.vouch-proxy.enable = lib.mkDefault true;
services.kanidm = {
enableServer = true;
serverSettings = {
domain = authDomain;
origin = "https://${authDomain}:${builtins.toString port}";
bindaddress = "127.0.0.1:${builtins.toString port}";
ldapbindaddress = "127.0.0.1:3636";
role = "WriteReplica";
trust_x_forward_for = true;
tls_chain = "${certsDir}/fullchain.pem";
tls_key = "${certsDir}/key.pem";
online_backup = {
path = backupsDir;
schedule = "0 0 * * *";
};
};
};
# The kanidm Nix module already sets the certificates directory to be
# read-only with systemd so there's no need for that here, though we may need
# to set up the backups directory.
systemd.services.kanidm = {
preStart = lib.mkBefore ''
mkdir -p "${backupsDir}"
'';
serviceConfig = {
SupplementaryGroups = [ config.security.acme.certs."${authDomain}".group ];
};
};
}
(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.virtualHosts."${authDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations."/".proxyPass = "https://kanidm";
};
services.nginx.upstreams."kanidm" = {
extraConfig = ''
zone services;
'';
servers = {
"localhost:${builtins.toString port}" = { };
};
};
})
(lib.mkIf hostCfg.services.backup.enable {
# Add the following to be backed up.
services.borgbackup.jobs.services-backup.paths = [ backupsDir ];
})
]);
}

View File

@ -1,63 +0,0 @@
{ config, lib, pkgs, ... }:
let
authDomain = "auth.${config.networking.domain}";
port = 9443;
certsDir = config.security.acme.certs."${authDomain}".directory;
backupsDir = "/var/lib/kanidm/backups";
in
{
services.kanidm = {
enableServer = true;
serverSettings = {
domain = authDomain;
origin = "https://${authDomain}:${builtins.toString port}";
bindaddress = "127.0.0.1:${builtins.toString port}";
ldapbindaddress = "127.0.0.1:3636";
role = "WriteReplica";
trust_x_forward_for = true;
tls_chain = "${certsDir}/fullchain.pem";
tls_key = "${certsDir}/key.pem";
online_backup = {
path = backupsDir;
schedule = "0 0 * * *";
};
};
};
# The kanidm Nix module already sets the certificates directory to be
# read-only with systemd so there's no need for that here, though we may need
# to set up the backups directory.
systemd.services.kanidm = {
preStart = lib.mkBefore ''
mkdir -p "${backupsDir}"
'';
serviceConfig = {
SupplementaryGroups = [ config.security.acme.certs."${authDomain}".group ];
};
};
services.nginx.virtualHosts."${authDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations."/".proxyPass = "https://kanidm";
};
services.nginx.upstreams."kanidm" = {
extraConfig = ''
zone services;
'';
servers = {
"localhost:${builtins.toString port}" = { };
};
};
# Add the following to be backed up.
services.borgbackup.jobs.services-backup.paths = [ backupsDir ];
}

View File

@ -0,0 +1,52 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.monitoring;
bindStatsPort = 8053;
prometheusExports = config.services.prometheus.exporters;
in
{
options.hosts.plover.services.monitoring.enable = lib.mkEnableOption "preferred monitoring stack";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
services.prometheus = {
enable = true;
exporters = {
bind = {
enable = true;
bindURI = "http://127.0.0.1/${builtins.toString bindStatsPort}";
};
nginx.enable = true;
nginxlog.enable = true;
node = {
enable = true;
enabledCollectors = [ "systemd" ];
};
};
scrapeConfigs = [
{
job_name = config.networking.hostName;
static_configs = [{
targets = [ "127.0.0.1:${builtins.toString prometheusExports.node.port}" ];
}];
}
];
};
# This is required for Prometheus to be able to monitor my services.
services.nginx.statusPage = true;
services.bind.extraConfig = ''
statistics-channels {
inet 127.0.0.1 port ${builtins.toString bindStatsPort} allow { 127.0.0.1; };
};
'';
}
]);
}

View File

@ -1,74 +0,0 @@
# The reverse proxy of choice. Logs should be rotated weekly.
{ config, lib, pkgs, ... }:
{
# The main server that ties all of the services together in one neat little
# place. Take note, the virtual host definitions are all in their respective
# modules.
services.nginx = {
enable = true;
enableReload = true;
package = pkgs.nginxMainline;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
# Some more server-side compression.
recommendedBrotliSettings = true;
recommendedGzipSettings = true;
recommendedZstdSettings = true;
proxyCachePath.apps = {
enable = true;
keysZoneName = "apps";
};
appendConfig = ''
worker_processes auto;
'';
# We're avoiding letting any service become the default server, especially
# since that could be used to reach a service over unencrypted HTTP. So we're
# setting up a catch-all server that simply closes the connection (444).
appendHttpConfig = ''
# https://docs.nginx.com/nginx/admin-guide/content-cache/content-caching/
proxy_cache_min_uses 5;
proxy_cache_valid 200 302 10m;
proxy_cache_valid 404 1m;
proxy_no_cache $http_pragma $http_authorization;
server {
listen 80 default_server;
listen [::]:80 default_server;
return 444;
}
'';
# This is defined for other services.
upstreams."nginx" = {
extraConfig = ''
zone services 64k;
'';
servers = {
"localhost:80" = { };
};
};
};
networking.firewall.allowedTCPPorts = [
80 # HTTP servers.
443 # HTTPS servers.
];
# Some fail2ban policies to apply for nginx.
services.fail2ban.jails = {
nginx-http-auth.settings = { enabled = true; };
nginx-botsearch.settings = { enabled = true; };
nginx-bad-request.settings = { enabled = true; };
};
# Generate DH parameters for nginx-specific security configurations.
security.dhparams.params.nginx.bits = 4096;
}

View File

@ -1,81 +0,0 @@
# The database service of choice. Most services can use this so far
# (thankfully).
{ config, lib, pkgs, ... }:
let
postgresqlDomain = "postgres.${config.networking.domain}";
in
{
services.postgresql = {
enable = true;
package = pkgs.postgresql_15;
enableTCPIP = true;
# Create a per-user schema as documented in the PostgreSQL manual's 'Usage
# Patterns' section. This makes use of the secure schema usage pattern that
# the documentation encourages.
#
# Now, you just have to keep in mind which applications make use of them.
# Most of them should have a setting for the schema to be used. If not, then
# screw them (or just file an issue and politely ask for the feature).
initialScript =
let
# This will only be run once anyway so it is acceptable to create users
# "forcibly".
perUserSchemas = lib.lists.map
(user: ''
CREATE USER ${user.name};
CREATE SCHEMA AUTHORIZATION ${user.name};
'')
config.services.postgresql.ensureUsers;
in
pkgs.writeText "plover-initial-postgresql-script" ''
${lib.concatStringsSep "\n" perUserSchemas}
'';
settings =
let
credsDir = path: "/run/credentials/postgresql.service/${path}";
in
{
# Still doing the secure schema usage pattern.
search_path = ''"$user"'';
ssl_cert_file = credsDir "cert.pem";
ssl_key_file = credsDir "key.pem";
ssl_ca_file = credsDir "fullchain.pem";
};
};
# With a database comes a dumping.
services.postgresqlBackup = {
enable = true;
compression = "zstd";
compressionLevel = 11;
# Run every 3 days, starting from the first day of the month.
startAt = "*-*-1/3";
};
# Setting this up for TLS.
systemd.services.postgresql = {
requires = [ "acme-finished-${postgresqlDomain}.target" ];
serviceConfig.LoadCredential =
let
certDirectory = config.security.acme.certs."${postgresqlDomain}".directory;
certCredentialPath = path: "${path}:${certDirectory}/${path}";
in
[
(certCredentialPath "cert.pem")
(certCredentialPath "key.pem")
(certCredentialPath "fullchain.pem")
];
};
security.acme.certs."${postgresqlDomain}".postRun = ''
systemctl restart postgresql.service
'';
# Add the dumps to be backed up.
services.borgbackup.jobs.services-backup.paths = [ config.services.postgresqlBackup.location ];
}

View File

@ -1,43 +0,0 @@
{ config, lib, pkgs, ... }:
let
bindStatsPort = 8053;
prometheusExports = config.services.prometheus.exporters;
in
{
services.prometheus = {
enable = true;
exporters = {
bind = {
enable = true;
bindURI = "http://127.0.0.1/${builtins.toString bindStatsPort}";
};
nginx.enable = true;
nginxlog.enable = true;
node = {
enable = true;
enabledCollectors = [ "systemd" ];
};
};
scrapeConfigs = [
{
job_name = config.networking.hostName;
static_configs = [{
targets = [ "127.0.0.1:${builtins.toString prometheusExports.node.port}" ];
}];
}
];
};
# This is required for Prometheus to be able to monitor my services.
services.nginx.statusPage = true;
services.bind.extraConfig = ''
statistics-channels {
inet 127.0.0.1 port ${builtins.toString bindStatsPort} allow { 127.0.0.1; };
};
'';
}

View File

@ -0,0 +1,86 @@
# The reverse proxy of choice. Logs should be rotated weekly.
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.reverse-proxy;
in
{
options.hosts.plover.services.reverse-proxy.enable = lib.mkEnableOption "preferred public-facing reverse proxy";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
# The main server block that ties all of the services together in one neat
# little place. Take note, the virtual host definitions are all in their
# respective modules.
services.nginx = {
enable = true;
enableReload = true;
package = pkgs.nginxMainline;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
# Some more server-side compression.
recommendedBrotliSettings = true;
recommendedGzipSettings = true;
recommendedZstdSettings = true;
proxyCachePath.apps = {
enable = true;
keysZoneName = "apps";
};
appendConfig = ''
worker_processes auto;
'';
# We're avoiding letting any service become the default server, especially
# since that could be used to reach a service over unencrypted HTTP. So we're
# setting up a catch-all server that simply closes the connection (444).
appendHttpConfig = ''
# https://docs.nginx.com/nginx/admin-guide/content-cache/content-caching/
proxy_cache_min_uses 5;
proxy_cache_valid 200 302 10m;
proxy_cache_valid 404 1m;
proxy_no_cache $http_pragma $http_authorization;
server {
listen 80 default_server;
listen [::]:80 default_server;
return 444;
}
'';
# This is defined for other services.
upstreams."nginx" = {
extraConfig = ''
zone services 64k;
'';
servers = {
"localhost:80" = { };
};
};
};
networking.firewall.allowedTCPPorts = [
80 # HTTP servers.
443 # HTTPS servers.
];
# Generate DH parameters for nginx-specific security configurations.
security.dhparams.params.nginx.bits = 4096;
}
(lib.mkIf config.profiles.server.enable {
# Some fail2ban policies to apply for nginx.
services.fail2ban.jails = {
nginx-http-auth.settings = { enabled = true; };
nginx-botsearch.settings = { enabled = true; };
nginx-bad-request.settings = { enabled = true; };
};
})
]);
}
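
Individual service modules tie into this shared proxy by declaring their own virtual host and upstream; a minimal sketch of that pattern, where the `app` upstream, its domain, and the backend port are hypothetical placeholders:

services.nginx.virtualHosts."app.${config.networking.domain}" = {
  forceSSL = true;
  enableACME = true;
  acmeRoot = null;
  kTLS = true;
  locations."/" = {
    proxyPass = "http://app";
    # Reuse the shared proxy cache declared by proxyCachePath.apps above.
    extraConfig = ''
      proxy_cache ${config.services.nginx.proxyCachePath.apps.keysZoneName};
    '';
  };
};

services.nginx.upstreams."app" = {
  # Attach to the shared 'services' zone defined above.
  extraConfig = ''
    zone services;
  '';
  servers = { "localhost:8080" = { }; };
};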

View File

@ -3,6 +3,9 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.vaultwarden;
passwordManagerDomain = "pass.${config.networking.domain}";
# This should be set by the service module from nixpkgs.
@ -12,211 +15,227 @@ let
vaultwardenDbName = "vaultwarden";
in
{
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"vaultwarden/env".owner = vaultwardenUser;
};
options.hosts.plover.services.vaultwarden.enable = lib.mkEnableOption "Vaultwarden instance";
services.vaultwarden = {
enable = true;
dbBackend = "postgresql";
environmentFile = config.sops.secrets."vaultwarden/env".path;
config = {
DOMAIN = "https://${passwordManagerDomain}";
# Configuring the server.
ROCKET_ADDRESS = "127.0.0.1";
ROCKET_PORT = 8222;
# Ehh... It's only a few (or even one) users anyway so nah. Since this
# instance will not configure an SMTP server, this pretty much means
# invitations are only via email at this point.
SHOW_PASSWORD_HINT = false;
# Configuring some parts of account management which is almost
# nonexistent because this is just intended for me (at least right now).
SIGNUPS_ALLOWED = false;
SIGNUPS_VERIFY = true;
# Invitations...
INVITATIONS_ALLOWED = true;
INVITATION_ORG_NAME = "foodogsquared's Vaultwarden";
# Notifications...
WEBSOCKET_ENABLED = true;
WEBSOCKET_PORT = 3012;
WEBSOCKET_ADDRESS = "0.0.0.0";
# Enabling web vault with whatever nixpkgs comes in.
WEB_VAULT_ENABLED = true;
# Databasifications...
DATABASE_URL = "postgresql://${vaultwardenUser}@/${vaultwardenDbName}";
# Mailer service configuration (except the user and password).
SMTP_HOST = "smtp.sendgrid.net";
SMTP_PORT = 587;
SMTP_FROM_NAME = "Vaultwarden";
SMTP_FROM = "bot+vaultwarden@foodogsquared.one";
};
};
services.postgresql = {
ensureDatabases = [ vaultwardenDbName ];
ensureUsers = [{
name = vaultwardenUser;
ensurePermissions = {
"DATABASE ${vaultwardenDbName}" = "ALL PRIVILEGES";
"SCHEMA ${vaultwardenDbName}" = "ALL PRIVILEGES";
};
}];
};
systemd.services.vaultwarden = {
path = [ config.services.postgresql.package ];
# Making it comply with the PostgreSQL secure schema usage pattern.
preStart = lib.mkAfter ''
# Setting up the appropriate schema for PostgreSQL secure schema usage.
psql -tAc "SELECT 1 FROM information_schema.schemata WHERE schema_name='${vaultwardenUser}';" \
| grep -q 1 || psql -tAc "CREATE SCHEMA IF NOT EXISTS AUTHORIZATION ${vaultwardenUser};"
'';
# We do a little service hardening. Even though the Vaultwarden NixOS
# module already does some of these things, we'll add a few more on top of
# them.
serviceConfig = lib.mkAfter {
LockPersonality = true;
NoNewPrivileges = true;
RestrictSUIDSGID = true;
RestrictRealtime = true;
ProtectClock = true;
ProtectKernelLogs = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectHostname = true;
ProtectControlGroups = true;
ProtectProc = "invisible";
# Filtering system calls.
SystemCallFilter = [
"@system-service"
"~@privileged"
];
SystemCallErrorNumber = "EPERM";
SystemCallArchitectures = "native";
# Restricting what capabilities it has access to, which is none.
CapabilityBoundingSet = [ "" ];
AmbientCapabilities = lib.mkForce [ "" ];
# Restrict what address families this service can interact with. Since it
# is a web service, we expect it to only interact with the usual IP-based
# families.
RestrictAddressFamilies = [
# It's required especially since it can communicate with the local system.
"AF_LOCAL"
# The IPs.
"AF_INET"
"AF_INET6"
];
# Restrict what namespaces it can create, which is none.
RestrictNamespaces = true;
};
};
# Attaching it to our reverse proxy of choice.
services.nginx.virtualHosts."${passwordManagerDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations =
let
address = config.services.vaultwarden.config.ROCKET_ADDRESS;
websocketPort = config.services.vaultwarden.config.WEBSOCKET_PORT;
in
{
"/" = {
proxyPass = "http://vaultwarden";
proxyWebsockets = true;
};
"/notifications/hub" = {
proxyPass = "http://${address}:${toString websocketPort}";
proxyWebsockets = true;
};
"/notifications/hub/negotiate" = {
proxyPass = "http://vaultwarden";
proxyWebsockets = true;
};
};
extraConfig = ''
proxy_cache ${config.services.nginx.proxyCachePath.apps.keysZoneName};
'';
};
services.nginx.upstreams."vaultwarden" = {
extraConfig = ''
zone services;
keepalive 2;
'';
servers = let
address = config.services.vaultwarden.config.ROCKET_ADDRESS;
port = config.services.vaultwarden.config.ROCKET_PORT;
in
config = lib.mkIf cfg.enable (lib.mkMerge [
{
"${address}:${builtins.toString port}" = { };
};
};
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"vaultwarden/env".owner = vaultwardenUser;
};
# Configuring fail2ban for this service which thankfully has a dedicated page
# at https://github.com/dani-garcia/vaultwarden/wiki/Fail2Ban-Setup.
services.fail2ban.jails = {
vaultwarden-user.settings = {
enabled = true;
backend = "systemd";
filter = "vaultwarden-user[journalmatch='_SYSTEMD_UNIT=vaultwarden.service + _COMM=vaultwarden']";
maxretry = 5;
};
services.vaultwarden = {
enable = true;
environmentFile = config.sops.secrets."vaultwarden/env".path;
config = {
DOMAIN = "https://${passwordManagerDomain}";
vaultwarden-admin.settings = {
enabled = true;
backend = "systemd";
filter = "vaultwarden-admin[journalmatch='_SYSTEMD_UNIT=vaultwarden.service + _COMM=vaultwarden']";
maxretry = 3;
};
};
# Configuring the server.
ROCKET_ADDRESS = "127.0.0.1";
ROCKET_PORT = 8222;
environment.etc = {
"fail2ban/filter.d/vaultwarden-user.conf".text = ''
[Includes]
before = common.conf
# Ehh... It's only a few (or even one) users anyway so nah. Since this
# instance will not configure an SMTP server, this pretty much means
# invitations are only via email at this point.
SHOW_PASSWORD_HINT = false;
# For more information, the Vaultwarden knowledge base has a dedicated page
# for configuring fail2ban with the application (see
# https://github.com/dani-garcia/vaultwarden/wiki/Fail2Ban-Setup).
[Definition]
failregex = ^.*Username or password is incorrect\. Try again\. IP: <HOST>\. Username:.*$
ignoreregex =
'';
# Configuring some parts of account management which is almost
# nonexistent because this is just intended for me (at least right now).
SIGNUPS_ALLOWED = false;
SIGNUPS_VERIFY = true;
"fail2ban/filter.d/vaultwarden-admin.conf".text = ''
[Includes]
before = common.conf
# Invitations...
INVITATIONS_ALLOWED = true;
INVITATION_ORG_NAME = "foodogsquared's Vaultwarden";
# For more information, the Vaultwarden knowledge base has a dedicated page
# for configuring fail2ban with the application (see
# https://github.com/dani-garcia/vaultwarden/wiki/Fail2Ban-Setup).
[Definition]
failregex = ^.*Invalid admin token\. IP: <HOST>.*$
ignoreregex =
'';
};
# Notifications...
WEBSOCKET_ENABLED = true;
WEBSOCKET_PORT = 3012;
WEBSOCKET_ADDRESS = "0.0.0.0";
# Add the data directory to be backed up.
services.borgbackup.jobs.services-backup.paths = [ "/var/lib/bitwarden_rs" ];
# Enabling web vault with whatever nixpkgs comes in.
WEB_VAULT_ENABLED = true;
# Mailer service configuration (except the user and password).
SMTP_HOST = "smtp.sendgrid.net";
SMTP_PORT = 587;
SMTP_FROM_NAME = "Vaultwarden";
SMTP_FROM = "bot+vaultwarden@foodogsquared.one";
};
};
# We do a little service hardening. Even though the Vaultwarden NixOS
# module already does some of these things, we'll add a few more on top of
# them.
systemd.services.vaultwarden = {
serviceConfig = lib.mkAfter {
LockPersonality = true;
NoNewPrivileges = true;
RestrictSUIDSGID = true;
RestrictRealtime = true;
ProtectClock = true;
ProtectKernelLogs = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectHostname = true;
ProtectControlGroups = true;
ProtectProc = "invisible";
# Filtering system calls.
SystemCallFilter = [
"@system-service"
"~@privileged"
];
SystemCallErrorNumber = "EPERM";
SystemCallArchitectures = "native";
# Restricting what capabilities it has access to, which is none.
CapabilityBoundingSet = [ "" ];
AmbientCapabilities = lib.mkForce [ "" ];
# Restrict what address families this service can interact with. Since it
# is a web service, we expect it to only interact with the usual IP-based
# families.
RestrictAddressFamilies = [
# It's required especially since it can communicate with the local system.
"AF_LOCAL"
# The IPs.
"AF_INET"
"AF_INET6"
];
# Restrict what namespaces it can create, which is none.
RestrictNamespaces = true;
};
};
}
(lib.mkIf hostCfg.services.database.enable {
services.vaultwarden = {
dbBackend = "postgresql";
config.DATABASE_URL = "postgresql://${vaultwardenUser}@/${vaultwardenDbName}";
};
services.postgresql = {
ensureDatabases = [ vaultwardenDbName ];
ensureUsers = [{
name = vaultwardenUser;
ensurePermissions = {
"DATABASE ${vaultwardenDbName}" = "ALL PRIVILEGES";
"SCHEMA ${vaultwardenDbName}" = "ALL PRIVILEGES";
};
}];
};
systemd.services.vaultwarden = {
path = [ config.services.postgresql.package ];
# Making it comply with the PostgreSQL secure schema usage pattern.
preStart = lib.mkAfter ''
# Setting up the appropriate schema for PostgreSQL secure schema usage.
psql -tAc "SELECT 1 FROM information_schema.schemata WHERE schema_name='${vaultwardenUser}';" \
| grep -q 1 || psql -tAc "CREATE SCHEMA IF NOT EXISTS AUTHORIZATION ${vaultwardenUser};"
'';
};
})
(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.virtualHosts."${passwordManagerDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations =
let
address = config.services.vaultwarden.config.ROCKET_ADDRESS;
websocketPort = config.services.vaultwarden.config.WEBSOCKET_PORT;
in
{
"/" = {
proxyPass = "http://vaultwarden";
proxyWebsockets = true;
};
"/notifications/hub" = {
proxyPass = "http://${address}:${toString websocketPort}";
proxyWebsockets = true;
};
"/notifications/hub/negotiate" = {
proxyPass = "http://vaultwarden";
proxyWebsockets = true;
};
};
extraConfig = ''
proxy_cache ${config.services.nginx.proxyCachePath.apps.keysZoneName};
'';
};
services.nginx.upstreams."vaultwarden" = {
extraConfig = ''
zone services;
keepalive 2;
'';
servers = let
address = config.services.vaultwarden.config.ROCKET_ADDRESS;
port = config.services.vaultwarden.config.ROCKET_PORT;
in
{
"${address}:${builtins.toString port}" = { };
};
};
})
(lib.mkIf hostCfg.services.backup.enable {
# Add the data directory to be backed up.
services.borgbackup.jobs.services-backup.paths = [ "/var/lib/bitwarden_rs" ];
})
(lib.mkIf hostCfg.services.fail2ban.enable {
# Configuring fail2ban for this service which thankfully has a dedicated page
# at https://github.com/dani-garcia/vaultwarden/wiki/Fail2Ban-Setup.
services.fail2ban.jails = {
vaultwarden-user.settings = {
enabled = true;
backend = "systemd";
filter = "vaultwarden-user[journalmatch='_SYSTEMD_UNIT=vaultwarden.service + _COMM=vaultwarden']";
maxretry = 5;
};
vaultwarden-admin.settings = {
enabled = true;
backend = "systemd";
filter = "vaultwarden-admin[journalmatch='_SYSTEMD_UNIT=vaultwarden.service + _COMM=vaultwarden']";
maxretry = 3;
};
};
environment.etc = {
"fail2ban/filter.d/vaultwarden-user.conf".text = ''
[Includes]
before = common.conf
# For more information, the Vaultwarden knowledge base has a dedicated page
# for configuring fail2ban with the application (see
# https://github.com/dani-garcia/vaultwarden/wiki/Fail2Ban-Setup).
[Definition]
failregex = ^.*Username or password is incorrect\. Try again\. IP: <HOST>\. Username:.*$
ignoreregex =
'';
"fail2ban/filter.d/vaultwarden-admin.conf".text = ''
[Includes]
before = common.conf
# For more information, the Vaultwarden knowledge base has a dedicated page
# for configuring fail2ban with the application (see
# https://github.com/dani-garcia/vaultwarden/wiki/Fail2Ban-Setup).
[Definition]
failregex = ^.*Invalid admin token\. IP: <HOST>.*$
ignoreregex =
'';
};
})
]);
}

View File

@ -1,67 +1,78 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.vouch-proxy;
inherit (config.services.vouch-proxy.instances."${vouchDomain}") settings;
vouchDomain = "vouch.${config.networking.domain}";
authDomain = config.services.kanidm.serverSettings.domain;
in
{
sops.secrets = let
vouchPermissions = rec {
owner = "vouch-proxy";
group = owner;
mode = "0400";
};
in lib.getSecrets ../../secrets/secrets.yaml {
"vouch-proxy/jwt/secret" = vouchPermissions;
"vouch-proxy/client/secret" = vouchPermissions;
};
options.hosts.plover.services.vouch-proxy.enable = lib.mkEnableOption "Vouch proxy setup";
services.vouch-proxy = {
enable = true;
instances."${vouchDomain}".settings = {
vouch = {
listen = "127.0.0.1";
port = 19900;
domains = [ "foodogsquared.one" ];
jwt.secret._secret = config.sops.secrets."vouch-proxy/jwt/secret".path;
config = lib.mkIf cfg.enable (lib.mkMerge [
{
sops.secrets = let
vouchPermissions = rec {
owner = "vouch-proxy";
group = owner;
mode = "0400";
};
in lib.getSecrets ../../secrets/secrets.yaml {
"vouch-proxy/jwt/secret" = vouchPermissions;
"vouch-proxy/client/secret" = vouchPermissions;
};
oauth = rec {
provider = "oidc";
client_id = "vouch";
client_secret._secret = config.sops.secrets."vouch-proxy/client/secret".path;
code_challenge_method = "S256";
auth_url = "https://${authDomain}/ui/oauth2";
token_url = "https://${authDomain}/oauth2/token";
user_info_url = "https://${authDomain}/oauth2/openid/${client_id}/userinfo";
scopes = [ "openid" "email" "profile" ];
callback_url = "https://${vouchDomain}/auth";
services.vouch-proxy = {
enable = true;
instances."${vouchDomain}".settings = {
vouch = {
listen = "127.0.0.1";
port = 19900;
domains = [ "foodogsquared.one" ];
jwt.secret._secret = config.sops.secrets."vouch-proxy/jwt/secret".path;
};
oauth = rec {
provider = "oidc";
client_id = "vouch";
client_secret._secret = config.sops.secrets."vouch-proxy/client/secret".path;
code_challenge_method = "S256";
auth_url = "https://${authDomain}/ui/oauth2";
token_url = "https://${authDomain}/oauth2/token";
user_info_url = "https://${authDomain}/oauth2/openid/${client_id}/userinfo";
scopes = [ "openid" "email" "profile" ];
callback_url = "https://${vouchDomain}/auth";
};
};
};
};
};
}
services.nginx.virtualHosts."${vouchDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations."/" = {
proxyPass = "http://vouch-proxy";
extraConfig = ''
proxy_set_header Host ${vouchDomain};
proxy_set_header X-Forwarded-Proto https;
'';
};
};
(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.virtualHosts."${vouchDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations."/" = {
proxyPass = "http://vouch-proxy";
extraConfig = ''
proxy_set_header Host ${vouchDomain};
proxy_set_header X-Forwarded-Proto https;
'';
};
};
services.nginx.upstreams."vouch-proxy" = {
extraConfig = ''
zone services;
'';
servers = {
"${settings.vouch.listen}:${builtins.toString settings.vouch.port}" = { };
};
};
services.nginx.upstreams."vouch-proxy" = {
extraConfig = ''
zone services;
'';
servers = {
"${settings.vouch.listen}:${builtins.toString settings.vouch.port}" = { };
};
};
})
]);
}
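
Nothing here wires Vouch into another virtual host yet; the usual way to consume it is nginx's auth_request directive. A rough sketch for a hypothetical protected host, following the upstream vouch-proxy documentation rather than anything in this commit (the domain and backend port are placeholders):

services.nginx.virtualHosts."protected.${config.networking.domain}" = {
  forceSSL = true;
  enableACME = true;
  acmeRoot = null;
  locations = {
    # Subrequest endpoint that asks Vouch whether the visitor is logged in.
    "= /validate" = {
      proxyPass = "http://vouch-proxy/validate";
      extraConfig = ''
        proxy_set_header Host $http_host;
        proxy_pass_request_body off;
        proxy_set_header Content-Length "";
      '';
    };
    # Everything else requires a successful /validate subrequest first.
    "/" = {
      proxyPass = "http://localhost:8080"; # placeholder backend
      extraConfig = ''
        auth_request /validate;
        error_page 401 = @error401;
      '';
    };
    # Unauthenticated visitors get bounced to the Vouch login page.
    "@error401".extraConfig = ''
      return 302 https://${vouchDomain}/login?url=$scheme://$http_host$request_uri;
    '';
  };
};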

View File

@ -2,6 +2,9 @@
# We're setting up Wezterm mux server with TLS domains.
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.wezterm-mux-server;
weztermDomain = "mux.${config.networking.domain}";
port = 9801;
listenAddress = "localhost:${builtins.toString port}";
@ -12,40 +15,49 @@ let
};
in
{
services.wezterm-mux-server = {
enable = true;
inherit configFile;
};
options.hosts.plover.services.wezterm-mux-server.enable = lib.mkEnableOption "Wezterm mux server setup";
systemd.services.wezterm-mux-server = {
requires = [ "acme-finished-${weztermDomain}.target" ];
environment.WEZTERM_LOG = "info";
serviceConfig = {
LoadCredential =
let
certDir = config.security.acme.certs."${weztermDomain}".directory;
credentialCertPath = path: "${path}:${certDir}/${path}";
in
[
(credentialCertPath "key.pem")
(credentialCertPath "cert.pem")
(credentialCertPath "fullchain.pem")
];
};
};
config = lib.mkIf cfg.enable (lib.mkMerge [
{
services.wezterm-mux-server = {
enable = true;
inherit configFile;
};
security.acme.certs."${weztermDomain}".postRun = ''
systemctl restart wezterm-mux-server.service
'';
systemd.services.wezterm-mux-server = {
requires = [ "acme-finished-${weztermDomain}.target" ];
environment.WEZTERM_LOG = "info";
serviceConfig = {
LoadCredential =
let
certDir = config.security.acme.certs."${weztermDomain}".directory;
credentialCertPath = path: "${path}:${certDir}/${path}";
in
[
(credentialCertPath "key.pem")
(credentialCertPath "cert.pem")
(credentialCertPath "fullchain.pem")
];
};
};
services.nginx.streamConfig = ''
upstream wezterm {
server ${listenAddress};
security.acme.certs."${weztermDomain}".postRun = ''
systemctl restart wezterm-mux-server.service
'';
}
server {
listen ${builtins.toString port};
proxy_pass wezterm;
}
'';
# TODO: Sort out where the mux.foodogsquared.one setup should live.
(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.streamConfig = ''
upstream wezterm {
server ${listenAddress};
}
server {
listen ${builtins.toString port};
proxy_pass wezterm;
}
'';
})
]);
}

View File

@ -3,6 +3,9 @@
# Take note this service is heavily based on the hardware networking setup of
# this host, so it's better to stay focused on this host's hardware configuration.
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.wireguard;
inherit (import ../hardware/networks.nix) interfaces wireguardPort wireguardPeers;
wireguardIFName = interfaces.wireguard0.ifname;
@ -11,95 +14,103 @@ let
phonePeerAddresses = with wireguardPeers.phone; [ "${IPv4}/32" "${IPv6}/128" ];
in
{
environment.systemPackages = [ pkgs.wireguard-tools ];
options.hosts.plover.services.wireguard.enable = lib.mkEnableOption "Wireguard VPN setup";
sops.secrets =
let
systemdNetworkdPermission = {
group = config.users.users.systemd-network.group;
reloadUnits = [ "systemd-networkd.service" ];
mode = "0640";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
environment.systemPackages = [ pkgs.wireguard-tools ];
sops.secrets =
let
systemdNetworkdPermission = {
group = config.users.users.systemd-network.group;
reloadUnits = [ "systemd-networkd.service" ];
mode = "0640";
};
in
lib.getSecrets ../../secrets/secrets.yaml {
"wireguard/private-key" = systemdNetworkdPermission;
"wireguard/preshared-keys/ni" = systemdNetworkdPermission;
"wireguard/preshared-keys/phone" = systemdNetworkdPermission;
};
# Since we're using systemd-networkd to configure interfaces, we can control
# how each interface handles things such as IP masquerading, so there's no
# need to modify sysctl settings like 'ipv4.ip_forward' or similar.
systemd.network = {
wait-online.ignoredInterfaces = [ wireguardIFName ];
netdevs."99-${wireguardIFName}" = {
netdevConfig = {
Name = wireguardIFName;
Kind = "wireguard";
};
wireguardConfig = {
PrivateKeyFile = config.sops.secrets."wireguard/private-key".path;
ListenPort = wireguardPort;
};
wireguardPeers = [
# Desktop workstation.
{
wireguardPeerConfig = {
PublicKey = lib.readFile ../../../ni/files/wireguard/wireguard-public-key-ni;
PresharedKeyFile = config.sops.secrets."wireguard/preshared-keys/ni".path;
AllowedIPs = lib.concatStringsSep "," desktopPeerAddresses;
};
}
# Phone.
{
wireguardPeerConfig = {
PublicKey = lib.readFile ../../files/wireguard/wireguard-public-key-phone;
PresharedKeyFile = config.sops.secrets."wireguard/preshared-keys/phone".path;
AllowedIPs = lib.concatStringsSep "," phonePeerAddresses;
};
}
];
};
networks."99-${wireguardIFName}" = with interfaces.wireguard0; {
matchConfig.Name = ifname;
address = [
"${IPv4.address}/14"
"${IPv6.address}/64"
];
routes = [
{ routeConfig.Gateway = IPv4.gateway; }
];
};
};
in
lib.getSecrets ../../secrets/secrets.yaml {
"wireguard/private-key" = systemdNetworkdPermission;
"wireguard/preshared-keys/ni" = systemdNetworkdPermission;
"wireguard/preshared-keys/phone" = systemdNetworkdPermission;
};
networking.firewall = {
# Allow the UDP traffic for the Wireguard service.
allowedUDPPorts = [ wireguardPort ];
# IP forwarding for specific interfaces.
filterForward = true;
extraForwardRules = ''
iifname ${wireguardIFName} accept comment "IP forward from Wireguard interface to LAN"
'';
};
networking.nftables.ruleset = ''
table ip wireguard-${wireguardIFName} {
chain prerouting {
type nat hook prerouting priority filter; policy accept;
}
chain postrouting {
type nat hook postrouting priority srcnat; policy accept;
iifname ${wireguardIFName} snat to ${interfaces.lan.IPv4.address} comment "Make packets from Wireguard interface appear as coming from the LAN interface"
}
}
'';
# Since we're using systemd-networkd to configure interfaces, we can control
# how each interface handles things such as IP masquerading, so there's no
# need to modify sysctl settings like 'ipv4.ip_forward' or similar.
systemd.network = {
wait-online.ignoredInterfaces = [ wireguardIFName ];
(lib.mkIf hostCfg.services.firewall.enable {
networking.firewall = {
# Allow the UDP traffic for the Wireguard service.
allowedUDPPorts = [ wireguardPort ];
netdevs."99-${wireguardIFName}" = {
netdevConfig = {
Name = wireguardIFName;
Kind = "wireguard";
# IP forwarding for specific interfaces.
filterForward = true;
extraForwardRules = ''
iifname ${wireguardIFName} accept comment "IP forward from Wireguard interface to LAN"
'';
};
wireguardConfig = {
PrivateKeyFile = config.sops.secrets."wireguard/private-key".path;
ListenPort = wireguardPort;
};
networking.nftables.ruleset = ''
table ip wireguard-${wireguardIFName} {
chain prerouting {
type nat hook prerouting priority filter; policy accept;
}
wireguardPeers = [
# Desktop workstation.
{
wireguardPeerConfig = {
PublicKey = lib.readFile ../../../ni/files/wireguard/wireguard-public-key-ni;
PresharedKeyFile = config.sops.secrets."wireguard/preshared-keys/ni".path;
AllowedIPs = lib.concatStringsSep "," desktopPeerAddresses;
};
chain postrouting {
type nat hook postrouting priority srcnat; policy accept;
iifname ${wireguardIFName} snat to ${interfaces.lan.IPv4.address} comment "Make packets from Wireguard interface appear as coming from the LAN interface"
}
}
# Phone.
{
wireguardPeerConfig = {
PublicKey = lib.readFile ../../files/wireguard/wireguard-public-key-phone;
PresharedKeyFile = config.sops.secrets."wireguard/preshared-keys/phone".path;
AllowedIPs = lib.concatStringsSep "," phonePeerAddresses;
};
}
];
};
networks."99-${wireguardIFName}" = with interfaces.wireguard0; {
matchConfig.Name = ifname;
address = [
"${IPv4.address}/14"
"${IPv6.address}/64"
];
routes = [
{ routeConfig.Gateway = IPv4.gateway; }
];
};
};
'';
})
]);
}
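
For reference, each peer listed above needs a matching interface on its own side pointing back at this server. A rough client-side sketch with systemd-networkd, where the endpoint, key paths, and tunnel address are placeholders since the real values live in the respective hosts' configurations and in networks.nix:

systemd.network.netdevs."99-wireguard0" = {
  netdevConfig = {
    Name = "wireguard0";
    Kind = "wireguard";
  };
  # Placeholder path; the client keeps its own private key.
  wireguardConfig.PrivateKeyFile = "/var/lib/wireguard/private-key";
  wireguardPeers = [{
    wireguardPeerConfig = {
      PublicKey = "<this server's public key>";            # placeholder
      PresharedKeyFile = "/var/lib/wireguard/preshared";   # placeholder
      # Route everything through the tunnel; narrow this down as needed.
      AllowedIPs = "0.0.0.0/0,::/0";
      Endpoint = "vpn.example.com:51820";                  # placeholder host and port
      PersistentKeepalive = 25;
    };
  }];
};

systemd.network.networks."99-wireguard0" = {
  matchConfig.Name = "wireguard0";
  # The client's own tunnel address; taken from networks.nix in practice.
  address = [ "10.0.0.2/14" ];
};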