hosts/plover: try out host-specific module structure

Gabriel Arazas 2023-12-11 16:30:00 +08:00
parent 396a25f797
commit 9762042848
GPG Key ID: ADE0C41DAB221FCC
24 changed files with 1695 additions and 1487 deletions
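
Distilled from the diffs below, every service module under hosts/plover now follows the same shape: it declares its own enable flag, wraps its whole configuration in lib.mkIf, and gates any cross-service wiring behind the other services' flags. Roughly (the "example" service name is a placeholder):

{ config, lib, pkgs, ... }:
let
  hostCfg = config.hosts.plover;
  cfg = hostCfg.services.example;
in
{
  options.hosts.plover.services.example.enable = lib.mkEnableOption "example service";

  config = lib.mkIf cfg.enable (lib.mkMerge [
    {
      # Unconditional part: the service itself.
    }
    # Optional wiring, applied only when the related service is enabled.
    (lib.mkIf hostCfg.services.reverse-proxy.enable {
      # e.g. the service's nginx virtual host.
    })
  ]);
}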

View File

@@ -388,27 +388,14 @@
# Don't create the user directories since they are assumed to
# be already created by a pre-installed system (which should
# already handle them).
-xdg.userDirs.createDirectories = lib.mkDefault false;
+xdg.userDirs.createDirectories = lib.mkForce false;
-# To be able to use the most of our config as possible, we want
-# both to use the same overlays.
-nixpkgs.overlays = overlays;
-# Stallman-senpai will be disappointed. :/
-nixpkgs.config.allowUnfree = lib.mkDefault true;
-# Find Nix files with these! Even if nix-index is already enabled, it
-# is better to make it explicit.
-programs.nix-index.enable = lib.mkDefault true;
# Setting the homely options.
home.username = lib.mkForce name;
home.homeDirectory = lib.mkForce metadata.home-directory or "/home/${config.home.username}";
-# home-manager configurations are expected to be deployed on
-# non-NixOS systems so it is safe to set this.
-programs.home-manager.enable = lib.mkDefault true;
-targets.genericLinux.enable = lib.mkDefault true;
+programs.home-manager.enable = lib.mkForce true;
+targets.genericLinux.enable = true;
})
userSharedConfig
nixSettingsSharedConfig

View File

@@ -1,8 +1,5 @@
{ config, lib, pkgs, modulesPath, ... }:
-let
-inherit (import ./modules/hardware/networks.nix) interfaces;
-in
{
imports = [
# Since this will be rarely configured, make sure to import the appropriate
@@ -17,69 +14,34 @@ in
# Hardened profile from nixpkgs.
"${modulesPath}/profiles/hardened.nix"
-# Of course, what is a server without a backup? A professionally-handled
-# production system. However, we're not professionals so we do have
-# backups.
-./modules/services/borgbackup.nix
-# The primary DNS server that is completely hidden.
-./modules/services/bind.nix
-# The reverse proxy of choice.
-./modules/services/nginx.nix
-# The single-sign on setup.
-./modules/services/kanidm.nix
-./modules/services/vouch-proxy.nix
-# The monitoring stack.
-./modules/services/prometheus.nix
-./modules/services/grafana.nix
-# The database of choice which is used by most self-managed services on
-# this server.
-./modules/services/postgresql.nix
-# The application services for this server. They are modularized since
-# configuring it here will make it too big.
-./modules/services/atuin.nix
-./modules/services/gitea.nix
-./modules/services/vaultwarden.nix
-./modules/services/wireguard.nix
-./modules/services/wezterm-mux-server.nix
+./modules
];
+# Host-specific modules structuring.
+hosts.plover.services = {
+# The essential services.
+backup.enable = true;
+database.enable = true;
+firewall.enable = true;
+dns-server.enable = true;
+idm.enable = true;
+monitoring.enable = true;
+reverse-proxy.enable = true;
+fail2ban.enable = true;
+# The self-hosted services.
+atuin.enable = true;
+gitea.enable = true;
+grafana.enable = true;
+vaultwarden.enable = true;
+wireguard.enable = true;
+};
# Automatic format and partitioning.
disko.devices = import ./disko.nix {
disks = [ "/dev/sda" ];
};
-networking = {
-nftables.enable = true;
-domain = "foodogsquared.one";
-firewall = {
-enable = true;
-allowedTCPPorts = [
-22 # Secure Shells.
-];
-};
-};
-services.fail2ban = {
-ignoreIP = [
-# VPN clients.
-"${interfaces.wireguard0.IPv4.address}/13"
-"${interfaces.wireguard0.IPv6.address}/64"
-];
-# We're going to be unforgiving with this one since we only have key
-# authentication and password authentication is disabled anyways.
-jails.sshd.settings = {
-enabled = true;
-maxretry = 1;
-};
-};
# Offline SSH!?!
programs.mosh.enable = true;

View File

@@ -0,0 +1,38 @@
# Take note only optional modules should be imported here.
{
imports = [
# Of course, what is a server without a backup? A professionally-handled
# production system. However, we're not professionals so we do have
# backups.
./services/backup.nix
# The database of choice which is used by most self-managed services on
# this server.
./services/database.nix
# The primary DNS server that is completely hidden.
./services/dns-server.nix
# The single-sign on setup.
./services/idm.nix
# The reverse proxy of choice.
./services/reverse-proxy.nix
# The firewall of choice.
./services/firewall.nix
# The VPN setup of choice.
./services/wireguard.nix
# The rest of the self-hosted applications.
./services/atuin.nix
./services/fail2ban.nix
./services/gitea.nix
./services/grafana.nix
./services/monitoring.nix
./services/vouch-proxy.nix
./services/vaultwarden.nix
./services/wezterm-mux-server.nix
];
}
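
Since everything imported here is hidden behind an enable option, importing this file by itself configures nothing; a host only pays for what it switches on. A minimal consumer would look something like this (the selection of services is illustrative):

{
  imports = [ ./modules ];

  hosts.plover.services = {
    reverse-proxy.enable = true;
    gitea.enable = true;
  };
}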

View File

@@ -4,12 +4,19 @@
{ config, lib, pkgs, ... }:
let
+hostCfg = config.hosts.plover;
+cfg = hostCfg.services.atuin;
inherit (import ../hardware/networks.nix) interfaces;
atuinInternalDomain = "atuin.${config.networking.fqdn}";
host = interfaces.lan.IPv4.address;
in
{
+options.hosts.plover.services.atuin.enable = lib.mkEnableOption "Atuin sync server setup";
+config = lib.mkIf cfg.enable (lib.mkMerge [
+{
# Atuin sync server because why not.
services.atuin = {
enable = true;
@@ -18,7 +25,18 @@ in
inherit host;
port = 8965;
};
+}
+(lib.mkIf hostCfg.services.reverse-proxy.enable {
+# Putting it altogether in the reverse proxy of choice.
+services.nginx.virtualHosts."${atuinInternalDomain}" = {
+locations."/" = {
+proxyPass = "http://${host}:${toString config.services.atuin.port}";
+};
+};
+})
+(lib.mkIf hostCfg.services.database.enable {
# Putting a neat little script to create the appropriate schema since we're
# using secure schema usage pattern as encouraged from PostgreSQL
# documentation.
@@ -29,11 +47,6 @@ in
grep -q 1 || psql -tAc "CREATE SCHEMA IF NOT EXISTS atuin;"
'';
};
+})
-# Putting it altogether in the reverse proxy of choice.
-services.nginx.virtualHosts."${atuinInternalDomain}" = {
-locations."/" = {
-proxyPass = "http://${host}:${toString config.services.atuin.port}";
-};
-};
+]);
}

View File

@@ -0,0 +1,86 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.backup;
# The head of the Borgbase hostname.
hetzner-boxes-user = "u332477";
hetzner-boxes-server = "${hetzner-boxes-user}.your-storagebox.de";
borgRepo = path: "ssh://${hetzner-boxes-user}@${hetzner-boxes-server}:23/./borg/plover/${path}";
jobCommonSettings = { patternFiles ? [ ], patterns ? [ ], paths ? [ ], repo, passCommand }: {
inherit paths repo;
compression = "zstd,11";
dateFormat = "+%F-%H-%M-%S-%z";
doInit = true;
encryption = {
inherit passCommand;
mode = "repokey-blake2";
};
extraCreateArgs =
let
args = lib.flatten [
(builtins.map
(patternFile: "--patterns-from ${lib.escapeShellArg patternFile}")
patternFiles)
(builtins.map
(pattern: "--pattern ${lib.escapeShellArg pattern}")
patterns)
];
in
lib.concatStringsSep " " args;
extraInitArgs = "--make-parent-dirs";
persistentTimer = true;
preHook = ''
extraCreateArgs="$extraCreateArgs --stats"
'';
prune.keep = {
weekly = 4;
monthly = 12;
yearly = 6;
};
startAt = "monthly";
environment.BORG_RSH = "ssh -i ${config.sops.secrets."borg/ssh-key".path}";
};
in
{
options.hosts.plover.services.backup.enable = lib.mkEnableOption "backup service";
config = lib.mkIf cfg.enable {
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"borg/repos/host/patterns/keys" = { };
"borg/repos/host/password" = { };
"borg/repos/services/password" = { };
"borg/ssh-key" = { };
};
services.borgbackup.jobs = {
# Backup for host-specific files. They don't change much so it is
# acceptable for it to be backed up monthly.
host-backup = jobCommonSettings {
patternFiles = [
config.sops.secrets."borg/repos/host/patterns/keys".path
];
repo = borgRepo "host";
passCommand = "cat ${config.sops.secrets."borg/repos/host/password".path}";
};
# Backups for various services.
services-backup = jobCommonSettings
{
paths = [
# ACME accounts and TLS certificates
"/var/lib/acme"
];
repo = borgRepo "services";
passCommand = "cat ${config.sops.secrets."borg/repos/services/password".path}";
} // { startAt = "daily"; };
};
programs.ssh.extraConfig = ''
Host ${hetzner-boxes-server}
IdentityFile ${config.sops.secrets."borg/ssh-key".path}
'';
};
}
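
To see how jobCommonSettings expands, here is a hypothetical call with inline patterns (the pattern values and secret path are made up for illustration):

jobCommonSettings {
  patterns = [ "+ var/lib/example" "- var/cache" ];
  repo = borgRepo "example";
  passCommand = "cat /run/secrets/example-password";
}

Among the common settings above, this yields extraCreateArgs = "--pattern '+ var/lib/example' --pattern '- var/cache'" and keeps startAt = "monthly", which callers can still override with // as services-backup does.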

View File

@@ -1,347 +0,0 @@
# The DNS server for my domains. Take note it uses a hidden master setup with
# the secondary nameservers of the service (as of 2023-10-05, we're using
# Hetzner's secondary nameservers).
{ config, lib, pkgs, ... }:
let
inherit (config.networking) domain fqdn;
inherit (import ../hardware/networks.nix) interfaces clientNetworks serverNetworks secondaryNameServers;
secondaryNameServersIPs = lib.foldl'
(total: addresses: total ++ addresses.IPv4 ++ addresses.IPv6)
[ ]
(lib.attrValues secondaryNameServers);
domainZone = pkgs.substituteAll {
src = ../../config/dns/${domain}.zone;
ploverWANIPv4 = interfaces.wan.IPv4.address;
ploverWANIPv6 = interfaces.wan.IPv6.address;
};
fqdnZone = pkgs.substituteAll {
src = ../../config/dns/${fqdn}.zone;
ploverLANIPv4 = interfaces.lan.IPv4.address;
ploverLANIPv6 = interfaces.lan.IPv6.address;
};
zonesDir = "/etc/bind/zones";
zoneFile = domain: "${zonesDir}/${domain}.zone";
dnsSubdomain = "ns1.${domain}";
dnsOverHTTPSPort = 8443;
in
{
sops.secrets =
let
dnsFileAttribute = {
owner = config.users.users.named.name;
group = config.users.users.named.group;
mode = "0400";
};
in
lib.getSecrets ../../secrets/secrets.yaml {
"dns/${domain}/mailbox-security-key" = dnsFileAttribute;
"dns/${domain}/mailbox-security-key-record" = dnsFileAttribute;
"dns/${domain}/keybase-verification-key" = dnsFileAttribute;
"dns/${domain}/rfc2136-key" = dnsFileAttribute // {
reloadUnits = [ "bind.service" ];
};
};
# Install the utilities.
environment.systemPackages = [ config.services.bind.package ];
services.bind = {
enable = true;
forward = "first";
cacheNetworks = [
"127.0.0.1"
"::1"
];
listenOn = [
"127.0.0.1"
interfaces.lan.IPv4.address
interfaces.wan.IPv4.address
];
listenOnIpv6 = [
"::1"
interfaces.lan.IPv6.address
interfaces.wan.IPv6.address
];
# Welp, since the template is pretty limited, we'll have to go with our
# own. This is partially based from the NixOS Bind module except without
# the template for filling in zones since we use views.
configFile =
let
cfg = config.services.bind;
certDir = path: "/run/credentials/bind.service/${path}";
listenInterfaces = lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOn;
listenInterfacesIpv6 = lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOnIpv6;
in
pkgs.writeText "named.conf" ''
include "/etc/bind/rndc.key";
include "${config.sops.secrets."dns/${domain}/rfc2136-key".path}";
controls {
inet 127.0.0.1 allow {localhost;} keys {"rndc-key";};
};
tls ${dnsSubdomain} {
key-file "${certDir "key.pem"}";
cert-file "${certDir "cert.pem"}";
dhparam-file "${config.security.dhparams.params.bind.path}";
ciphers "HIGH:!kRSA:!aNULL:!eNULL:!RC4:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS:!SHA1:!SHA256:!SHA384";
prefer-server-ciphers yes;
session-tickets no;
};
http ${dnsSubdomain} {
endpoints { "/dns-query"; };
};
acl trusted { ${lib.concatStringsSep "; " (clientNetworks ++ serverNetworks)}; localhost; };
acl cachenetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.cacheNetworks} };
acl badnetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.blockedNetworks} };
options {
# Native DNS.
listen-on { ${listenInterfaces} };
listen-on-v6 { ${listenInterfacesIpv6} };
# DNS-over-TLS.
listen-on tls ${dnsSubdomain} { ${listenInterfaces} };
listen-on-v6 tls ${dnsSubdomain} { ${listenInterfacesIpv6} };
# DNS-over-HTTPS.
https-port ${builtins.toString dnsOverHTTPSPort};
listen-on tls ${dnsSubdomain} http ${dnsSubdomain} { ${listenInterfaces} };
listen-on-v6 tls ${dnsSubdomain} http ${dnsSubdomain} { ${listenInterfacesIpv6} };
allow-query { cachenetworks; };
blackhole { badnetworks; };
forward ${cfg.forward};
forwarders { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
directory "${cfg.directory}";
pid-file "/run/named/named.pid";
};
view internal {
match-clients { trusted; };
allow-query { any; };
allow-recursion { any; };
// We'll use systemd-resolved as our forwarder.
forwarders { 127.0.0.53 port 53; };
zone "${fqdn}" {
type primary;
file "${zoneFile fqdn}";
};
zone "${domain}" {
type primary;
file "${zoneFile domain}";
allow-transfer { ${lib.concatStringsSep "; " secondaryNameServersIPs}; };
update-policy {
grant rfc2136key.${domain}. zonesub TXT;
};
};
};
view external {
match-clients { any; };
forwarders { };
empty-zones-enable yes;
allow-query { any; };
allow-recursion { none; };
zone "${domain}" {
in-view internal;
};
};
${cfg.extraConfig}
'';
};
systemd.services.bind = {
path = with pkgs; [ replace-secret ];
preStart =
let
domainZone' = zoneFile domain;
fqdnZone' = zoneFile fqdn;
secretPath = path: config.sops.secrets."dns/${path}".path;
rndc = lib.getExe' config.services.bind.package "rndc";
in
lib.mkAfter ''
# Install the domain zone.
{
install -Dm0600 '${domainZone}' '${domainZone'}'
replace-secret '#mailboxSecurityKey#' '${secretPath "${domain}/mailbox-security-key"}' '${domainZone'}'
replace-secret '#mailboxSecurityKeyRecord#' '${secretPath "${domain}/mailbox-security-key-record"}' '${domainZone'}'
#${rndc} sync "${domain}" IN external
}
# Install the internal DNS zones.
install -Dm0600 '${fqdnZone}' '${fqdnZone'}'
'';
serviceConfig = {
# Additional service hardening. You can see most of the options from
# systemd.exec(5) manual. Run it as an unprivileged user.
User = config.users.users.named.name;
Group = config.users.users.named.group;
UMask = "0037";
# Get the credentials into the service.
LoadCredential =
let
certDirectory = config.security.acme.certs."${dnsSubdomain}".directory;
certCredentialPath = path: "${path}:${certDirectory}/${path}";
in
[
(certCredentialPath "cert.pem")
(certCredentialPath "key.pem")
(certCredentialPath "fullchain.pem")
];
LogFilterPatterns = [
# systemd-resolved doesn't have DNS cookie support, it seems.
"~missing expected cookie from 127.0.0.53#53"
];
# Lock and protect various system components.
LockPersonality = true;
PrivateTmp = true;
NoNewPrivileges = true;
RestrictSUIDSGID = true;
ProtectHome = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
ProtectProc = "invisible";
# Make the filesystem invisible to the service.
ProtectSystem = "strict";
ReadWritePaths = [
config.services.bind.directory
"/etc/bind"
];
ReadOnlyPaths = [
config.security.dhparams.params.bind.path
config.security.acme.certs."${dnsSubdomain}".directory
];
# Set up writable directories.
RuntimeDirectory = "named";
RuntimeDirectoryMode = "0750";
CacheDirectory = "named";
CacheDirectoryMode = "0750";
ConfigurationDirectory = "bind";
ConfigurationDirectoryMode = "0755";
# Filtering system calls.
SystemCallFilter = [ "@system-service" ];
SystemCallErrorNumber = "EPERM";
SystemCallArchitectures = "native";
# Granting and restricting its capabilities. Take note we're not using
# syslog for this even if the application can, so no syslog capability.
# Additionally, we're omitting the program's ability to chroot and
# chown since the user and the directories are already configured.
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
# Restrict what address families it can access.
RestrictAddressFamilies = [
"AF_LOCAL"
"AF_NETLINK"
"AF_BRIDGE"
"AF_INET"
"AF_INET6"
];
# Restricting what namespaces it can create.
RestrictNamespaces = true;
};
};
# Set up the firewall. Take note the ports with the transport layer being
# accepted in Bind.
networking.firewall =
let
ports = [
53 # DNS
853 # DNS-over-TLS/DNS-over-QUIC
];
in
{
allowedUDPPorts = ports;
allowedTCPPorts = ports;
};
# Making this with nginx.
services.nginx.upstreams.local-dns = {
extraConfig = ''
zone dns 64k;
'';
servers = {
"127.0.0.1:${builtins.toString dnsOverHTTPSPort}" = { };
};
};
services.nginx.virtualHosts."${dnsSubdomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
extraConfig = ''
add_header Strict-Transport-Security max-age=31536000;
'';
kTLS = true;
locations = {
"/".return = "444";
"/dns-query".extraConfig = ''
grpc_pass grpcs://local-dns;
grpc_socket_keepalive on;
grpc_connect_timeout 10s;
grpc_ssl_verify off;
grpc_ssl_protocols TLSv1.3 TLSv1.2;
'';
};
};
services.nginx.streamConfig = ''
upstream dns_servers {
server localhost:53;
}
server {
listen 53 udp reuseport;
proxy_timeout 20s;
proxy_pass dns_servers;
}
'';
# Then generate a DH parameter for the application.
security.dhparams.params.bind.bits = 4096;
# Set up a fail2ban which is apparently already available in the package.
services.fail2ban.jails."named-refused".settings = {
enabled = true;
backend = "systemd";
filter = "named-refused[journalmatch='_SYSTEMD_UNIT=bind.service']";
maxretry = 3;
};
# Add the following to be backed up.
services.borgbackup.jobs.services-backup.paths = [ zonesDir ];
}

View File

@@ -1,79 +0,0 @@
{ config, lib, pkgs, ... }:
let
# The head of the Borgbase hostname.
hetzner-boxes-user = "u332477";
hetzner-boxes-server = "${hetzner-boxes-user}.your-storagebox.de";
borgRepo = path: "ssh://${hetzner-boxes-user}@${hetzner-boxes-server}:23/./borg/plover/${path}";
jobCommonSettings = { patternFiles ? [ ], patterns ? [ ], paths ? [ ], repo, passCommand }: {
inherit paths repo;
compression = "zstd,11";
dateFormat = "+%F-%H-%M-%S-%z";
doInit = true;
encryption = {
inherit passCommand;
mode = "repokey-blake2";
};
extraCreateArgs =
let
args = lib.flatten [
(builtins.map
(patternFile: "--patterns-from ${lib.escapeShellArg patternFile}")
patternFiles)
(builtins.map
(pattern: "--pattern ${lib.escapeShellArg pattern}")
patterns)
];
in
lib.concatStringsSep " " args;
extraInitArgs = "--make-parent-dirs";
persistentTimer = true;
preHook = ''
extraCreateArgs="$extraCreateArgs --stats"
'';
prune.keep = {
weekly = 4;
monthly = 12;
yearly = 6;
};
startAt = "monthly";
environment.BORG_RSH = "ssh -i ${config.sops.secrets."borg/ssh-key".path}";
};
in
{
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"borg/repos/host/patterns/keys" = { };
"borg/repos/host/password" = { };
"borg/repos/services/password" = { };
"borg/ssh-key" = { };
};
services.borgbackup.jobs = {
# Backup for host-specific files. They don't change much so it is
# acceptable for it to be backed up monthly.
host-backup = jobCommonSettings {
patternFiles = [
config.sops.secrets."borg/repos/host/patterns/keys".path
];
repo = borgRepo "host";
passCommand = "cat ${config.sops.secrets."borg/repos/host/password".path}";
};
# Backups for various services.
services-backup = jobCommonSettings
{
paths = [
# ACME accounts and TLS certificates
"/var/lib/acme"
];
repo = borgRepo "services";
passCommand = "cat ${config.sops.secrets."borg/repos/services/password".path}";
} // { startAt = "daily"; };
};
programs.ssh.extraConfig = ''
Host ${hetzner-boxes-server}
IdentityFile ${config.sops.secrets."borg/ssh-key".path}
'';
}

View File

@@ -0,0 +1,92 @@
# The database service of choice. Most services can use this so far
# (thankfully).
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.database;
postgresqlDomain = "postgres.${config.networking.domain}";
in
{
options.hosts.plover.services.database.enable = lib.mkEnableOption "preferred service SQL database";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
services.postgresql = {
enable = true;
package = pkgs.postgresql_16;
enableTCPIP = true;
# Create per-user schema as documented from Usage Patterns. This is to make
# use of the secure schema usage pattern they encourage.
#
# Now, you just have to keep in mind about applications making use of them.
# Most of them should have the setting to set the schema to be used. If
# not, then screw them (or just file an issue and politely ask for the
# feature).
initialScript =
let
# This will be run once anyways so it is acceptable to create users
# "forcibly".
perUserSchemas = lib.lists.map
(user: ''
CREATE USER ${user.name};
CREATE SCHEMA AUTHORIZATION ${user.name};
'')
config.services.postgresql.ensureUsers;
in
pkgs.writeText "plover-initial-postgresql-script" ''
${lib.concatStringsSep "\n" perUserSchemas}
'';
settings =
let
credsDir = path: "/run/credentials/postgresql.service/${path}";
in
{
# Still doing the secure schema usage pattern.
search_path = ''"$user"'';
ssl_cert_file = credsDir "cert.pem";
ssl_key_file = credsDir "key.pem";
ssl_ca_file = credsDir "fullchain.pem";
};
};
# With a database comes a dumping.
services.postgresqlBackup = {
enable = true;
compression = "zstd";
compressionLevel = 11;
# Start at every 3 days starting from the first day of the month.
startAt = "*-*-1/3";
};
# Setting this up for TLS.
systemd.services.postgresql = {
requires = [ "acme-finished-${postgresqlDomain}.target" ];
serviceConfig.LoadCredential =
let
certDirectory = config.security.acme.certs."${postgresqlDomain}".directory;
certCredentialPath = path: "${path}:${certDirectory}/${path}";
in
[
(certCredentialPath "cert.pem")
(certCredentialPath "key.pem")
(certCredentialPath "fullchain.pem")
];
};
security.acme.certs."${postgresqlDomain}".postRun = ''
systemctl restart postgresql.service
'';
}
(lib.mkIf hostCfg.services.backup.enable {
# Add the dumps to be backed up.
services.borgbackup.jobs.services-backup.paths = [ config.services.postgresqlBackup.location ];
})
]);
}
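
To illustrate the initialScript above: a hypothetical ensureUsers entry such as { name = "gitea"; } from some other module gets expanded by perUserSchemas into the generated script as:

CREATE USER gitea;
CREATE SCHEMA AUTHORIZATION gitea;

Combined with search_path = "$user", each role then resolves its own schema first, which is the secure schema usage pattern being referenced.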

View File

@@ -0,0 +1,365 @@
# The DNS server for my domains. Take note it uses a hidden master setup with
# the secondary nameservers of the service (as of 2023-10-05, we're using
# Hetzner's secondary nameservers).
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.dns-server;
inherit (config.networking) domain fqdn;
inherit (import ../hardware/networks.nix) interfaces clientNetworks serverNetworks secondaryNameServers;
secondaryNameServersIPs = lib.foldl'
(total: addresses: total ++ addresses.IPv4 ++ addresses.IPv6)
[ ]
(lib.attrValues secondaryNameServers);
domainZone = pkgs.substituteAll {
src = ../../config/dns/${domain}.zone;
ploverWANIPv4 = interfaces.wan.IPv4.address;
ploverWANIPv6 = interfaces.wan.IPv6.address;
};
fqdnZone = pkgs.substituteAll {
src = ../../config/dns/${fqdn}.zone;
ploverLANIPv4 = interfaces.lan.IPv4.address;
ploverLANIPv6 = interfaces.lan.IPv6.address;
};
zonesDir = "/etc/bind/zones";
zoneFile = domain: "${zonesDir}/${domain}.zone";
dnsSubdomain = "ns1.${domain}";
dnsOverHTTPSPort = 8443;
in
{
options.hosts.plover.services.dns-server.enable = lib.mkEnableOption "preferred DNS server";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
sops.secrets =
let
dnsFileAttribute = {
owner = config.users.users.named.name;
group = config.users.users.named.group;
mode = "0400";
};
in
lib.getSecrets ../../secrets/secrets.yaml {
"dns/${domain}/mailbox-security-key" = dnsFileAttribute;
"dns/${domain}/mailbox-security-key-record" = dnsFileAttribute;
"dns/${domain}/keybase-verification-key" = dnsFileAttribute;
"dns/${domain}/rfc2136-key" = dnsFileAttribute // {
reloadUnits = [ "bind.service" ];
};
};
# Install the utilities.
environment.systemPackages = [ config.services.bind.package ];
services.bind = {
enable = true;
forward = "first";
cacheNetworks = [
"127.0.0.1"
"::1"
];
listenOn = [
"127.0.0.1"
interfaces.lan.IPv4.address
interfaces.wan.IPv4.address
];
listenOnIpv6 = [
"::1"
interfaces.lan.IPv6.address
interfaces.wan.IPv6.address
];
# Welp, since the template is pretty limited, we'll have to go with our
# own. This is partially based from the NixOS Bind module except without
# the template for filling in zones since we use views.
configFile =
let
cfg = config.services.bind;
certDir = path: "/run/credentials/bind.service/${path}";
listenInterfaces = lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOn;
listenInterfacesIpv6 = lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOnIpv6;
in
pkgs.writeText "named.conf" ''
include "/etc/bind/rndc.key";
include "${config.sops.secrets."dns/${domain}/rfc2136-key".path}";
controls {
inet 127.0.0.1 allow {localhost;} keys {"rndc-key";};
};
tls ${dnsSubdomain} {
key-file "${certDir "key.pem"}";
cert-file "${certDir "cert.pem"}";
dhparam-file "${config.security.dhparams.params.bind.path}";
ciphers "HIGH:!kRSA:!aNULL:!eNULL:!RC4:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS:!SHA1:!SHA256:!SHA384";
prefer-server-ciphers yes;
session-tickets no;
};
http ${dnsSubdomain} {
endpoints { "/dns-query"; };
};
acl trusted { ${lib.concatStringsSep "; " (clientNetworks ++ serverNetworks)}; localhost; };
acl cachenetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.cacheNetworks} };
acl badnetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.blockedNetworks} };
options {
# Native DNS.
listen-on { ${listenInterfaces} };
listen-on-v6 { ${listenInterfacesIpv6} };
# DNS-over-TLS.
listen-on tls ${dnsSubdomain} { ${listenInterfaces} };
listen-on-v6 tls ${dnsSubdomain} { ${listenInterfacesIpv6} };
# DNS-over-HTTPS.
https-port ${builtins.toString dnsOverHTTPSPort};
listen-on tls ${dnsSubdomain} http ${dnsSubdomain} { ${listenInterfaces} };
listen-on-v6 tls ${dnsSubdomain} http ${dnsSubdomain} { ${listenInterfacesIpv6} };
allow-query { cachenetworks; };
blackhole { badnetworks; };
forward ${cfg.forward};
forwarders { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
directory "${cfg.directory}";
pid-file "/run/named/named.pid";
};
view internal {
match-clients { trusted; };
allow-query { any; };
allow-recursion { any; };
// We'll use systemd-resolved as our forwarder.
forwarders { 127.0.0.53 port 53; };
zone "${fqdn}" {
type primary;
file "${zoneFile fqdn}";
};
zone "${domain}" {
type primary;
file "${zoneFile domain}";
allow-transfer { ${lib.concatStringsSep "; " secondaryNameServersIPs}; };
update-policy {
grant rfc2136key.${domain}. zonesub TXT;
};
};
};
view external {
match-clients { any; };
forwarders { };
empty-zones-enable yes;
allow-query { any; };
allow-recursion { none; };
zone "${domain}" {
in-view internal;
};
};
${cfg.extraConfig}
'';
};
systemd.services.bind = {
path = with pkgs; [ replace-secret ];
preStart =
let
domainZone' = zoneFile domain;
fqdnZone' = zoneFile fqdn;
secretPath = path: config.sops.secrets."dns/${path}".path;
rndc = lib.getExe' config.services.bind.package "rndc";
in
lib.mkAfter ''
# Install the domain zone.
{
install -Dm0600 '${domainZone}' '${domainZone'}'
replace-secret '#mailboxSecurityKey#' '${secretPath "${domain}/mailbox-security-key"}' '${domainZone'}'
replace-secret '#mailboxSecurityKeyRecord#' '${secretPath "${domain}/mailbox-security-key-record"}' '${domainZone'}'
#${rndc} sync "${domain}" IN external
}
# Install the internal DNS zones.
install -Dm0600 '${fqdnZone}' '${fqdnZone'}'
'';
serviceConfig = {
# Additional service hardening. You can see most of the options from
# systemd.exec(5) manual. Run it as an unprivileged user.
User = config.users.users.named.name;
Group = config.users.users.named.group;
UMask = "0037";
# Get the credentials into the service.
LoadCredential =
let
certDirectory = config.security.acme.certs."${dnsSubdomain}".directory;
certCredentialPath = path: "${path}:${certDirectory}/${path}";
in
[
(certCredentialPath "cert.pem")
(certCredentialPath "key.pem")
(certCredentialPath "fullchain.pem")
];
LogFilterPatterns = [
# systemd-resolved doesn't have DNS cookie support, it seems.
"~missing expected cookie from 127.0.0.53#53"
];
# Lock and protect various system components.
LockPersonality = true;
PrivateTmp = true;
NoNewPrivileges = true;
RestrictSUIDSGID = true;
ProtectHome = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
ProtectProc = "invisible";
# Make the filesystem invisible to the service.
ProtectSystem = "strict";
ReadWritePaths = [
config.services.bind.directory
"/etc/bind"
];
ReadOnlyPaths = [
config.security.dhparams.params.bind.path
config.security.acme.certs."${dnsSubdomain}".directory
];
# Set up writable directories.
RuntimeDirectory = "named";
RuntimeDirectoryMode = "0750";
CacheDirectory = "named";
CacheDirectoryMode = "0750";
ConfigurationDirectory = "bind";
ConfigurationDirectoryMode = "0755";
# Filtering system calls.
SystemCallFilter = [ "@system-service" ];
SystemCallErrorNumber = "EPERM";
SystemCallArchitectures = "native";
# Granting and restricting its capabilities. Take note we're not using
# syslog for this even if the application can, so no syslog capability.
# Additionally, we're omitting the program's ability to chroot and
# chown since the user and the directories are already configured.
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
# Restrict what address families it can access.
RestrictAddressFamilies = [
"AF_LOCAL"
"AF_NETLINK"
"AF_BRIDGE"
"AF_INET"
"AF_INET6"
];
# Restricting what namespaces it can create.
RestrictNamespaces = true;
};
};
# Then generate a DH parameter for the application.
security.dhparams.params.bind.bits = 4096;
}
(lib.mkIf hostCfg.services.reverse-proxy.enable {
# Making this with nginx.
services.nginx.upstreams.local-dns = {
extraConfig = ''
zone dns 64k;
'';
servers = {
"127.0.0.1:${builtins.toString dnsOverHTTPSPort}" = { };
};
};
services.nginx.virtualHosts."${dnsSubdomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
extraConfig = ''
add_header Strict-Transport-Security max-age=31536000;
'';
kTLS = true;
locations = {
"/".return = "444";
"/dns-query".extraConfig = ''
grpc_pass grpcs://local-dns;
grpc_socket_keepalive on;
grpc_connect_timeout 10s;
grpc_ssl_verify off;
grpc_ssl_protocols TLSv1.3 TLSv1.2;
'';
};
};
services.nginx.streamConfig = ''
upstream dns_servers {
server localhost:53;
}
server {
listen 53 udp reuseport;
proxy_timeout 20s;
proxy_pass dns_servers;
}
'';
})
# Set up the firewall. Take note the ports with the transport layer being
# accepted in Bind.
(lib.mkIf hostCfg.services.firewall.enable {
networking.firewall =
let
ports = [
53 # DNS
853 # DNS-over-TLS/DNS-over-QUIC
];
in
{
allowedUDPPorts = ports;
allowedTCPPorts = ports;
};
})
# Add the following to be backed up.
(lib.mkIf hostCfg.services.backup.enable {
services.borgbackup.jobs.services-backup.paths = [ zonesDir ];
})
# Set up a fail2ban which is apparently already available in the package.
(lib.mkIf hostCfg.services.fail2ban.enable {
services.fail2ban.jails."named-refused".settings = {
enabled = true;
backend = "systemd";
filter = "named-refused[journalmatch='_SYSTEMD_UNIT=bind.service']";
maxretry = 3;
};
})
]);
}

View File

@@ -0,0 +1,29 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.fail2ban;
inherit (import ../hardware/networks.nix) interfaces;
in
{
options.hosts.plover.services.fail2ban.enable = lib.mkEnableOption "fail2ban monitoring";
config = lib.mkIf cfg.enable {
services.fail2ban = {
enable = true;
ignoreIP = [
# VPN clients.
"${interfaces.wireguard0.IPv4.address}/13"
"${interfaces.wireguard0.IPv6.address}/64"
];
# We're going to be unforgiving with this one since we only have key
# authentication and password authentication is disabled anyways.
jails.sshd.settings = {
enabled = true;
maxretry = 1;
};
};
};
}

View File

@@ -0,0 +1,22 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.firewall;
in
{
options.hosts.plover.services.firewall.enable = lib.mkEnableOption "firewall setup";
config = lib.mkIf cfg.enable {
networking = {
nftables.enable = true;
domain = "foodogsquared.one";
firewall = {
enable = true;
allowedTCPPorts = [
22 # Secure Shells.
];
};
};
};
}

View File

@@ -5,12 +5,18 @@
{ config, lib, pkgs, ... }:
let
+hostCfg = config.hosts.plover;
+cfg = hostCfg.services.gitea;
codeForgeDomain = "code.${config.networking.domain}";
giteaUser = config.users.users."${config.services.gitea.user}".name;
giteaDatabaseUser = config.services.gitea.user;
in
{
+options.hosts.plover.services.gitea.enable = lib.mkEnableOption "Gitea server for ${config.networking.domain}";
+config = lib.mkIf cfg.enable (lib.mkMerge [
+{
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"gitea/db/password".owner = giteaUser;
"gitea/smtp/password".owner = giteaUser;
@@ -125,6 +131,22 @@ in
| tail -n -3 | xargs rm
'';
+# Customizing Gitea which you can see more details at
+# https://docs.gitea.io/en-us/customizing-gitea/. We're just using
+# systemd-tmpfiles to make this work which is pretty convenient.
+systemd.tmpfiles.rules =
+let
+# To be used similarly to $GITEA_CUSTOM variable.
+giteaCustomDir = config.services.gitea.customDir;
+in
+[
+"L+ ${giteaCustomDir}/templates/home.tmpl - - - - ${../../files/gitea/home.tmpl}"
+"L+ ${giteaCustomDir}/public/img/logo.svg - - - - ${../../files/gitea/logo.svg}"
+"L+ ${giteaCustomDir}/public/img/logo.png - - - - ${../../files/gitea/logo.png}"
+];
+}
+(lib.mkIf hostCfg.services.database.enable {
# Making sure this plays nicely with the database service of choice. Take
# note, we're mainly using secure schema usage pattern here as described from
# the PostgreSQL documentation at
@@ -163,7 +185,9 @@ in
'')
];
};
+})
+(lib.mkIf hostCfg.services.reverse-proxy.enable {
# Attaching it altogether with the reverse proxy of choice.
services.nginx.virtualHosts."${codeForgeDomain}" = {
forceSSL = true;
@@ -186,7 +210,9 @@ in
"localhost:${builtins.toString config.services.gitea.settings.server.HTTP_PORT}" = { };
};
};
+})
+(lib.mkIf hostCfg.services.fail2ban.enable {
# Configuring fail2ban for this service which thankfully has a dedicated page
# at https://docs.gitea.io/en-us/fail2ban-setup/.
services.fail2ban.jails = {
@@ -210,21 +236,11 @@ in
ignoreregex =
'';
};
+})
-# Customizing Gitea which you can see more details at
-# https://docs.gitea.io/en-us/customizing-gitea/. We're just using
-# systemd-tmpfiles to make this work which is pretty convenient.
-systemd.tmpfiles.rules =
-let
-# To be used similarly to $GITEA_CUSTOM variable.
-giteaCustomDir = config.services.gitea.customDir;
-in
-[
-"L+ ${giteaCustomDir}/templates/home.tmpl - - - - ${../../files/gitea/home.tmpl}"
-"L+ ${giteaCustomDir}/public/img/logo.svg - - - - ${../../files/gitea/logo.svg}"
-"L+ ${giteaCustomDir}/public/img/logo.png - - - - ${../../files/gitea/logo.png}"
-];
+(lib.mkIf hostCfg.services.backup.enable {
# Add the following files to be backed up.
services.borgbackup.jobs.services-backup.paths = [ config.services.gitea.dump.backupDir ];
+})
+]);
}

View File

@@ -1,6 +1,9 @@
{ config, lib, pkgs, ... }:
let
+hostCfg = config.hosts.plover;
+cfg = hostCfg.services.grafana;
monitoringDomain = "monitoring.${config.networking.domain}";
grafanaDatabaseUser = config.services.grafana.settings.database.user;
grafanaDatabaseName = config.services.grafana.settings.database.name;
@@ -12,6 +15,23 @@ let
vouchSettings = config.services.vouch-proxy.instances."${vouchDomain}".settings;
in
{
+options.hosts.plover.services.grafana.enable = lib.mkEnableOption "monitoring dashboard for ${config.networking.hostName}";
+config = lib.mkIf cfg.enable (lib.mkMerge [
+{
+sops.secrets =
+let
+grafanaFileAttributes = {
+owner = config.users.users.grafana.name;
+group = config.users.users.grafana.group;
+mode = "0400";
+};
+in
+lib.getSecrets ../../secrets/secrets.yaml {
+"grafana/database/password" = grafanaFileAttributes;
+"grafana/users/admin/password" = grafanaFileAttributes;
+};
services.grafana = {
enable = true;
@@ -22,17 +42,6 @@ in
login_maximum_lifetime_duration = "14d";
};
-"auth.generic_oauth" = {
-api_url = authSubpath "oauth2/authorise";
-client_id = "grafana";
-client_secret = "$__file{${config.sops.secrets."vouch-proxy/client/secret".path}}";
-enabled = true;
-name = "Kanidm";
-oauth_url = authSubpath "ui/oauth2";
-scopes = lib.concatStringsSep " " [ "openid" "email" "profile" ];
-token_url = authSubpath "oauth2/token";
-};
database = rec {
host = "127.0.0.1:${builtins.toString config.services.postgresql.port}";
password = "$__file{${config.sops.secrets."grafana/database/password".path}}";
@@ -73,7 +82,9 @@ in
};
};
};
+}
+(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.virtualHosts."${monitoringDomain}" = {
forceSSL = true;
enableACME = true;
@@ -125,6 +136,9 @@ in
};
};
+})
+(lib.mkIf hostCfg.services.database.enable {
# Setting up with secure schema usage pattern.
systemd.services.grafana = {
preStart =
@@ -148,17 +162,19 @@ in
};
}];
};
+})
-sops.secrets =
-let
-grafanaFileAttributes = {
-owner = config.users.users.grafana.name;
-group = config.users.users.grafana.group;
-mode = "0400";
-};
-in
-lib.getSecrets ../../secrets/secrets.yaml {
-"grafana/database/password" = grafanaFileAttributes;
-"grafana/users/admin/password" = grafanaFileAttributes;
-};
+(lib.mkIf hostCfg.services.vouch-proxy.enable {
+services.grafana.settings."auth.generic_oauth" = {
+api_url = authSubpath "oauth2/authorise";
+client_id = "grafana";
+client_secret = "$__file{${config.sops.secrets."vouch-proxy/client/secret".path}}";
+enabled = true;
+name = "Kanidm";
+oauth_url = authSubpath "ui/oauth2";
+scopes = lib.concatStringsSep " " [ "openid" "email" "profile" ];
+token_url = authSubpath "oauth2/token";
+};
+})
+]);
}

View File

@@ -0,0 +1,78 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.idm;
authDomain = "auth.${config.networking.domain}";
port = 9443;
certsDir = config.security.acme.certs."${authDomain}".directory;
backupsDir = "/var/lib/kanidm/backups";
in
{
options.hosts.plover.services.idm.enable = lib.mkEnableOption "preferred IDM server";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
hosts.plover.services.vouch-proxy.enable = lib.mkDefault true;
services.kanidm = {
enableServer = true;
serverSettings = {
domain = authDomain;
origin = "https://${authDomain}:${builtins.toString port}";
bindaddress = "127.0.0.1:${builtins.toString port}";
ldapbindaddress = "127.0.0.1:3636";
role = "WriteReplica";
trust_x_forward_for = true;
tls_chain = "${certsDir}/fullchain.pem";
tls_key = "${certsDir}/key.pem";
online_backup = {
path = backupsDir;
schedule = "0 0 * * *";
};
};
};
# The kanidm Nix module already sets the certificates directory to be
# read-only with systemd so no need for it though we may need to set the
# backups directory.
systemd.services.kanidm = {
preStart = lib.mkBefore ''
mkdir -p "${backupsDir}"
'';
serviceConfig = {
SupplementaryGroups = [ config.security.acme.certs."${authDomain}".group ];
};
};
}
(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.virtualHosts."${authDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations."/".proxyPass = "https://kanidm";
};
services.nginx.upstreams."kanidm" = {
extraConfig = ''
zone services;
'';
servers = {
"localhost:${builtins.toString port}" = { };
};
};
})
(lib.mkIf hostCfg.services.backup.enable {
# Add the following to be backed up.
services.borgbackup.jobs.services-backup.paths = [ backupsDir ];
})
]);
}

View File

@@ -1,63 +0,0 @@
{ config, lib, pkgs, ... }:
let
authDomain = "auth.${config.networking.domain}";
port = 9443;
certsDir = config.security.acme.certs."${authDomain}".directory;
backupsDir = "/var/lib/kanidm/backups";
in
{
services.kanidm = {
enableServer = true;
serverSettings = {
domain = authDomain;
origin = "https://${authDomain}:${builtins.toString port}";
bindaddress = "127.0.0.1:${builtins.toString port}";
ldapbindaddress = "127.0.0.1:3636";
role = "WriteReplica";
trust_x_forward_for = true;
tls_chain = "${certsDir}/fullchain.pem";
tls_key = "${certsDir}/key.pem";
online_backup = {
path = backupsDir;
schedule = "0 0 * * *";
};
};
};
# The kanidm Nix module already sets the certificates directory to be
# read-only with systemd so no need for it though we may need to set the
# backups directory.
systemd.services.kanidm = {
preStart = lib.mkBefore ''
mkdir -p "${backupsDir}"
'';
serviceConfig = {
SupplementaryGroups = [ config.security.acme.certs."${authDomain}".group ];
};
};
services.nginx.virtualHosts."${authDomain}" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
kTLS = true;
locations."/".proxyPass = "https://kanidm";
};
services.nginx.upstreams."kanidm" = {
extraConfig = ''
zone services;
'';
servers = {
"localhost:${builtins.toString port}" = { };
};
};
# Add the following to be backed up.
services.borgbackup.jobs.services-backup.paths = [ backupsDir ];
}

View File

@@ -0,0 +1,52 @@
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.monitoring;
bindStatsPort = 8053;
prometheusExports = config.services.prometheus.exporters;
in
{
options.hosts.plover.services.monitoring.enable = lib.mkEnableOption "preferred monitoring stack";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
services.prometheus = {
enable = true;
exporters = {
bind = {
enable = true;
bindURI = "http://127.0.0.1:${builtins.toString bindStatsPort}";
};
nginx.enable = true;
nginxlog.enable = true;
node = {
enable = true;
enabledCollectors = [ "systemd" ];
};
};
scrapeConfigs = [
{
job_name = config.networking.hostName;
static_configs = [{
targets = [ "127.0.0.1:${builtins.toString prometheusExports.node.port}" ];
}];
}
];
};
# Required for Prometheus to be able to monitor my services.
services.nginx.statusPage = true;
services.bind.extraConfig = ''
statistics-channels {
inet 127.0.0.1 port ${builtins.toString bindStatsPort} allow { 127.0.0.1; };
};
'';
}
]);
}

View File

@@ -1,74 +0,0 @@
# The reverse proxy of choice. Logs should be rotated weekly.
{ config, lib, pkgs, ... }:
{
# The main server where it will tie all of the services in one neat little
# place. Take note, the virtual hosts definition are all in their respective
# modules.
services.nginx = {
enable = true;
enableReload = true;
package = pkgs.nginxMainline;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
# Some more server-sided compressions.
recommendedBrotliSettings = true;
recommendedGzipSettings = true;
recommendedZstdSettings = true;
proxyCachePath.apps = {
enable = true;
keysZoneName = "apps";
};
appendConfig = ''
worker_processes auto;
'';
# We're avoiding making any service the default server, especially since it
# could be used to reach a service over unencrypted HTTP. So we're setting
# up one with an unresponsive server response.
appendHttpConfig = ''
# https://docs.nginx.com/nginx/admin-guide/content-cache/content-caching/
proxy_cache_min_uses 5;
proxy_cache_valid 200 302 10m;
proxy_cache_valid 404 1m;
proxy_no_cache $http_pragma $http_authorization;
server {
listen 80 default_server;
listen [::]:80 default_server;
return 444;
}
'';
# This is defined for other services.
upstreams."nginx" = {
extraConfig = ''
zone services 64k;
'';
servers = {
"localhost:80" = { };
};
};
};
networking.firewall.allowedTCPPorts = [
80 # HTTP servers.
443 # HTTPS servers.
];
# Some fail2ban policies to apply for nginx.
services.fail2ban.jails = {
nginx-http-auth.settings = { enabled = true; };
nginx-botsearch.settings = { enabled = true; };
nginx-bad-request.settings = { enabled = true; };
};
# Generate a DH parameters for nginx-specific security configurations.
security.dhparams.params.nginx.bits = 4096;
}

View File

@@ -1,81 +0,0 @@
# The database service of choice. Most services can use this so far
# (thankfully).
{ config, lib, pkgs, ... }:
let
postgresqlDomain = "postgres.${config.networking.domain}";
in
{
services.postgresql = {
enable = true;
package = pkgs.postgresql_15;
enableTCPIP = true;
# Create per-user schema as documented from Usage Patterns. This is to make
# use of the secure schema usage pattern they encourage.
#
# Now, you just have to keep in mind about applications making use of them.
# Most of them should have the setting to set the schema to be used. If
# not, then screw them (or just file an issue and politely ask for the
# feature).
initialScript =
let
# This will be run once anyways so it is acceptable to create users
# "forcibly".
perUserSchemas = lib.lists.map
(user: ''
CREATE USER ${user.name};
CREATE SCHEMA AUTHORIZATION ${user.name};
'')
config.services.postgresql.ensureUsers;
in
pkgs.writeText "plover-initial-postgresql-script" ''
${lib.concatStringsSep "\n" perUserSchemas}
'';
settings =
let
credsDir = path: "/run/credentials/postgresql.service/${path}";
in
{
# Still doing the secure schema usage pattern.
search_path = ''"$user"'';
ssl_cert_file = credsDir "cert.pem";
ssl_key_file = credsDir "key.pem";
ssl_ca_file = credsDir "fullchain.pem";
};
};
# With a database comes a dumping.
services.postgresqlBackup = {
enable = true;
compression = "zstd";
compressionLevel = 11;
# Start at every 3 days starting from the first day of the month.
startAt = "*-*-1/3";
};
# Setting this up for TLS.
systemd.services.postgresql = {
requires = [ "acme-finished-${postgresqlDomain}.target" ];
serviceConfig.LoadCredential =
let
certDirectory = config.security.acme.certs."${postgresqlDomain}".directory;
certCredentialPath = path: "${path}:${certDirectory}/${path}";
in
[
(certCredentialPath "cert.pem")
(certCredentialPath "key.pem")
(certCredentialPath "fullchain.pem")
];
};
security.acme.certs."${postgresqlDomain}".postRun = ''
systemctl restart postgresql.service
'';
# Add the dumps to be backed up.
services.borgbackup.jobs.services-backup.paths = [ config.services.postgresqlBackup.location ];
}

View File

@@ -1,43 +0,0 @@
{ config, lib, pkgs, ... }:
let
bindStatsPort = 8053;
prometheusExports = config.services.prometheus.exporters;
in
{
services.prometheus = {
enable = true;
exporters = {
bind = {
enable = true;
bindURI = "http://127.0.0.1:${builtins.toString bindStatsPort}";
};
nginx.enable = true;
nginxlog.enable = true;
node = {
enable = true;
enabledCollectors = [ "systemd" ];
};
};
scrapeConfigs = [
{
job_name = config.networking.hostName;
static_configs = [{
targets = [ "127.0.0.1:${builtins.toString prometheusExports.node.port}" ];
}];
}
];
};
# Required for Prometheus to be able to monitor my services.
services.nginx.statusPage = true;
services.bind.extraConfig = ''
statistics-channels {
inet 127.0.0.1 port ${builtins.toString bindStatsPort} allow { 127.0.0.1; };
};
'';
}

View File

@@ -0,0 +1,86 @@
# The reverse proxy of choice. Logs should be rotated weekly.
{ config, lib, pkgs, ... }:
let
hostCfg = config.hosts.plover;
cfg = hostCfg.services.reverse-proxy;
in
{
options.hosts.plover.services.reverse-proxy.enable = lib.mkEnableOption "preferred public-facing reverse proxy";
config = lib.mkIf cfg.enable (lib.mkMerge [
{
# The main server where it will tie all of the services in one neat little
# place. Take note, the virtual hosts definition are all in their respective
# modules.
services.nginx = {
enable = true;
enableReload = true;
package = pkgs.nginxMainline;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
# Some more server-sided compressions.
recommendedBrotliSettings = true;
recommendedGzipSettings = true;
recommendedZstdSettings = true;
proxyCachePath.apps = {
enable = true;
keysZoneName = "apps";
};
appendConfig = ''
worker_processes auto;
'';
# We're avoiding making any service the default server, especially since it
# could be used to reach a service over unencrypted HTTP. So we're setting
# up one with an unresponsive server response.
appendHttpConfig = ''
# https://docs.nginx.com/nginx/admin-guide/content-cache/content-caching/
proxy_cache_min_uses 5;
proxy_cache_valid 200 302 10m;
proxy_cache_valid 404 1m;
proxy_no_cache $http_pragma $http_authorization;
server {
listen 80 default_server;
listen [::]:80 default_server;
return 444;
}
'';
# This is defined for other services.
upstreams."nginx" = {
extraConfig = ''
zone services 64k;
'';
servers = {
"localhost:80" = { };
};
};
};
networking.firewall.allowedTCPPorts = [
80 # HTTP servers.
443 # HTTPS servers.
];
# Generate a DH parameters for nginx-specific security configurations.
security.dhparams.params.nginx.bits = 4096;
}
(lib.mkIf config.profiles.server.enable {
# Some fail2ban policies to apply for nginx.
services.fail2ban.jails = {
nginx-http-auth.settings = { enabled = true; };
nginx-botsearch.settings = { enabled = true; };
nginx-bad-request.settings = { enabled = true; };
};
})
]);
}
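
Nothing in this module names a concrete application; the service modules above each attach their own virtual host to this proxy instead. The recurring shape, with an illustrative domain and port, is:

services.nginx.virtualHosts."app.${config.networking.domain}" = {
  forceSSL = true;
  enableACME = true;
  acmeRoot = null;
  kTLS = true;
  locations."/".proxyPass = "http://127.0.0.1:8080";
};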

View File

@@ -3,6 +3,9 @@
{ config, lib, pkgs, ... }:
let
+hostCfg = config.hosts.plover;
+cfg = hostCfg.services.vaultwarden;
passwordManagerDomain = "pass.${config.networking.domain}";
# This should be set from service module from nixpkgs.
@@ -12,13 +15,16 @@ let
vaultwardenDbName = "vaultwarden";
in
{
+options.hosts.plover.services.vaultwarden.enable = lib.mkEnableOption "Vaultwarden instance";
+config = lib.mkIf cfg.enable (lib.mkMerge [
+{
sops.secrets = lib.getSecrets ../../secrets/secrets.yaml {
"vaultwarden/env".owner = vaultwardenUser;
};
services.vaultwarden = {
enable = true;
-dbBackend = "postgresql";
environmentFile = config.sops.secrets."vaultwarden/env".path;
config = {
DOMAIN = "https://${passwordManagerDomain}";
@@ -49,9 +55,6 @@ in
# Enabling web vault with whatever nixpkgs comes in.
WEB_VAULT_ENABLED = true;
-# Databasifications...
-DATABASE_URL = "postgresql://${vaultwardenUser}@/${vaultwardenDbName}";
# Mailer service configuration (except the user and password).
SMTP_HOST = "smtp.sendgrid.net";
SMTP_PORT = 587;
@@ -60,30 +63,10 @@ in
};
};
-services.postgresql = {
-ensureDatabases = [ vaultwardenDbName ];
-ensureUsers = [{
-name = vaultwardenUser;
-ensurePermissions = {
-"DATABASE ${vaultwardenDbName}" = "ALL PRIVILEGES";
-"SCHEMA ${vaultwardenDbName}" = "ALL PRIVILEGES";
-};
-}];
-};
-systemd.services.vaultwarden = {
-path = [ config.services.postgresql.package ];
-# Making it comply with PostgreSQL secure schema usage pattern.
-preStart = lib.mkAfter ''
-# Setting up the appropriate schema for PostgreSQL secure schema usage.
-psql -tAc "SELECT 1 FROM information_schema.schemata WHERE schema_name='${vaultwardenUser}';" \
-| grep -q 1 || psql -tAc "CREATE SCHEMA IF NOT EXISTS AUTHORIZATION ${vaultwardenUser};"
-'';
# We do a little service hardening. Even though the Vaultwarden NixOS
# module is already doing some of those things, we'll just add some of
# them.
+systemd.services.vaultwarden = {
serviceConfig = lib.mkAfter {
LockPersonality = true;
NoNewPrivileges = true;
@@ -126,8 +109,38 @@ in
RestrictNamespaces = true;
};
};
+}
-# Attaching it to our reverse proxy of choice.
+(lib.mkIf hostCfg.services.database.enable {
+services.vaultwarden = {
+dbBackend = "postgresql";
+config.DATABASE_URL = "postgresql://${vaultwardenUser}@/${vaultwardenDbName}";
+};
+services.postgresql = {
+ensureDatabases = [ vaultwardenDbName ];
+ensureUsers = [{
+name = vaultwardenUser;
+ensurePermissions = {
+"DATABASE ${vaultwardenDbName}" = "ALL PRIVILEGES";
+"SCHEMA ${vaultwardenDbName}" = "ALL PRIVILEGES";
+};
+}];
+};
+systemd.services.vaultwarden = {
+path = [ config.services.postgresql.package ];
+# Making it comply with PostgreSQL secure schema usage pattern.
+preStart = lib.mkAfter ''
+# Setting up the appropriate schema for PostgreSQL secure schema usage.
+psql -tAc "SELECT 1 FROM information_schema.schemata WHERE schema_name='${vaultwardenUser}';" \
+| grep -q 1 || psql -tAc "CREATE SCHEMA IF NOT EXISTS AUTHORIZATION ${vaultwardenUser};"
+'';
+};
+})
+(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.virtualHosts."${passwordManagerDomain}" = {
forceSSL = true;
enableACME = true;
@@ -172,7 +185,14 @@ in
"${address}:${builtins.toString port}" = { };
};
};
+})
+(lib.mkIf hostCfg.services.backup.enable {
+# Add the data directory to be backed up.
+services.borgbackup.jobs.services-backup.paths = [ "/var/lib/bitwarden_rs" ];
+})
+(lib.mkIf hostCfg.services.fail2ban.enable {
# Configuring fail2ban for this service which thankfully has a dedicated page
# at https://github.com/dani-garcia/vaultwarden/wiki/Fail2Ban-Setup.
services.fail2ban.jails = {
@@ -216,7 +236,6 @@ in
ignoreregex =
'';
};
+})
-# Add the data directory to be backed up.
-services.borgbackup.jobs.services-backup.paths = [ "/var/lib/bitwarden_rs" ];
+]);
}

View File

@@ -1,11 +1,18 @@
{ config, lib, pkgs, ... }:
let
+hostCfg = config.hosts.plover;
+cfg = hostCfg.services.vouch-proxy;
inherit (config.services.vouch-proxy.instances."${vouchDomain}") settings;
vouchDomain = "vouch.${config.networking.domain}";
authDomain = config.services.kanidm.serverSettings.domain;
in
{
+options.hosts.plover.services.vouch-proxy.enable = lib.mkEnableOption "Vouch proxy setup";
+config = lib.mkIf cfg.enable (lib.mkMerge [
+{
sops.secrets = let
vouchPermissions = rec {
owner = "vouch-proxy";
@@ -41,7 +48,9 @@ in
};
};
};
+}
+(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.virtualHosts."${vouchDomain}" = {
forceSSL = true;
enableACME = true;
@@ -64,4 +73,6 @@ in
"${settings.vouch.listen}:${builtins.toString settings.vouch.port}" = { };
};
};
+})
+]);
}

View File

@@ -2,6 +2,9 @@
# We're setting up Wezterm mux server with TLS domains.
let
+hostCfg = config.hosts.plover;
+cfg = hostCfg.services.wezterm-mux-server;
weztermDomain = "mux.${config.networking.domain}";
port = 9801;
listenAddress = "localhost:${builtins.toString port}";
@@ -12,6 +15,10 @@ let
};
in
{
+options.hosts.plover.services.wezterm-mux-server.enable = lib.mkEnableOption "Wezterm mux server setup";
+config = lib.mkIf cfg.enable (lib.mkMerge [
+{
services.wezterm-mux-server = {
enable = true;
inherit configFile;
@@ -37,7 +44,10 @@ in
security.acme.certs."${weztermDomain}".postRun = ''
systemctl restart wezterm-mux-server.service
'';
+}
+# TODO: where mux.foodogsquared.one setup
+(lib.mkIf hostCfg.services.reverse-proxy.enable {
services.nginx.streamConfig = ''
upstream wezterm {
server ${listenAddress};
@@ -48,4 +58,6 @@ in
proxy_pass wezterm;
}
'';
+})
+]);
}

View File

@@ -3,6 +3,9 @@
# Take note this service is heavily based on the hardware networking setup of
# this host so better stay focused on the hardware configuration on this host.
let
+hostCfg = config.hosts.plover;
+cfg = hostCfg.services.wireguard;
inherit (import ../hardware/networks.nix) interfaces wireguardPort wireguardPeers;
wireguardIFName = interfaces.wireguard0.ifname;
@@ -11,6 +14,10 @@ let
phonePeerAddresses = with wireguardPeers.phone; [ "${IPv4}/32" "${IPv6}/128" ];
in
{
+options.hosts.plover.services.wireguard.enable = lib.mkEnableOption "Wireguard VPN setup";
+config = lib.mkIf cfg.enable (lib.mkMerge [
+{
environment.systemPackages = [ pkgs.wireguard-tools ];
sops.secrets =
@@ -27,30 +34,6 @@ in
"wireguard/preshared-keys/phone" = systemdNetworkdPermission;
};
-networking.firewall = {
-# Allow the UDP traffic for the Wireguard service.
-allowedUDPPorts = [ wireguardPort ];
-# IP forwarding for specific interfaces.
-filterForward = true;
-extraForwardRules = ''
-iifname ${wireguardIFName} accept comment "IP forward from Wireguard interface to LAN"
-'';
-};
-networking.nftables.ruleset = ''
-table ip wireguard-${wireguardIFName} {
-chain prerouting {
-type nat hook prerouting priority filter; policy accept;
-}
-chain postrouting {
-type nat hook postrouting priority srcnat; policy accept;
-iifname ${wireguardIFName} snat to ${interfaces.lan.IPv4.address} comment "Make packets from Wireguard interface appear as coming from the LAN interface"
-}
-}
-'';
# Since we're using systemd-networkd to configure interfaces, we can control
# how each interface can handle things such as IP masquerading so no need for
# modifying sysctl settings like 'ipv4.ip_forward' or similar.
@@ -102,4 +85,32 @@ in
];
};
};
+}
+(lib.mkIf hostCfg.services.firewall.enable {
+networking.firewall = {
+# Allow the UDP traffic for the Wireguard service.
+allowedUDPPorts = [ wireguardPort ];
+# IP forwarding for specific interfaces.
+filterForward = true;
+extraForwardRules = ''
+iifname ${wireguardIFName} accept comment "IP forward from Wireguard interface to LAN"
+'';
+};
+networking.nftables.ruleset = ''
+table ip wireguard-${wireguardIFName} {
+chain prerouting {
+type nat hook prerouting priority filter; policy accept;
+}
+chain postrouting {
+type nat hook postrouting priority srcnat; policy accept;
+iifname ${wireguardIFName} snat to ${interfaces.lan.IPv4.address} comment "Make packets from Wireguard interface appear as coming from the LAN interface"
+}
+}
+'';
+})
+]);
}