This commit is contained in:
nora 2025-08-03 00:41:37 +02:00
parent f456a5c626
commit 0949cba7be
92 changed files with 19 additions and 58 deletions

62
nix/modules/backup/backup.sh Executable file
View file

@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Backs up configured files and Postgres databases to the garage S3 bucket.
# Expects in the environment: CONFIG_FILE (JSON with .files[] and .pg_dumps[]),
# S3_ENDPOINT, S3_ACCESS_KEY, S3_SECRET_KEY, S3_BUCKET.
set -euo pipefail
time="$(date --iso-8601=s --utc)"
echo "Starting backup procedure with time=$time"
dir=$(mktemp -d)
echo "Setting workdir to $dir"
cd "$dir"
export HOME="$dir"
# Delete the temporary directory afterwards.
# Yes, this variable should expand now.
# shellcheck disable=SC2064
trap "rm -rf $dir" EXIT
echo "Logging into garage"
export MC_CONFIG_DIR="$dir"
mc alias set garage "$S3_ENDPOINT" "$S3_ACCESS_KEY" "$S3_SECRET_KEY" --api S3v4
mc ls garage/backups
files=$(jq -c '.files[]' "$CONFIG_FILE")
pg_dumps=$(jq -c '.pg_dumps[]' "$CONFIG_FILE")
echo "$files"
echo "$pg_dumps"
# Iterate over newline-separated JSON objects.
IFS=$'\n'
for file_config in $files; do
filepath=$(echo "$file_config" | jq -r ".file")
app=$(echo "$file_config" | jq -r ".app")
echo "Backing up app $app FILE $filepath..."
tmppath="$dir/file"
xz < "$filepath" > "$tmppath"
echo "Uploading file"
mc put "$tmppath" "garage/$S3_BUCKET/$app/$time/$(basename "$filepath").xz"
echo "Uploaded file"
done
for pg_config in $pg_dumps; do
app=$(echo "$pg_config" | jq -r ".app")
containerName=$(echo "$pg_config" | jq -r ".containerName")
dbName=$(echo "$pg_config" | jq -r ".dbName")
userName=$(echo "$pg_config" | jq -r ".userName")
echo "Backing up app $app POSTGRES $containerName/$dbName..."
tmppath="$dir/file"
podman exec "$containerName" pg_dump --format=custom --file /tmp/db.bak \
--host "127.0.0.1" --dbname "$dbName" --username "$userName"
podman cp "$containerName:/tmp/db.bak" "$tmppath"
# Compress to stdout explicitly; `xz -f file > file.xz` mixed xz's in-place
# mode with a shell redirection onto the same destination file.
xz -c "$tmppath" > "$tmppath.xz"
echo "Uploading file"
mc put "$tmppath.xz" "garage/$S3_BUCKET/$app/$time/$dbName.bak.xz"
echo "Uploaded file"
podman exec "$containerName" rm "/tmp/db.bak"
done

View file

@ -0,0 +1,83 @@
# NixOS module: declarative backup jobs, rendered to a JSON config that is
# consumed by ./backup.sh and run daily via a systemd service.
{ config, lib, pkgs, ... }: with lib;
let
  # Submodule describing one backup job: either a plain file or a pg_dump
  # out of a podman container (exactly which is determined by which of
  # `file` / `pgDump` is non-null).
  jobOptions = { ... }: {
    options = {
      app = mkOption {
        type = types.str;
        description = "The app name, used as the directory in the bucket";
      };
      # Optional extra environment file (e.g. credentials) for this job.
      environmentFile = mkOption {
        type = types.nullOr types.path;
        default = null;
      };
      # Absolute path of a file to back up verbatim (xz-compressed on upload).
      file = mkOption {
        type = types.nullOr types.str;
        default = null;
      };
      # Dump a Postgres database running inside a podman container.
      pgDump = mkOption {
        type = types.nullOr (types.submodule ({ ... }: {
          options = {
            containerName = mkOption {
              type = types.str;
            };
            dbName = mkOption {
              type = types.str;
            };
            userName = mkOption {
              type = types.str;
            };
          };
        }));
        default = null;
      };
      #mongo_dump = { };
    };
  };
in
{
  options.services.custom-backup = {
    jobs = mkOption {
      default = [ ];
      type = types.listOf (types.submodule jobOptions);
      description = "Backup jobs to execute";
    };
  };
  config =
    let
      cfg = config.services.custom-backup;
      # The JSON document backup.sh reads via $CONFIG_FILE: jobs are split
      # into `files` and `pg_dumps` arrays by which option was set.
      backupConfig = {
        files = builtins.map (job: { app = job.app; file = job.file; })
          (builtins.filter (job: job.file != null) cfg.jobs);
        pg_dumps = builtins.map (job: { app = job.app; } // job.pgDump)
          (builtins.filter (job: job.pgDump != null) cfg.jobs);
      };
      # backup.sh wrapped with its runtime dependencies on PATH.
      backupScript = pkgs.writeShellApplication {
        name = "backup";
        runtimeInputs = with pkgs; [ podman jq minio-client getent xz ];
        text = builtins.readFile ./backup.sh;
      };
    in
    {
      age.secrets.backup_s3_secret.file = ../../secrets/backup_s3_secret.age;
      systemd.services.custom-backup = {
        startAt = "daily";
        serviceConfig = {
          # TODO: can we use a dynamic user?
          #DynamicUser = true;
          ExecStart = "${backupScript}/bin/backup";
          Environment = [
            "CONFIG_FILE=${pkgs.writeText "backup-config.json" (builtins.toJSON backupConfig)}"
            "S3_BUCKET=backups"
            "S3_ENDPOINT=http://localhost:3900"
          ];
          # Per-job environment files plus the shared S3 key secret.
          EnvironmentFile = (builtins.filter (file: file != null)
            (builtins.map (job: job.environmentFile) cfg.jobs)) ++ [
            config.age.secrets.backup_s3_secret.path
          ];
        };
      };
    };
}

View file

@ -0,0 +1,59 @@
# Global Caddy options shared by all hosts; certificates are stored in the
# garage S3 bucket so every VPS can serve the same sites.
{
	email noratrieb@proton.me
	auto_https disable_redirects
	storage s3 {
		host "localhost:3900"
		bucket "caddy-store"
		# access_id ENV S3_ACCESS_ID
		# secret_key ENV S3_SECRET_KEY
		insecure true
	}
	servers {
		metrics
	}
	log default {
		output stdout
		format json
	}
}
# CORS snippet: args[0] = allowed origin, args[1] = allowed request headers.
# https://gist.github.com/ryanburnette/d13575c9ced201e73f8169d3a793c1a3
(cors) {
	@cors_preflight{args[0]} method OPTIONS
	@cors{args[0]} header Origin {args[0]}
	handle @cors_preflight{args[0]} {
		header {
			Access-Control-Allow-Origin "{args[0]}"
			Access-Control-Allow-Methods "GET, POST, PUT, PATCH, DELETE, OPTIONS"
			Access-Control-Allow-Credentials "false"
			# was "${args[1]}" — `$` is not Caddy placeholder syntax and would
			# be sent literally; use {args[N]} like everywhere else.
			Access-Control-Allow-Headers "{args[1]}"
			Access-Control-Max-Age "86400"
			defer
		}
		respond "" 204
	}
	handle @cors{args[0]} {
		header {
			Access-Control-Allow-Origin "{args[0]}"
			Access-Control-Expose-Headers *
			defer
		}
	}
}
http:// {
	log
	respond "This is an HTTPS-only server, silly you. Go to https:// instead." 418
}
# HTTP
:9010 {
	log
	metrics /metrics
}

View file

@ -0,0 +1,116 @@
# Copied from https://github.com/NixOS/nixpkgs/pull/259275 and updated.
{ lib
, buildGoModule
, fetchFromGitHub
, gnused
, nixosTests
, caddy
, stdenv
, testers
, installShellFiles
, externalPlugins ? [ ]
, vendorHash ? "sha256-1Api8bBZJ1/oYk4ZGIiwWCSraLzK9L+hsKXkFtk6iVM="
}:
let
attrsToModules = attrs:
builtins.map ({ name, repo, version }: "${repo}") attrs;
attrsToSources = attrs:
builtins.map ({ name, repo, version }: "${repo}@${version}") attrs;
in
buildGoModule rec {
pname = "caddy";
version = "2.8.4";
dist = fetchFromGitHub {
owner = "caddyserver";
repo = "dist";
rev = "v${version}";
hash = "sha256-O4s7PhSUTXoNEIi+zYASx8AgClMC5rs7se863G6w+l0=";
};
src = fetchFromGitHub {
owner = "caddyserver";
repo = "caddy";
rev = "v${version}";
hash = "sha256-CBfyqtWp3gYsYwaIxbfXO3AYaBiM7LutLC7uZgYXfkQ=";
};
inherit vendorHash;
subPackages = [ "cmd/caddy" ];
ldflags = [
"-s"
"-w"
"-X github.com/caddyserver/caddy/v2.CustomVersion=${version}"
];
# matches upstream since v2.8.0
tags = [ "nobadger" ];
nativeBuildInputs = [ gnused installShellFiles ];
modBuildPhase = ''
for module in ${builtins.toString (attrsToModules externalPlugins)}; do
sed -i "/standard/a _ \"$module\"" ./cmd/caddy/main.go
done
for plugin in ${builtins.toString (attrsToSources externalPlugins)}; do
go get $plugin
done
go generate
go mod vendor
'';
modInstallPhase = ''
mv -t vendor go.mod go.sum
cp -r --reflink=auto vendor "$out"
'';
preBuild = ''
chmod -R u+w vendor
[ -f vendor/go.mod ] && mv -t . vendor/go.{mod,sum}
go generate
for module in ${builtins.toString (attrsToModules externalPlugins)}; do
sed -i "/standard/a _ \"$module\"" ./cmd/caddy/main.go
done
'';
postInstall = ''
install -Dm644 ${dist}/init/caddy.service ${dist}/init/caddy-api.service -t $out/lib/systemd/system
substituteInPlace $out/lib/systemd/system/caddy.service \
--replace-fail "/usr/bin/caddy" "$out/bin/caddy"
substituteInPlace $out/lib/systemd/system/caddy-api.service \
--replace-fail "/usr/bin/caddy" "$out/bin/caddy"
'' + lib.optionalString (stdenv.buildPlatform.canExecute stdenv.hostPlatform) ''
# Generating man pages and completions fail on cross-compilation
# https://github.com/NixOS/nixpkgs/issues/308283
$out/bin/caddy manpage --directory manpages
installManPage manpages/*
installShellCompletion --cmd caddy \
--bash <($out/bin/caddy completion bash) \
--fish <($out/bin/caddy completion fish) \
--zsh <($out/bin/caddy completion zsh)
'';
passthru.tests = {
inherit (nixosTests) caddy;
version = testers.testVersion {
command = "${caddy}/bin/caddy version";
package = caddy;
};
};
meta = with lib; {
homepage = "https://caddyserver.com";
description = "Fast and extensible multi-platform HTTP/1-2-3 web server with automatic HTTPS";
license = licenses.asl20;
mainProgram = "caddy";
maintainers = with maintainers; [ Br1ght0ne emilylange techknowlogick ];
};
}

View file

@ -0,0 +1,13 @@
{ pkgs, lib, name, src ? null, ... }: pkgs.stdenv.mkDerivation {
inherit name src;
buildInputs = with pkgs; [ python311 python311Packages.zstandard python311Packages.brotli ];
buildPhase = ''
mkdir -p $out
cp -r $src/* $out/
chmod -R +w $out
${lib.getExe pkgs.python311} ${./prepare.py} $out
chmod -R -w $out
'';
}

View file

@ -0,0 +1,60 @@
import os
import sys
import gzip
import brotli
import zstandard
import hashlib
def usage():
    # Emit the command-line usage hint for this script.
    sys.stdout.write("usage: prepare.py [SRC]\n")
def write_etag(path, content):
    """Write the quoted SHA-256 hex digest of `content` to `path + ".sha256"`.

    The quoted digest is used verbatim as an HTTP ETag by the web server.
    """
    digest = hashlib.sha256(content).hexdigest()
    etag_path = path + ".sha256"
    with open(etag_path, "w") as f:
        print(f"Writing ETag {etag_path}")
        f.write('"%s"' % digest)
def main():
    """Walk the tree given as argv[1], writing .gz/.zst/.br compressed
    variants and .sha256 ETag files next to every original asset."""
    if len(sys.argv) < 2:
        usage()
        exit(1)
    src_dir = sys.argv[1]
    # (suffix, compressor-module) pairs; each module exposes .compress(bytes).
    codecs = [
        (".gz", gzip),
        (".zst", zstandard),
        (".br", brotli),
    ]
    for dirpath, _dirnames, filenames in os.walk(src_dir):
        for filename in filenames:
            path = os.path.join(dirpath, filename)
            # Skip ETag files and anything that is already a compressed variant.
            if path.endswith((".sha256", ".b3sum")):
                continue
            if path.endswith((".gz", ".zst", ".br")):
                continue
            with open(path, "rb") as f:
                content = f.read()
            for suffix, codec in codecs:
                target = path + suffix
                with open(target, "wb") as out:
                    print(f"Writing {target}")
                    blob = codec.compress(content)
                    out.write(blob)
                write_etag(target, blob)
            write_etag(path, content)


if __name__ == "__main__":
    main()

View file

@ -0,0 +1,14 @@
<!-- Fallback page served when a host on this infra is hit directly
     (see the per-host site block in the caddy module). -->
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>nora's server</title>
</head>
<body>
  <h1>congrats, you landed on my server (100% NixOS) directly!?</h1>
  <p>sorry, but there isn't anything cool here. this is <b>my</b> infra, you are not allowed here.</p>
  <p>if you do want to be allowed here, then uh.. still no.</p>
  <p>:3</p>
</body>
</html>

View file

@ -0,0 +1,95 @@
# NixOS module wiring up Caddy (custom build with the certmagic-s3 plugin so
# certs live in garage) and assembling the per-host Caddyfile.
{ pkgs, config, lib, name, website, slides, blog, ... }:
let
  caddy = pkgs.callPackage ./caddy-build.nix {
    externalPlugins = [
      {
        name = "certmagic-s3";
        repo = "github.com/noratrieb-mirrors/certmagic-s3";
        version = "e48519f95173e982767cbb881d49335b6a00a599";
      }
    ];
    vendorHash = "sha256-KP9bYitM/Pocw4DxOXPVBigWh4IykNf8yKJiBlTFZmI=";
  };
in
{
  environment.systemPackages = [ caddy ];
  networking.firewall.interfaces.wg0.allowedTCPPorts = [ 9010 ]; # metrics
  networking.firewall = {
    allowedTCPPorts = [
      80 # HTTP
      443 # HTTPS
    ];
    allowedUDPPorts = [
      443 # HTTP/3 via QUIC
    ];
  };
  age.secrets.caddy_s3_key_secret.file = ../../secrets/caddy_s3_key_secret.age;
  systemd.services.caddy.serviceConfig.EnvironmentFile = config.age.secrets.caddy_s3_key_secret.path;
  systemd.services.caddy.after = [ "garage.service" ]; # the cert store depends on garage
  services.caddy = {
    enable = true;
    package = caddy;
    # The Caddyfile is the shared base plus a per-host debugging vhost plus
    # (on the web-serving hosts only) the public sites and a host-specific
    # include ./<name>.Caddyfile.
    configFile = pkgs.writeTextFile {
      name = "Caddyfile";
      text = (
        builtins.readFile ./base.Caddyfile +
        ''
          ${config.networking.hostName}.infra.noratrieb.dev {
            log
            encode zstd gzip
            header -Last-Modified
            root * ${import ./caddy-static-prepare {
              name = "debugging-page";
              src = ./debugging-page;
              inherit pkgs lib;
            }}
            file_server {
              etag_file_extensions .sha256
              precompressed zstd gzip br
            }
          }
          ${
            if name == "vps1" || name == "vps3" || name == "vps4" then ''
              noratrieb.dev {
                log
                encode zstd gzip
                header -Last-Modified
                root * ${import ./caddy-static-prepare {
                  name = "website";
                  src = website { inherit pkgs slides blog; };
                  inherit pkgs lib;
                }}
                file_server {
                  etag_file_extensions .sha256
                  precompressed zstd gzip br
                }
              }
              files.noratrieb.dev {
                log
                encode zstd gzip
                reverse_proxy * localhost:3902
              }
            '' else ""
          }
          ${
            if name == "vps1" || name == "vps3" || name == "vps4" then
              builtins.readFile ./${name}.Caddyfile else ""
          }
        ''
      );
      # Validate the assembled Caddyfile at build time so a broken config
      # fails the deploy rather than the running service.
      checkPhase = ''
        ${lib.getExe caddy} --version
        ${lib.getExe caddy} validate --adapter=caddyfile --config=$out
      '';
    };
  };
}

View file

@ -0,0 +1,119 @@
# vps1: reverse proxies for the apps hosted on this machine.
www.noratrieb.dev {
	log
	redir https://noratrieb.dev{uri} permanent
}
uptime.noratrieb.dev {
	log
	encode zstd gzip
	reverse_proxy * localhost:5010
}
hugo-chat.noratrieb.dev {
	log
	encode zstd gzip
	reverse_proxy * localhost:5002
}
api.hugo-chat.noratrieb.dev {
	log
	# cors snippet from base.Caddyfile: args are (origin, allowed headers)
	import cors https://hugo-chat.noratrieb.dev "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type"
	encode zstd gzip
	reverse_proxy * localhost:5001
}
bisect-rustc.noratrieb.dev {
	log
	encode zstd gzip
	reverse_proxy * localhost:5005
}
docker.noratrieb.dev {
	log
	reverse_proxy * localhost:5000
}
git.noratrieb.dev {
	log
	encode zstd gzip
	reverse_proxy * localhost:5015
}
olat.noratrieb.dev {
	log
	encode zstd gzip
	reverse_proxy * localhost:5011
}
# unsure if necessary... something was misconfigured in the past here...
olat.noratrieb.dev:8088 {
	log
	encode zstd gzip
	reverse_proxy * localhost:5011
}
upload.files.noratrieb.dev {
	log
	encode zstd gzip
	# we need HTTP/2 here because the server doesn't work with HTTP/1.1
	# because it will send early 401 responses during the upload without consuming the body
	reverse_proxy * h2c://localhost:3050
}
################################################################
# redirects from legacy domains/subdomains to their current homes
blog.noratrieb.dev {
	log
	redir https://noratrieb.dev/blog{uri} permanent
}
nilstrieb.dev {
	log
	redir https://noratrieb.dev{uri} permanent
}
www.nilstrieb.dev {
	log
	redir https://noratrieb.dev{uri} permanent
}
blog.nilstrieb.dev {
	log
	redir https://noratrieb.dev/blog{uri} permanent
}
bisect-rustc.nilstrieb.dev {
	log
	# was https://bisect-rustc.dev/blog{uri} — wrong domain and a stray /blog
	# copy-pasted from the blog rule; point at the noratrieb.dev host.
	redir https://bisect-rustc.noratrieb.dev{uri} permanent
}
docker.nilstrieb.dev {
	log
	redir https://docker.noratrieb.dev{uri} permanent
}
hugo-chat.nilstrieb.dev {
	log
	redir https://hugo-chat.noratrieb.dev{uri} permanent
}
api.hugo-chat.nilstrieb.dev {
	log
	redir https://api.hugo-chat.noratrieb.dev{uri} permanent
}
uptime.nilstrieb.dev {
	log
	redir https://uptime.noratrieb.dev{uri} permanent
}
olat.nilstrieb.dev {
	log
	redir https://olat.noratrieb.dev{uri} permanent
}
olat.nilstrieb.dev:8088 {
	log
	redir https://olat.noratrieb.dev{uri} permanent
}

View file

@ -0,0 +1,5 @@
# vps3: Grafana lives here (see the monitoring module).
grafana.noratrieb.dev {
	log
	encode zstd gzip
	reverse_proxy * localhost:3000
}

View file

@ -0,0 +1,5 @@
# vps4: does-it-build app.
does-it-build.noratrieb.dev {
	log
	encode zstd gzip
	reverse_proxy * localhost:3000
}

View file

@ -0,0 +1,9 @@
# Default settings for Contabo VPS.
{ ... }: {
  boot.loader.grub.device = "/dev/sda";
  boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ];
  boot.initrd.kernelModules = [ "nvme" ];
  # Contabo images put the root filesystem on the third partition.
  fileSystems."/" = { device = "/dev/sda3"; fsType = "ext4"; };
  deployment.tags = [ "contabo" ];
}

View file

@ -0,0 +1,163 @@
# Base configuration shared by every host: SSH, secrets (agenix), honeypot
# daemons, and log/metrics shipping.
{ pkgs, lib, config, name, pretense, quotdd, nixpkgs-path, ... }: {
  deployment.targetHost = "${config.networking.hostName}.infra.noratrieb.dev";
  imports = [
    "${builtins.fetchTarball "https://github.com/ryantm/agenix/archive/de96bd907d5fbc3b14fc33ad37d1b9a3cb15edc6.tar.gz"}/modules/age.nix" # main 2024-07-26
  ];
  nix = {
    # Pin <nixpkgs> to the same tree the system was built from.
    nixPath = [ "nixpkgs=${nixpkgs-path}" ];
  };
  environment.systemPackages = with pkgs; [
    vim
    wget
    curl
    traceroute
    dnsutils
    nftables
  ];
  networking.hostName = name;
  time.timeZone = "Europe/Zurich";
  users.users.root.openssh.authorizedKeys.keys = [ ''ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG0n1ikUG9rYqobh7WpAyXrqZqxQoQ2zNJrFPj12gTpP nilsh@PC-Nils'' ];
  boot.tmp.cleanOnBoot = true;
  zramSwap.enable = true;
  services.openssh = {
    enable = true;
    openFirewall = true;
    banner = "meoooooow!! 😼 :3\n";
    hostKeys = [
      {
        path = "/etc/ssh/ssh_host_ed25519_key";
        type = "ed25519";
      }
      {
        # P256
        path = "/etc/ssh/ssh_host_ecdsa_key";
        type = "ecdsa";
      }
      {
        bits = 4096;
        path = "/etc/ssh/ssh_host_rsa_key";
        type = "rsa";
      }
    ];
    settings = {
      PasswordAuthentication = false;
    };
  };
  services.fail2ban = {
    enable = true;
  };
  system.nixos.distroName = "NixOS (gay 🏳)";
  # pretense: fake services listening on common ports to log connection attempts.
  systemd.services.pretense = {
    description = "pretense connection logger";
    wantedBy = [ "multi-user.target" ];
    after = [ "network.target" ];
    serviceConfig = {
      DynamicUser = true;
      ExecStart = "${lib.getExe (pretense {inherit pkgs;})}";
      # needed to bind ports < 1024 (telnet/17) without root
      AmbientCapabilities = "CAP_NET_BIND_SERVICE";
      Environment = [
        "PRETENSE_PORTS=23,3306,5432,1521" # telnet,mysql,postgres,oracle
        "PRETENSE_METRICS_PORT=9150"
      ];
    };
  };
  systemd.services.quotdd = {
    description = "quotdd Quote of The Day Daemon";
    wantedBy = [ "multi-user.target" ];
    after = [ "network.target" ];
    serviceConfig = {
      DynamicUser = true;
      ExecStart = "${lib.getExe (quotdd {inherit pkgs;})}";
      AmbientCapabilities = "CAP_NET_BIND_SERVICE";
      Environment = [ ];
    };
  };
  networking.firewall.allowedTCPPorts = [
    23 # telnet, pretense
    3306 # mysql, pretense
    5432 # postgres, pretense
    1521 # oracle, pretense
    17 # quote of the day, quotdd
  ];
  # monitoring
  networking.firewall.interfaces.wg0.allowedTCPPorts = [
    9100 # node exporter
    9150 # pretense exporter
  ];
  services.prometheus.exporters = {
    node = {
      enable = true;
    };
  };
  # Ship the systemd journal to Loki on vps3, tagging invalid SSH logins with
  # GeoIP data.
  services.promtail = {
    enable = true;
    configuration = {
      server = {
        disable = true;
      };
      clients = [
        {
          url = "http://vps3.local:3100/loki/api/v1/push";
        }
      ];
      scrape_configs = [
        {
          job_name = "journal";
          journal = {
            max_age = "24h";
            labels = {
              job = "systemd-journal";
              node = name;
            };
          };
          pipeline_stages = [{
            match = {
              selector = "{unit = \"sshd.service\"} |= \"Invalid user\"";
              stages = [
                { regex = { expression = "Invalid user.*from (?P<ip>.*) port.*"; }; }
                {
                  geoip = {
                    db = pkgs.fetchurl
                      {
                        # Note: You cannot use this for your own usage, this is only for me.
                        url = "https://github.com/noratrieb-mirrors/maxmind-geoip/releases/download/20240922/GeoLite2-City.mmdb";
                        sha256 = "sha256-xRGf2JEaEHpxEkIq3jJnZv49lTisFbygbjxiIZHIThg=";
                      };
                    source = "ip";
                    db_type = "city";
                  };
                }
              ];
            };
          }];
          relabel_configs = [
            {
              source_labels = [ "__journal__systemd_unit" ];
              target_label = "unit";
            }
            {
              source_labels = [ "__journal__hostname" ];
              target_label = "host";
            }
            {
              source_labels = [ "__journal_priority_keyword" ];
              target_label = "severity";
              regex = "(.+)";
            }
          ];
        }
      ];
    };
  };
}

View file

@ -0,0 +1,48 @@
# Authoritative DNS (knot) serving the noratrieb.dev and nilstrieb.dev zones,
# which are generated from Nix via dns.nix.
{ pkgs, lib, networkingConfig, ... }: {
  # get the package for the debugging tools
  environment.systemPackages = with pkgs; [ knot-dns ];
  networking.firewall.allowedUDPPorts = [
    53
  ];
  nixpkgs.overlays = [
    # Expose dns.nix as pkgs.nix-dns for the zone-file generators.
    (final: prev: {
      nix-dns = import (pkgs.fetchFromGitHub {
        owner = "nix-community";
        repo = "dns.nix";
        rev = "v1.1.2";
        hash = "sha256-EHiDP2jEa7Ai5ZwIf5uld9RVFcV77+2SUxjQXwJsJa0=";
      });
    })
  ];
  services.knot = {
    enable = true;
    settingsFile = pkgs.writeTextFile {
      name = "knot.conf";
      text = ''
        server:
          listen: 0.0.0.0@53
          listen: ::@53
        zone:
          - domain: noratrieb.dev
            storage: /var/lib/knot/zones/
            file: ${import ./noratrieb.dev.nix { inherit pkgs lib networkingConfig; }}
          - domain: nilstrieb.dev
            storage: /var/lib/knot/zones/
            file: ${import ./nilstrieb.dev.nix { inherit pkgs lib networkingConfig; }}
        log:
          - target: syslog
            any: info
      '';
    };
  };
  networking.firewall.interfaces.wg0.allowedTCPPorts = [ 9433 ]; # metrics
  services.prometheus.exporters.knot = {
    enable = true;
    port = 9433;
  };
}

View file

@ -0,0 +1,66 @@
# Zone file for the retired nilstrieb.dev domain, generated with dns.nix.
# https://github.com/nix-community/dns.nix
{ pkgs, lib, networkingConfig, ... }:
let
  data = with pkgs.nix-dns.lib.combinators;
    let
      hour1 = 3600;
      # host name -> { A = [...]; AAAA = [...]; } from the shared networking config.
      hostsToDns = builtins.mapAttrs
        (name: { publicIPv4, publicIPv6, ... }:
          lib.optionalAttrs (publicIPv4 != null) { A = [ (a publicIPv4) ]; } //
          lib.optionalAttrs (publicIPv6 != null) { AAAA = [ (aaaa publicIPv6) ]; })
        networkingConfig;
      # vps2 is no longer in networkingConfig; keep its address hardcoded.
      vps2 = {
        A = [ "184.174.32.252" ];
      };
    in
    with hostsToDns;
    # point nilstrieb.dev to vps1 (retired)
    vps1 // {
      TTL = hour1;
      SOA = {
        nameServer = "ns1.nilstrieb.dev.";
        adminEmail = "void@nilstrieb.dev";
        serial = 2024072601;
      };
      CAA = [
        { issuerCritical = false; tag = "issue"; value = "letsencrypt.org"; }
        { issuerCritical = false; tag = "issue"; value = "sectigo.com"; }
      ];
      NS = [
        "ns1.nilstrieb.dev."
        "ns2.nilstrieb.dev."
      ];
      subdomains = {
        ns1 = dns1;
        ns2 = dns2;
        localhost.A = [ (a "127.0.0.1") ];
        # --- retired:
        bisect-rustc = vps1;
        blog = vps1;
        docker = vps1;
        www = vps1;
        uptime = vps1;
        hugo-chat = vps1 // {
          subdomains.api = vps1;
        };
        olat = vps1;
        # ---
        # infra (legacy)
        inherit vps2;
        pronouns.TXT = [
          "she/her"
        ];
      };
    };
in
pkgs.writeTextFile {
  name = "nilstrieb.dev.zone";
  text = pkgs.nix-dns.lib.toString "nilstrieb.dev" data;
}

View file

@ -0,0 +1,108 @@
# Zone file for the primary noratrieb.dev domain, generated with dns.nix.
# https://github.com/nix-community/dns.nix
{ pkgs, lib, networkingConfig, ... }:
let
  data = with pkgs.nix-dns.lib.combinators;
    let
      hour1 = 3600;
      # host name -> { A = [...]; AAAA = [...]; } from the shared networking config.
      hostsToDns = builtins.mapAttrs
        (name: { publicIPv4, publicIPv6, ... }:
          lib.optionalAttrs (publicIPv4 != null) { A = [ (a publicIPv4) ]; } //
          lib.optionalAttrs (publicIPv6 != null) { AAAA = [ (aaaa publicIPv6) ]; })
        networkingConfig;
      # Merge several hosts' records so one name round-robins over all of them.
      combine = hosts: {
        A = lib.lists.flatten (map (host: if builtins.hasAttr "A" host then host.A else [ ]) hosts);
        AAAA = lib.lists.flatten (map (host: if builtins.hasAttr "AAAA" host then host.AAAA else [ ]) hosts);
      };
    in
    with hostsToDns;
    # vps{1,3,4} contains root noratrieb.dev
    combine [ vps1 vps3 vps4 ] // {
      TTL = hour1;
      SOA = {
        nameServer = "ns1.noratrieb.dev.";
        adminEmail = "void@noratrieb.dev";
        serial = 2024072601;
      };
      NS = [
        "ns1.noratrieb.dev."
        "ns2.noratrieb.dev."
      ];
      CAA = [
        { issuerCritical = false; tag = "issue"; value = "letsencrypt.org"; }
        { issuerCritical = false; tag = "issue"; value = "sectigo.com"; }
      ];
      TXT = [
        "protonmail-verification=09106d260e40df267109be219d9c7b2759e808b5"
        "v=spf1 include:_spf.protonmail.ch ~all"
      ];
      MX = [
        (mx.mx 10 "mail.protonmail.ch.")
        (mx.mx 20 "mailsec.protonmail.ch.")
      ];
      subdomains = {
        # --- NS records
        ns1 = dns1;
        ns2 = dns2;
        # --- website stuff
        blog = vps1;
        www = vps1;
        files = combine [ vps1 vps3 vps4 ] // {
          subdomains = {
            upload = vps1;
          };
        };
        # --- apps
        bisect-rustc = vps1;
        docker = vps1;
        hugo-chat = vps1 // {
          subdomains.api = vps1;
        };
        uptime = vps1;
        does-it-build = vps4;
        git = vps1;
        olat = vps1;
        std.CNAME = [ (cname "noratrieb.github.io.") ];
        # --- fun shit
        localhost.A = [ (a "127.0.0.1") ];
        newtest.TXT = [ "uwu it works" ];
        pronouns.TXT = [
          "she/her"
        ];
        sshhoneypot = vps5;
        # --- infra
        grafana = vps3;
        infra.subdomains = hostsToDns;
        # --- other verification
        _discord.TXT = [ "dh=e0f7e99c70c4ce17f7afcce3be8bfda9cd363843" ];
        _atproto.TXT = [ "did=did:plc:pqyzoyxk7gfcbxk65mjyncyl" ];
        # --- email
        _domainkey.subdomains = {
          protonmail.CNAME = [ (cname "protonmail.domainkey.deenxxi4ieo32na6brazky2h7bt5ezko6vexdbvbzzbtj6oj43kca.domains.proton.ch.") ];
          protonmail2.CNAME = [ (cname "protonmail2.domainkey.deenxxi4ieo32na6brazky2h7bt5ezko6vexdbvbzzbtj6oj43kca.domains.proton.ch.") ];
          protonmail3.CNAME = [ (cname "protonmail3.domainkey.deenxxi4ieo32na6brazky2h7bt5ezko6vexdbvbzzbtj6oj43kca.domains.proton.ch.") ];
        };
        _dmarc.TXT = [
          "v=DMARC1; p=quarantine"
        ];
      };
    };
in
pkgs.writeTextFile
{
  name = "noratrieb.dev.zone";
  text = pkgs.nix-dns.lib.toString "noratrieb.dev" data;
}

View file

@ -0,0 +1,34 @@
# garage
## layout
- co-ka -> Contabo Karlsruhe
- co-du -> Contabo Düsseldorf
- he-nu -> Hetzner Nürnberg
## buckets
- `caddy-store`: Store for Caddy webservers
- key `caddy` RW
- `docker-registry`
- key `docker-registry` RW
- `loki`
- key `loki` RW
- `backups`
- key `backups` RW
- `forgejo`
- key `forgejo` RW
- `files.noratrieb.dev`
- key `upload-files` RW
## keys
- `caddy`: `GK25e33d4ba20d54231e513b80`
- `docker-registry`: `GK48011ee5b5ccbaf4233c0e40`
- `loki`: `GK84ffae2a0728abff0f96667b`
- `backups`: `GK8cb8454a6f650326562bff2f`
- `forgejo`: `GKc8bfd905eb7f85980ffe84c9`
- `upload-files`: `GK607464882f6e29fb31e0f553`
- `admin`: `GKaead6cf5340e54a4a19d9490`
- RW permissions on ~every bucket

View file

@ -0,0 +1,49 @@
# garage S3 cluster node; settings end up in garage.toml.
{ config, pkgs, name, ... }: {
  age.secrets.garage_secrets.file = ../../secrets/garage_secrets.age;
  environment.systemPackages = with pkgs; [
    minio-client
  ];
  networking.firewall.interfaces.wg0.allowedTCPPorts = [
    3901 # RPC
    3903 # admin for metrics
  ];
  services.garage = {
    enable = true;
    package = pkgs.garage_1_1_0;
    settings = {
      metadata_dir = "/var/lib/garage/meta";
      data_dir = "/var/lib/garage/data";
      db_engine = "sqlite";
      metadata_auto_snapshot_interval = "6h";
      replication_factor = 3;
      # arbitrary, but a bit higher as disk space matters more than time. she says, cluelessly.
      # NOTE: garage's config key is `compression_level` (underscore); the
      # previous `compression-level` produced an unknown key that garage ignored.
      compression_level = 5;
      rpc_bind_addr = "[::]:3901";
      rpc_public_addr = "${name}.local:3901";
      s3_api = {
        s3_region = "garage";
        api_bind_addr = "[::]:3900";
        root_domain = ".s3.garage.localhost";
      };
      s3_web = {
        bind_addr = "[::]:3902";
        root_domain = ".web.garage.localhost";
        index = "index.html";
      };
      admin = {
        api_bind_addr = "[::]:3903";
      };
    };
    environmentFile = config.age.secrets.garage_secrets.path;
  };
}

View file

@ -0,0 +1,8 @@
# Podman container runtime plus the registry password secret used by apps.
{ ... }: {
  virtualisation.podman = {
    enable = true;
  };
  # Allow DNS/mDNS from containers to the host resolver.
  # https://github.com/NixOS/nixpkgs/issues/226365
  networking.firewall.interfaces."podman+".allowedUDPPorts = [ 53 5353 ];
  age.secrets.docker_registry_password.file = ../../secrets/docker_registry_password.age;
}

View file

@ -0,0 +1,165 @@
# Monitoring stack (runs on vps3): Prometheus scraping all hosts over the
# wg0 mesh, Grafana for dashboards, Loki (backed by garage S3) for logs.
{ config, lib, ... }: {
  services.prometheus = {
    enable = true;
    globalConfig = { };
    # Targets use the *.local names mapped to wireguard IPs by the wg module.
    scrapeConfigs = [
      {
        job_name = "prometheus";
        static_configs = [
          { targets = [ "localhost:9090" ]; }
        ];
      }
      {
        job_name = "node";
        static_configs = [
          { targets = [ "dns1.local:9100" ]; }
          { targets = [ "dns2.local:9100" ]; }
          { targets = [ "vps1.local:9100" ]; }
          { targets = [ "vps2.local:9100" ]; }
          { targets = [ "vps3.local:9100" ]; }
          { targets = [ "vps4.local:9100" ]; }
          { targets = [ "vps5.local:9100" ]; }
        ];
      }
      {
        job_name = "caddy";
        static_configs = [
          { targets = [ "vps1.local:9010" ]; }
          { targets = [ "vps2.local:9010" ]; }
          { targets = [ "vps3.local:9010" ]; }
          { targets = [ "vps4.local:9010" ]; }
          { targets = [ "vps5.local:9010" ]; }
        ];
      }
      {
        job_name = "docker-registry";
        static_configs = [
          { targets = [ "vps1.local:9011" ]; }
        ];
      }
      {
        job_name = "garage";
        static_configs = [
          { targets = [ "vps1.local:3903" ]; }
          { targets = [ "vps2.local:3903" ]; }
          { targets = [ "vps3.local:3903" ]; }
          { targets = [ "vps4.local:3903" ]; }
          { targets = [ "vps5.local:3903" ]; }
        ];
      }
      {
        job_name = "knot";
        static_configs = [
          { targets = [ "dns1.local:9433" ]; }
          { targets = [ "dns2.local:9433" ]; }
        ];
      }
      {
        job_name = "pretense";
        static_configs = [
          { targets = [ "dns1.local:9150" ]; }
          { targets = [ "dns2.local:9150" ]; }
          { targets = [ "vps1.local:9150" ]; }
          { targets = [ "vps2.local:9150" ]; }
          { targets = [ "vps3.local:9150" ]; }
          { targets = [ "vps4.local:9150" ]; }
          { targets = [ "vps5.local:9150" ]; }
        ];
      }
    ];
  };
  age.secrets.grafana_admin_password.file = ../../secrets/grafana_admin_password.age;
  systemd.services.grafana.serviceConfig.EnvironmentFile = config.age.secrets.grafana_admin_password.path;
  services.grafana = {
    enable = true;
    settings = {
      security = {
        admin_user = "admin";
      };
      server = {
        root_url = "https://grafana.noratrieb.dev";
      };
    };
    provision = {
      enable = true;
      datasources.settings = {
        apiVersion = 1;
        datasources = [
          {
            name = "Prometheus";
            type = "prometheus";
            access = "proxy";
            url = "http://vps3.local:9090";
            jsonData = {
              httpMethod = "POST";
              prometheusType = "Prometheus";
            };
          }
          {
            name = "loki";
            type = "loki";
            access = "proxy";
            url = "http://vps3.local:3100";
          }
        ];
      };
    };
  };
  networking.firewall.interfaces.wg0.allowedTCPPorts = [ 3100 ]; # loki
  age.secrets.loki_env.file = ../../secrets/loki_env.age;
  systemd.services.loki.serviceConfig.EnvironmentFile = config.age.secrets.loki_env.path;
  services.loki = {
    enable = true;
    # expand-env lets ${ACCESS_KEY}/${SECRET_KEY} below come from loki_env.
    extraFlags = [ "-config.expand-env=true" /*"-print-config-stderr"*/ ];
    configuration = {
      auth_enabled = false;
      server = {
        http_listen_port = 3100;
      };
      common = {
        ring = {
          instance_addr = "127.0.0.1";
          kvstore.store = "inmemory";
        };
        replication_factor = 1;
        path_prefix = "/var/lib/loki";
      };
      schema_config = {
        configs = [
          {
            from = "2020-05-15";
            store = "tsdb";
            object_store = "s3";
            schema = "v13";
            index = {
              prefix = "index_";
              period = "24h";
            };
          }
        ];
      };
      storage_config = {
        tsdb_shipper = {
          active_index_directory = "/var/lib/loki/index";
          cache_location = "/var/lib/loki/cache";
        };
        # Chunks are stored in the local garage S3 endpoint.
        aws = {
          access_key_id = "\${ACCESS_KEY}";
          secret_access_key = "\${SECRET_KEY}";
          endpoint = "127.0.0.1:3900";
          s3forcepathstyle = true;
          region = "garage";
          insecure = true;
          s3 = "s3://\${ACCESS_KEY}:\${SECRET_KEY}@127.0.0.1:3900/loki";
        };
      };
    };
  };
  # Loki's shipper directories must exist and be owned by the service user.
  system.activationScripts.makeLokiDir = lib.stringAfter [ "var" ] ''
    mkdir -p /var/lib/loki/{index,cache}
    chown ${config.services.loki.user}:${config.services.loki.group} -R /var/lib/loki
  '';
}

View file

@ -0,0 +1,58 @@
# Docker registry backed by the garage S3 bucket, with htpasswd auth and a
# Prometheus debug endpoint on :9011.
{ config, lib, ... }: {
  age.secrets = {
    registry_htpasswd = {
      file = ../../secrets/registry_htpasswd.age;
      owner = config.users.users.docker-registry.name;
    };
    registry_s3_key_secret = {
      file = ../../secrets/registry_s3_key_secret.age;
      owner = config.users.users.docker-registry.name;
    };
  };
  networking.firewall.interfaces.wg0.allowedTCPPorts = [ 9011 ]; # metrics
  systemd.services.docker-registry.serviceConfig.EnvironmentFile = config.age.secrets.registry_s3_key_secret.path;
  services.dockerRegistry = {
    enable = true;
    # no filesystem storage — everything goes to S3 below
    storagePath = null;
    port = 5000;
    extraConfig = {
      log = {
        accesslog.disabled = false;
        level = "info";
        formatter = "text";
        fields.service = "registry";
      };
      redis = lib.mkForce null;
      storage = {
        s3 = {
          regionendpoint = "http://127.0.0.1:3900";
          forcepathstyle = true; # ensure it doesn't try docker-registry.127.0.0.1 as the host
          region = "garage";
          bucket = "docker-registry";
          # accesskey = ""; ENV REGISTRY_STORAGE_S3_ACCESSKEY
          # secretkey = ""; ENV REGISTRY_STORAGE_S3_SECRETKEY
          secure = false;
        };
        redirect.disable = true;
      };
      http = {
        host = "https://docker.noratrieb.dev";
        draintimeout = "60s";
        debug = {
          addr = ":9011";
          prometheus = {
            enabled = true;
            path = "/metrics";
          };
        };
      };
      auth.htpasswd = {
        # TODO: ugh :(
        realm = "nilstrieb-registry";
        path = config.age.secrets.registry_htpasswd.path;
      };
    };
  };
}

View file

@ -0,0 +1,49 @@
# Wireguard mesh (wg0) between all hosts; peers and private IPs come from the
# shared networkingConfig attrset.
{ name, config, networkingConfig, ... }:
let
  wgSettings = (builtins.getAttr name networkingConfig).wg;
  listenPort = 51820;
in
{
  # Map from $HOST.local to the private IP.
  networking.hosts =
    let
      # Hosts without a `wg` attr are excluded from the mesh (entry = null).
      hostsEntries = map
        (host:
          let hostConfig = builtins.getAttr host networkingConfig; in
          if builtins.hasAttr "wg" hostConfig then {
            name = hostConfig.wg.privateIP;
            value = [ "${host}.local" ];
          } else null)
        (builtins.attrNames networkingConfig);
      wgHostEntries = builtins.filter (entry: entry != null) hostsEntries;
    in
    builtins.listToAttrs wgHostEntries;
  networking.firewall.allowedUDPPorts = [
    listenPort
  ];
  age.secrets.wg_private.file = ../../secrets/wg_private_${name}.age;
  networking.wg-quick.interfaces = {
    wg0 = {
      address = [ "${wgSettings.privateIP}/24" ];
      inherit listenPort;
      privateKeyFile = config.age.secrets.wg_private.path;
      peers = map
        (peer:
          let peerConfig = (builtins.getAttr peer networkingConfig).wg;
          in {
            inherit (peerConfig) publicKey;
            endpoint = "${peer}.infra.noratrieb.dev:${toString listenPort}";
            allowedIPs = [ "${peerConfig.privateIP}/32" ];
            # sometimes there's some weirdness....??
            persistentKeepalive = 25;
          }
        )
        wgSettings.peers;
    };
  };
  deployment.tags = [ "wg-mesh" ];
}