This commit is contained in:
nora 2025-08-03 00:41:37 +02:00
parent f456a5c626
commit 0949cba7be
92 changed files with 19 additions and 58 deletions

View file

@ -0,0 +1,35 @@
# cargo-bisect-rustc-service: containerized web service with sqlite state.
{ config, lib, ... }:
let
  # Credentials for the private docker registry.
  dockerLogin = {
    registry = "docker.noratrieb.dev";
    username = "nils";
    passwordFile = config.age.secrets.docker_registry_password.path;
  };
in
{
  virtualisation.oci-containers.containers = {
    cargo-bisect-rustc-service = {
      image = "docker.noratrieb.dev/cargo-bisect-rustc-service:316a4044";
      volumes = [
        # Persistent state (the sqlite db) lives on the host.
        "/var/lib/cargo-bisect-rustc-service:/data"
      ];
      environment = {
        SQLITE_DB = "/data/db.sqlite";
      };
      # Loopback only; caddy proxies bisect-rustc.noratrieb.dev to 5005.
      ports = [ "127.0.0.1:5005:4000" ];
      login = dockerLogin;
    };
  };
  # Daily sqlite backup via the custom-backup module.
  services.custom-backup.jobs = [
    {
      app = "cargo-bisect-rustc-service";
      file = "/var/lib/cargo-bisect-rustc-service/db.sqlite";
    }
  ];
  # NOTE(review): ugo+w makes the state dir world-writable so the container
  # user can write to it; a fixed uid/gid mapping would be tighter.
  system.activationScripts.makeCargoBisectRustcServiceDir = lib.stringAfter [ "var" ] ''
    mkdir -p /var/lib/cargo-bisect-rustc-service/
    chmod ugo+w /var/lib/cargo-bisect-rustc-service/
  '';
}

View file

@ -0,0 +1,43 @@
# does-it-build: systemd service running the pinned does-it-build binary.
{ pkgs, lib, does-it-build, my-projects-versions, ... }:
let
  does-it-build-base = does-it-build { inherit pkgs; };
  # Stamp the build with the pinned commit from my-projects.json.
  # (Was a two-arg `finalAttrs: previousAttrs:` lambda with both args unused;
  # the simple one-arg overrideAttrs form is equivalent.)
  does-it-build-with-commit = does-it-build-base.overrideAttrs (previousAttrs: {
    DOES_IT_BUILD_OVERRIDE_VERSION = my-projects-versions.does-it-build;
  });
in
{
  systemd.services.does-it-build = {
    description = "https://github.com/Noratrieb/does-it-build";
    wantedBy = [ "multi-user.target" ];
    after = [ "network.target" ];
    # The service shells out to these tools to perform builds.
    path = with pkgs; [ rustup gcc bash ];
    serviceConfig = {
      User = "does-it-build";
      Group = "does-it-build";
      # getExe' already returns a string; the previous "${ ... }" wrapper
      # (and the parentheses around the derivation) were redundant.
      ExecStart = lib.getExe' does-it-build-with-commit "does-it-build";
      Environment = "DB_PATH=/var/lib/does-it-build/db.sqlite";
    };
  };
  # Daily sqlite backup via the custom-backup module.
  services.custom-backup.jobs = [
    {
      app = "does-it-build";
      file = "/var/lib/does-it-build/db.sqlite";
    }
  ];
  users.users.does-it-build = {
    isSystemUser = true;
    home = "/var/lib/does-it-build";
    description = "does-it-build builder account";
    group = "does-it-build";
  };
  users.groups.does-it-build = { };
  # TODO: i feel like there's gotta be a better way to do the chown..
  # (systemd StateDirectory= or systemd.tmpfiles.rules would cover this;
  # left as-is to avoid changing deployed behavior.)
  system.activationScripts.makeDoesItBuildDir = lib.stringAfter [ "var" ] ''
    mkdir -p /var/lib/does-it-build/
    chown does-it-build:does-it-build /var/lib/does-it-build/
  '';
}

View file

@ -0,0 +1,50 @@
# Forgejo (git.noratrieb.dev): sqlite metadata db, object storage in garage
# (S3/minio-compatible API on localhost:3900).
{ config, ... }: {
  age.secrets.forgejo_s3_key_secret.file = ../../secrets/forgejo_s3_key_secret.age;
  services.forgejo = {
    enable = true;
    database = {
      type = "sqlite3";
    };
    lfs.enable = false;
    settings = {
      DEFAULT = {
        APP_NAME = "this forge meows";
        APP_SLOGAN = "this forge meows";
      };
      server = rec {
        DOMAIN = "git.noratrieb.dev";
        # rec: ROOT_URL reuses DOMAIN defined just above.
        ROOT_URL = "https://${DOMAIN}/";
        # Loopback HTTP; caddy proxies git.noratrieb.dev to 5015.
        HTTP_PORT = 5015;
      };
      service = {
        DISABLE_REGISTRATION = true;
      };
      storage = {
        STORAGE_TYPE = "minio";
        MINIO_ENDPOINT = "127.0.0.1:3900";
        MINIO_ACCESS_KEY_ID = "GKc8bfd905eb7f85980ffe84c9";
        MINIO_BUCKET = "forgejo";
        MINIO_BUCKET_LOOKUP = "auto";
        MINIO_LOCATION = "garage";
        # garage serves plain HTTP on loopback.
        MINIO_USE_SSL = false;
      };
    };
    # Secret values are injected at runtime from agenix instead of being
    # written into the generated app.ini.
    secrets = {
      storage = {
        MINIO_SECRET_ACCESS_KEY = config.age.secrets.forgejo_s3_key_secret.path;
      };
    };
  };
  # Daily backup of the sqlite database.
  services.custom-backup.jobs = [{
    app = "forgejo";
    file = "/var/lib/forgejo/data/forgejo.db";
  }];
}

View file

@ -0,0 +1,67 @@
# hugo-chat: client + Spring server + postgres as podman containers on a
# shared "hugo-chat" network.
{ config, lib, pkgs, ... }:
let
  # Credentials for the private docker registry.
  dockerLogin = {
    registry = "docker.noratrieb.dev";
    username = "nils";
    passwordFile = config.age.secrets.docker_registry_password.path;
  };
in
{
  # Env file shared by server and db; exact keys live in the age secret.
  age.secrets.hugochat_db_password.file = ../../secrets/hugochat_db_password.age;
  virtualisation.oci-containers.containers = {
    hugo-chat-client = {
      image = "docker.noratrieb.dev/hugo-chat-client:89ce0b07";
      login = dockerLogin;
      # caddy proxies hugo-chat.noratrieb.dev to 5002.
      ports = [ "127.0.0.1:5002:80" ];
    };
    hugo-chat-server = {
      image = "docker.noratrieb.dev/hugo-chat-server:89ce0b07";
      # caddy proxies api.hugo-chat.noratrieb.dev to 5001.
      ports = [ "127.0.0.1:5001:8080" ];
      environment = {
        # The db is reachable by container name on the shared network.
        SPRING_DATASOURCE_URL = "jdbc:postgresql://hugo-chat-db:5432/postgres";
      };
      environmentFiles = [ config.age.secrets.hugochat_db_password.path ];
      extraOptions = [ "--network=hugo-chat" ];
      dependsOn = [ "hugo-chat-db" ];
      login = dockerLogin;
    };
    hugo-chat-db = {
      # Public image, no registry login needed.
      image = "postgres:16";
      volumes = [ "/var/lib/hugo-chat/data:/var/lib/postgresql/data" ];
      environment = {
        PGDATA = "/var/lib/postgresql/data/pgdata";
      };
      extraOptions = [ "--network=hugo-chat" ];
      environmentFiles = [ config.age.secrets.hugochat_db_password.path ];
    };
  };
  # Back up via logical pg_dump inside the db container.
  services.custom-backup.jobs = [
    {
      app = "hugo-chat";
      pgDump = {
        containerName = "hugo-chat-db";
        dbName = "postgres";
        userName = "postgres";
      };
    }
  ];
  # https://www.reddit.com/r/NixOS/comments/13e5w6b/does_anyone_have_a_working_nixos_ocicontainers/
  # oci-containers can't create podman networks itself; "|| true" keeps the
  # oneshot idempotent across activations.
  systemd.services.init-hugo-chat-podman-network = {
    description = "Create the network bridge for hugo-chat.";
    after = [ "network.target" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig.Type = "oneshot";
    script = ''
      ${lib.getExe pkgs.podman} network create hugo-chat || true
    '';
  };
  system.activationScripts.makeHugoChatDir = lib.stringAfter [ "var" ] ''
    mkdir -p /var/lib/hugo-chat/data
  '';
}

View file

@ -0,0 +1,35 @@
# killua bot container with a persistent trivia-questions data dir.
{ config, lib, ... }:
let
  dataDir = "/var/lib/killua";
  # Registry credentials, hoisted into a let binding for consistency with the
  # sibling app modules (cargo-bisect, hugo-chat, openolat) that do the same.
  dockerLogin = {
    registry = "docker.noratrieb.dev";
    username = "nils";
    passwordFile = config.age.secrets.docker_registry_password.path;
  };
in
{
  # Runtime secrets (env file) for the bot.
  age.secrets.killua_env.file = ../../secrets/killua_env.age;
  virtualisation.oci-containers.containers = {
    killua = {
      image = "docker.noratrieb.dev/killua-bot:ac8203d2";
      volumes = [
        "${dataDir}:/data"
      ];
      environment = {
        KILLUA_JSON_PATH = "/data/trivia_questions.json";
      };
      environmentFiles = [ config.age.secrets.killua_env.path ];
      login = dockerLogin;
    };
  };
  # Daily backup of the single state file.
  services.custom-backup.jobs = [
    {
      app = "killua";
      file = "${dataDir}/trivia_questions.json";
    }
  ];
  # NOTE(review): ugo+w makes the dir world-writable for the container user.
  system.activationScripts.makeKilluaDir = lib.stringAfter [ "var" ] ''
    mkdir -p ${dataDir}
    chmod ugo+w ${dataDir}
  '';
}

View file

@ -0,0 +1,72 @@
# OpenOlat LMS + postgres as podman containers on a shared "openolat"
# network; caddy proxies olat.noratrieb.dev to 5011.
{ config, lib, pkgs, ... }:
let
  # Credentials for the private docker registry.
  dockerLogin = {
    registry = "docker.noratrieb.dev";
    username = "nils";
    passwordFile = config.age.secrets.docker_registry_password.path;
  };
in
{
  # Env file with the db password, shared by app and db containers.
  age.secrets.openolat_db_password.file = ../../secrets/openolat_db_password.age;
  virtualisation.oci-containers.containers = {
    openolat = {
      image = "docker.noratrieb.dev/openolat:69b3c8b6";
      volumes = [
        "/var/lib/openolat/files:/home/openolat/olatdata"
        # Read-only store path baked from the repo.
        "${./extra-properties.properties}:/home/openolat/extra-properties.properties"
      ];
      ports = [ "127.0.0.1:5011:8088" ];
      environment = {
        # DB_PASSWORD = from openolat_db_password
        DB_URL = "jdbc:postgresql://openolat-db:5432/oodb";
        EXTRA_PROPERTIES = "/home/openolat/extra-properties.properties";
        OLAT_HOST = "olat.noratrieb.dev";
      };
      environmentFiles = [ config.age.secrets.openolat_db_password.path ];
      extraOptions = [ "--network=openolat" ];
      dependsOn = [ "openolat-db" ];
      login = dockerLogin;
    };
    openolat-db = {
      image = "postgres:15";
      volumes = [ "/var/lib/openolat/db:/var/lib/postgresql/data" ];
      environment = {
        POSTGRES_DB = "oodb";
        POSTGRES_USER = "oodbu";
        # POSTGRES_PASSWORD = from openolat_db_password
        PGDATA = "/var/lib/postgresql/data/pgdata";
      };
      extraOptions = [ "--network=openolat" ];
      environmentFiles = [ config.age.secrets.openolat_db_password.path ];
    };
  };
  services.custom-backup.jobs = [
    {
      # NOTE(review): the app/bucket dir is "openolat-db" while sibling
      # modules use the plain app name — renaming would move existing backups,
      # so it is left as-is.
      app = "openolat-db";
      pgDump = {
        containerName = "openolat-db";
        dbName = "oodb";
        userName = "oodbu";
      };
    }
  ];
  # https://www.reddit.com/r/NixOS/comments/13e5w6b/does_anyone_have_a_working_nixos_ocicontainers/
  # oci-containers can't create podman networks; "|| true" keeps it idempotent.
  systemd.services.init-openolat-podman-network = {
    description = "Create the network bridge for openolat.";
    after = [ "network.target" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig.Type = "oneshot";
    script = ''
      ${lib.getExe pkgs.podman} network create openolat || true
    '';
  };
  system.activationScripts.makeOpenolatDir = lib.stringAfter [ "var" ] ''
    mkdir -p /var/lib/openolat/db
    mkdir -p /var/lib/openolat/files
  '';
}

View file

@ -0,0 +1 @@
# Extra OpenOlat property, mounted into the container (see EXTRA_PROPERTIES).
# NOTE(review): presumably disables the startup UTF-8-filesystem check, which
# the container filesystem/locale would fail — confirm against OpenOlat docs.
enforce.utf8.filesystem=false

View file

@ -0,0 +1,19 @@
# upload.files.noratrieb.dev: uploader service writing into the
# files.noratrieb.dev bucket in garage (S3 API on localhost:3900).
{ upload-files, pkgs, lib, config, ... }: {
  age.secrets.upload_files_s3_secret.file = ../../secrets/upload_files_s3_secret.age;
  systemd.services.upload-files = {
    description = "upload.files.noratrieb.dev file uploader for files.noratrieb.dev";
    wantedBy = [ "multi-user.target" ];
    after = [ "network.target" ];
    environment = {
      UPLOAD_FILES_NORATRIEB_DEV_BUCKET = "files.noratrieb.dev";
      UPLOAD_FILES_NORATRIEB_DEV_ENDPOINT = "http://localhost:3900";
      UPLOAD_FILES_NORATRIEB_DEV_REGION = "garage";
    };
    serviceConfig = {
      # No local state, so no dedicated user is needed.
      DynamicUser = true;
      # upload-files is a pinned-tarball package function (see hive.nix specialArgs).
      ExecStart = "${lib.getExe (upload-files {inherit pkgs;})}";
      # S3 credentials are injected from the age secret.
      EnvironmentFile = [ config.age.secrets.upload_files_s3_secret.path ];
    };
  };
}

View file

@ -0,0 +1,42 @@
# uptime monitor container; config is baked from ./uptime.json, the sqlite db
# lives under /var/lib/uptime. (The old docker-compose snippet that used to
# sit here was stale — it described port 5010:3000 without the loopback bind.)
{ lib, config, ... }: {
  virtualisation.oci-containers.containers.uptime = {
    image = "docker.noratrieb.dev/uptime:50d15bc4";
    volumes = [
      # Read-only config from the store; persistent data on the host.
      "${./uptime.json}:/uptime.json"
      "/var/lib/uptime:/data"
    ];
    environment = {
      UPTIME_CONFIG_PATH = "/uptime.json";
    };
    # Loopback only; caddy proxies uptime.noratrieb.dev to 5010.
    ports = [ "127.0.0.1:5010:3000" ];
    login = {
      registry = "docker.noratrieb.dev";
      username = "nils";
      passwordFile = config.age.secrets.docker_registry_password.path;
    };
  };
  # Daily backup of the sqlite db (db_url "/data/uptime.db" in uptime.json).
  services.custom-backup.jobs = [
    {
      app = "uptime";
      file = "/var/lib/uptime/uptime.db";
    }
  ];
  # NOTE(review): unlike sibling modules there is no chmod here — presumably
  # the container user can already write the dir; verify on the host.
  system.activationScripts.makeUptimeDir = lib.stringAfter [ "var" ] ''
    mkdir -p /var/lib/uptime/
  '';
}

View file

@ -0,0 +1,50 @@
{
"interval_seconds": 30,
"db_url": "/data/uptime.db",
"websites": [
{
"name": "noratrieb.dev",
"url": "https://noratrieb.dev"
},
{
"name": "nilstrieb.dev",
"url": "https://nilstrieb.dev"
},
{
"name": "docker.nilstrieb.dev",
"url": "https://docker.noratrieb.dev"
},
{
"name": "vps1.nilstrieb.dev",
"url": "https://vps1.infra.noratrieb.dev"
},
{
"name": "vps2.nilstrieb.dev",
"url": "https://vps2.nilstrieb.dev"
},
{
"name": "bisect-rustc.nilstrieb.dev",
"url": "https://bisect-rustc.noratrieb.dev"
},
{
"name": "hugo-chat.nilstrieb.dev",
"url": "https://hugo-chat.noratrieb.dev"
},
{
"name": "api.hugo-chat.nilstrieb.dev",
"url": "https://api.hugo-chat.noratrieb.dev/api/v2/rooms"
},
{
"name": "cors-school.nilstrieb.dev",
"url": "https://cors-school.nilstrieb.dev"
},
{
"name": "api.cors-school.nilstrieb.dev",
"url": "https://api.cors-school.nilstrieb.dev/api/hugo"
},
{
"name": "olat.nilstrieb.dev",
"url": "https://olat.nilstrieb.dev/dmz/"
}
]
}

View file

@ -0,0 +1,33 @@
# widetom bot container; config.toml and bot token are age secrets
# bind-mounted straight into the container (no host state dir needed).
# (Replaced the stale docker-compose snippet that used to document this.)
{ config, ... }: {
  age.secrets.widetom_bot_token.file = ../../secrets/widetom_bot_token.age;
  age.secrets.widetom_config_toml.file = ../../secrets/widetom_config_toml.age;
  virtualisation.oci-containers.containers = {
    widetom = {
      image = "docker.noratrieb.dev/widetom:33d17387";
      volumes = [
        # .path is the decrypted secret on the host; mount it read into place.
        "${config.age.secrets.widetom_config_toml.path}:/config.toml"
        "${config.age.secrets.widetom_bot_token.path}:/token"
      ];
      environment = {
        CONFIG_PATH = "/config.toml";
        BOT_TOKEN_PATH = "/token";
      };
      login = {
        registry = "docker.noratrieb.dev";
        username = "nils";
        passwordFile = config.age.secrets.docker_registry_password.path;
      };
    };
  };
}

13
nix/deploy/deploy-dns.sh Executable file
View file

@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Deploy the DNS hosts one at a time, smoke-testing before and after each
# apply so a bad deploy to dns1 is caught before dns2 is also touched.
set -euxo pipefail
# Run from the nix/ directory regardless of where the script was invoked from.
cd "$(dirname "$(realpath "$0")")/.."

./deploy/smoke-tests.sh
colmena apply --on dns1
./deploy/smoke-tests.sh
colmena apply --on dns2
./deploy/smoke-tests.sh

54
nix/deploy/smoke-tests.sh Executable file
View file

@ -0,0 +1,54 @@
#!/usr/bin/env bash
# This script does a few basic smoke tests to ensure the servers haven't completely died.
# pipefail added: without it a failing `dig` on the left of the pipe could be
# masked when grep still happened to match empty input's absence… make the
# failure explicit instead.
set -euxo pipefail

# Query BOTH authoritative name servers for $2 (record type $1) and require
# the answer section to match the (grep) regex $3 on each of them.
check_dig_answer() {
	type="$1"
	host="$2"
	grep="$3"
	dig @dns1.infra.noratrieb.dev "$type" "$host" +noall +answer | grep "$grep"
	dig @dns2.infra.noratrieb.dev "$type" "$host" +noall +answer | grep "$grep"
}

# Check DNS name servers
check_dig_answer A "dns1.infra.noratrieb.dev" "154.38.163.74"
check_dig_answer A "nilstrieb.dev" "161.97.165.1"
# Check the NS records. The trailing dot matters!
check_dig_answer NS noratrieb.dev "noratrieb.dev..*3600.*IN.*NS.*ns1.noratrieb.dev."
# Mail stuff
check_dig_answer MX noratrieb.dev "mail.protonmail.ch."
check_dig_answer MX noratrieb.dev "mailsec.protonmail.ch."
check_dig_answer TXT noratrieb.dev "protonmail-verification=09106d260e40df267109be219d9c7b2759e808b5"
check_dig_answer TXT noratrieb.dev "v=spf1 include:_spf.protonmail.ch ~all"

# Check HTTP responses
http_hosts=(
	noratrieb.dev
	nilstrieb.dev
	vps1.infra.noratrieb.dev
	vps3.infra.noratrieb.dev
	vps4.infra.noratrieb.dev
	vps5.infra.noratrieb.dev
	bisect-rustc.noratrieb.dev
	docker.noratrieb.dev
	does-it-build.noratrieb.dev
	grafana.noratrieb.dev
	hugo-chat.noratrieb.dev
	api.hugo-chat.noratrieb.dev/api/v2/rooms
	uptime.noratrieb.dev
	www.noratrieb.dev
	# legacy:
	blog.noratrieb.dev
)
for http_host in "${http_hosts[@]}"; do
	# --max-time: a hung server should fail the smoke test, not hang the deploy.
	curl --fail -s --max-time 10 "https://${http_host}/" -o /dev/null
done

329
nix/hive.nix Normal file
View file

@ -0,0 +1,329 @@
{
meta =
  let
    # Pinned commits of my own projects, updated out-of-band in my-projects.json.
    my-projects-versions = builtins.fromJSON (builtins.readFile ./my-projects.json);
    nixpkgs-hash = "50ab793786d9de88ee30ec4e4c24fb4236fc2674"; # nixos-24.11 2025-07-27
    nixpkgs-path = (fetchTarball "https://github.com/NixOS/nixpkgs/archive/${nixpkgs-hash}.tar.gz");
  in
  {
    # Override to pin the Nixpkgs version (recommended). This option
    # accepts one of the following:
    # - A path to a Nixpkgs checkout
    # - The Nixpkgs lambda (e.g., import <nixpkgs>)
    # - An initialized Nixpkgs attribute set
    nixpkgs = import nixpkgs-path;
    # Extra arguments made available to every node module.
    specialArgs = {
      website = import (fetchTarball "https://github.com/Noratrieb/website/archive/${my-projects-versions.website}.tar.gz");
      blog = fetchTarball "https://github.com/Noratrieb/blog/archive/${my-projects-versions.blog}.tar.gz";
      slides = fetchTarball "https://github.com/Noratrieb/slides/archive/${my-projects-versions.slides}.tar.gz";
      pretense = import (fetchTarball "https://github.com/Noratrieb/pretense/archive/${my-projects-versions.pretense}.tar.gz");
      quotdd = import (fetchTarball "https://github.com/Noratrieb/quotdd/archive/${my-projects-versions.quotdd}.tar.gz");
      does-it-build = import (fetchTarball "https://github.com/Noratrieb/does-it-build/archive/${my-projects-versions.does-it-build}.tar.gz");
      upload-files = import (fetchTarball "https://github.com/Noratrieb/upload.files.noratrieb.dev/archive/${my-projects-versions."upload.files.noratrieb.dev"}.tar.gz");
      inherit my-projects-versions;
      inherit nixpkgs-path;
      # Static inventory: public addresses and wireguard mesh topology per host,
      # consumed by the dns and wg-mesh modules.
      networkingConfig = {
        dns1 = {
          publicIPv4 = "154.38.163.74";
          publicIPv6 = null;
          wg = {
            privateIP = "10.0.1.1";
            publicKey = "7jy2q93xYBHG5yKqLmNuMWSuFMnUGWXVuKQ1yMmxoV4=";
            peers = [ "vps3" ];
          };
        };
        dns2 = {
          publicIPv4 = "128.140.3.7";
          # somehow this doesnt quite work yet, keep it out of DNS records
          #publicIPv6 = "2a01:4f8:c2c:d616::";
          publicIPv6 = null;
          wg = {
            privateIP = "10.0.1.2";
            publicKey = "yfOc/q5M+2DWPoZ4ZgwrTYYkviQxGxRWpcBCDcauDnc=";
            peers = [ "vps3" ];
          };
        };
        vps1 = {
          publicIPv4 = "161.97.165.1";
          publicIPv6 = null;
          wg = {
            privateIP = "10.0.0.1";
            publicKey = "5tg3w/TiCuCeKIBJCd6lHUeNjGEA76abT1OXnhNVyFQ=";
            peers = [ "vps2" "vps3" "vps4" "vps5" ];
          };
        };
        vps2 = {
          publicIPv4 = "184.174.32.252";
          publicIPv6 = null;
          wg = {
            privateIP = "10.0.0.2";
            publicKey = "SficHHJ0ynpZoGah5heBpNKnEVIVrgs72Z5HEKd3jHA=";
            peers = [ "vps1" "vps3" "vps4" "vps5" ];
          };
        };
        vps3 = {
          publicIPv4 = "134.255.181.139";
          publicIPv6 = null;
          wg = {
            privateIP = "10.0.0.3";
            publicKey = "pdUxG1vhmYraKzIIEFxTRAMhGwGztBL/Ly5icJUV3g0=";
            # vps3 is the only host peered with the DNS servers (metrics host).
            peers = [ "vps1" "vps2" "vps4" "vps5" "dns1" "dns2" ];
          };
        };
        vps4 = {
          publicIPv4 = "195.201.147.17";
          # somehow this doesnt quite work yet, keep it out of DNS records
          #publicIPv6 = "2a01:4f8:1c1c:cb18::1";
          publicIPv6 = null;
          wg = {
            privateIP = "10.0.0.4";
            publicKey = "+n2XKKaSFdCanEGRd41cvnuwJ0URY0HsnpBl6ZrSBRs=";
            peers = [ "vps1" "vps2" "vps3" "vps5" ];
          };
        };
        vps5 = {
          publicIPv4 = "45.94.209.30";
          publicIPv6 = null;
          wg = {
            privateIP = "10.0.0.5";
            publicKey = "r1cwt63fcOR+FTqMTUpZdK4/MxpalkDYRHXyy7osWUk=";
            peers = [ "vps1" "vps2" "vps3" "vps4" ];
          };
        };
      };
    };
    # If your Colmena host has nix configured to allow for remote builds
    # (for nix-daemon, your user being included in trusted-users)
    # you can set a machines file that will be passed to the underlying
    # nix-store command during derivation realization as a builders option.
    # For example, if you support multiple orginizations each with their own
    # build machine(s) you can ensure that builds only take place on your
    # local machine and/or the machines specified in this file.
    # machinesFile = ./machines.client-a;
  };
defaults = { pkgs, config, lib, ... }: {
  # This module will be imported by all hosts
  imports = [ ./modules/default ];
};
# Authoritative DNS #1 (US, contabo).
dns1 = { name, nodes, modulesPath, ... }: {
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
    ./modules/contabo
    ./modules/dns
    ./modules/wg-mesh
  ];
  # The name and nodes parameters are supported in Colmena,
  # allowing you to reference configurations in other nodes.
  deployment.tags = [ "dns" "us" ];
  system.stateVersion = "23.11";
};
# Authoritative DNS #2 (EU, hetzner — hence the manual static networking).
dns2 = { name, nodes, modulesPath, lib, ... }: {
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
    ./modules/dns
    ./modules/wg-mesh
  ];
  deployment.tags = [ "dns" "eu" "hetzner" ];
  system.stateVersion = "23.11";
  boot.loader.grub.device = "/dev/sda";
  boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ];
  boot.initrd.kernelModules = [ "nvme" ];
  fileSystems."/" = { device = "/dev/sda1"; fsType = "ext4"; };
  # This file was populated at runtime with the networking
  # details gathered from the active system.
  networking = {
    nameservers = [
      "8.8.8.8"
    ];
    defaultGateway = "172.31.1.1";
    defaultGateway6 = {
      address = "fe80::1";
      interface = "eth0";
    };
    dhcpcd.enable = false;
    # Static config below assumes the interface is called eth0 (see udev rule).
    usePredictableInterfaceNames = lib.mkForce false;
    interfaces = {
      eth0 = {
        ipv4.addresses = [
          { address = "128.140.3.7"; prefixLength = 32; }
        ];
        ipv6.addresses = [
          { address = "2a01:4f8:c2c:d616::1"; prefixLength = 64; }
          { address = "fe80::9400:3ff:fe91:1647"; prefixLength = 64; }
        ];
        ipv4.routes = [{ address = "172.31.1.1"; prefixLength = 32; }];
        ipv6.routes = [{ address = "fe80::1"; prefixLength = 128; }];
      };
    };
  };
  # Pin the NIC's MAC address to the eth0 name used above.
  services.udev.extraRules = ''
    ATTR{address}=="96:00:03:91:16:47", NAME="eth0"
  '';
};
# VPS1 is the primary app server.
vps1 = { name, nodes, modulesPath, config, lib, ... }: {
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
    ./modules/contabo
    ./modules/wg-mesh
    ./modules/caddy
    ./modules/garage
    ./modules/podman
    ./modules/registry
    ./modules/backup
    # apps
    ./apps/widetom
    ./apps/hugo-chat
    ./apps/uptime
    ./apps/cargo-bisect-rustc-service
    ./apps/killua
    ./apps/forgejo
    ./apps/openolat
    ./apps/upload-files
  ];
  deployment.tags = [ "caddy" "eu" "apps" "website" ];
  system.stateVersion = "23.11";
};
# VPS2 exists
vps2 = { name, nodes, modulesPath, config, lib, ... }: {
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
    ./modules/contabo
    ./modules/wg-mesh
    ./modules/caddy
    ./modules/garage
  ];
  deployment.tags = [ "caddy" "eu" "apps" ];
  system.stateVersion = "23.11";
};
# VPS3 is the primary monitoring/metrics server.
vps3 = { name, nodes, modulesPath, config, ... }: {
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
    ./modules/contabo
    ./modules/wg-mesh
    ./modules/caddy
    ./modules/garage
    ./modules/prometheus
  ];
  deployment.tags = [ "eu" "apps" "website" ];
  system.stateVersion = "23.11";
};
# VPS4 exists. It's useful for garage replication and runs does-it-build which uses some CPU.
# Hetzner host, hence the manual static networking below (same as dns2).
vps4 = { lib, modulesPath, ... }: {
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
    ./modules/caddy
    ./modules/wg-mesh
    ./modules/garage
    ./modules/backup
    # apps
    ./apps/does-it-build
  ];
  deployment.tags = [ "eu" "apps" "hetzner" "website" ];
  system.stateVersion = "23.11";
  boot.loader.grub.device = "/dev/sda";
  boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ];
  boot.initrd.kernelModules = [ "nvme" ];
  fileSystems."/" = { device = "/dev/sda1"; fsType = "ext4"; };
  # This file was populated at runtime with the networking
  # details gathered from the active system.
  networking = {
    nameservers = [
      "8.8.8.8"
    ];
    defaultGateway = "172.31.1.1";
    defaultGateway6 = {
      address = "fe80::1";
      interface = "eth0";
    };
    dhcpcd.enable = false;
    usePredictableInterfaceNames = lib.mkForce false;
    interfaces = {
      eth0 = {
        ipv4.addresses = [
          { address = "195.201.147.17"; prefixLength = 32; }
        ];
        ipv6.addresses = [
          { address = "2a01:4f8:1c1c:cb18::1"; prefixLength = 64; }
          { address = "fe80::9400:3ff:fe95:a9e4"; prefixLength = 64; }
        ];
        ipv4.routes = [{ address = "172.31.1.1"; prefixLength = 32; }];
        ipv6.routes = [{ address = "fe80::1"; prefixLength = 128; }];
      };
    };
  };
  # Pin the NIC's MAC address to the eth0 name used above.
  services.udev.extraRules = ''
    ATTR{address}=="96:00:03:95:a9:e4", NAME="eth0"
  '';
};
# VPS5 is the primary test server, where new things are being deployed that could break stuff maybe.
vps5 = { name, nodes, modulesPath, config, pkgs, lib, ... }:
  let
    # cluelessh ssh honeypot, pinned to a commit.
    commit = "5f203d0f5ba2639043bd5bd1c3687c406d6abac1";
    cluelessh = import (fetchTarball "https://github.com/Noratrieb/cluelessh/archive/${commit}.tar.gz");
  in
  {
    imports = [
      (modulesPath + "/profiles/qemu-guest.nix")
      ./modules/contabo
      ./modules/caddy
      ./modules/wg-mesh
      ./modules/garage
    ];
    # Move the real sshd to 2000 so the honeypot can own port 22.
    services.openssh.ports = [ 2000 ];
    systemd.services.fakessh = {
      description = "cluelessh-faked ssh honeypot";
      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" ];
      serviceConfig = {
        Restart = "on-failure";
        RestartSec = "5s";
        # getExe' already returns the store-path string; the previous
        # "${ ... }" interpolation wrapper was redundant.
        ExecStart = lib.getExe' (cluelessh { inherit pkgs; }) "cluelessh-faked";
        # i really don't trust this.
        DynamicUser = true;
        # Needed to bind port 22 as an unprivileged dynamic user.
        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
        MemoryHigh = "100M";
        MemoryMax = "200M";
        # config
        Environment = [
          "FAKESSH_LISTEN_ADDR=0.0.0.0:22"
          "RUST_LOG=debug"
          #"FAKESSH_JSON_LOGS=1"
        ];
      };
    };
    networking.firewall.allowedTCPPorts = [ 22 ];
    # colmena deploys over the real sshd on 2000.
    deployment.targetPort = 2000;
    deployment.tags = [ "eu" "apps" ];
    system.stateVersion = "23.11";
  };
}

62
nix/modules/backup/backup.sh Executable file
View file

@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Back up configured files and Postgres databases into the garage S3 bucket.
# Expects in the environment: CONFIG_FILE (JSON with .files[] and .pg_dumps[]),
# S3_ENDPOINT, S3_ACCESS_KEY, S3_SECRET_KEY, S3_BUCKET.
set -euo pipefail

time="$(date --iso-8601=s --utc)"
echo "Starting backup procedure with time=$time"

dir=$(mktemp -d)
echo "Setting workdir to $dir"
cd "$dir"
export HOME="$dir"
# Delete the temporary directory afterwards.
# Yes, this variable should expand now.
# shellcheck disable=SC2064
trap "rm -rf $dir" EXIT

echo "Logging into garage"
export MC_CONFIG_DIR="$dir"
mc alias set garage "$S3_ENDPOINT" "$S3_ACCESS_KEY" "$S3_SECRET_KEY" --api S3v4
mc ls garage/backups

files=$(jq -c '.files[]' "$CONFIG_FILE")
pg_dumps=$(jq -c '.pg_dumps[]' "$CONFIG_FILE")
echo "$files"
echo "$pg_dumps"

# Iterate line-by-line over the compact JSON objects.
IFS=$'\n'
for file_config in $files; do
	filepath=$(echo "$file_config" | jq -r ".file")
	app=$(echo "$file_config" | jq -r ".app")
	echo "Backing up app $app FILE $filepath..."
	tmppath="$dir/file"
	xz < "$filepath" > "$tmppath"
	echo "Uploading file"
	mc put "$tmppath" "garage/$S3_BUCKET/$app/$time/$(basename "$filepath").xz"
	echo "Uploaded file"
done

for pg_config in $pg_dumps; do
	app=$(echo "$pg_config" | jq -r ".app")
	containerName=$(echo "$pg_config" | jq -r ".containerName")
	dbName=$(echo "$pg_config" | jq -r ".dbName")
	userName=$(echo "$pg_config" | jq -r ".userName")
	echo "Backing up app $app POSTGRES $containerName/$dbName..."
	tmppath="$dir/file"
	podman exec "$containerName" pg_dump --format=custom --file /tmp/db.bak \
		--host "127.0.0.1" --dbname "$dbName" --username "$userName"
	podman cp "$containerName:/tmp/db.bak" "$tmppath"
	# xz compresses in place into "$tmppath.xz" (it only writes to stdout with
	# -c); the previous stray `> "$tmppath.xz"` redirect was redundant.
	xz -f "$tmppath"
	echo "Uploading file"
	mc put "$tmppath.xz" "garage/$S3_BUCKET/$app/$time/$dbName.bak.xz"
	echo "Uploaded file"
	podman exec "$containerName" rm "/tmp/db.bak"
done

View file

@ -0,0 +1,83 @@
# custom-backup: declares services.custom-backup.jobs and wires them into a
# daily systemd service running backup.sh against the garage "backups" bucket.
{ config, lib, pkgs, ... }: with lib;
let
  # One backup job; exactly one of `file` (plain file upload) or `pgDump`
  # (pg_dump inside a podman container) is expected to be set.
  jobOptions = { ... }: {
    options = {
      app = mkOption {
        type = types.str;
        description = "The app name, used as the directory in the bucket";
      };
      # Optional per-job env file, merged into the backup service environment.
      environmentFile = mkOption {
        type = types.nullOr types.path;
        default = null;
      };
      file = mkOption {
        type = types.nullOr types.str;
        default = null;
      };
      pgDump = mkOption {
        type = types.nullOr (types.submodule ({ ... }: {
          options = {
            containerName = mkOption {
              type = types.str;
            };
            dbName = mkOption {
              type = types.str;
            };
            userName = mkOption {
              type = types.str;
            };
          };
        }));
        default = null;
      };
      #mongo_dump = { };
    };
  };
in
{
  options.services.custom-backup = {
    jobs = mkOption {
      default = [ ];
      type = types.listOf (types.submodule jobOptions);
      description = "Backup jobs to execute";
    };
  };
  config =
    let
      cfg = config.services.custom-backup;
      # Rendered to JSON and consumed by backup.sh via $CONFIG_FILE.
      backupConfig = {
        files = builtins.map (job: { app = job.app; file = job.file; })
          (builtins.filter (job: job.file != null) cfg.jobs);
        pg_dumps = builtins.map (job: { app = job.app; } // job.pgDump)
          (builtins.filter (job: job.pgDump != null) cfg.jobs);
      };
      backupScript = pkgs.writeShellApplication {
        name = "backup";
        runtimeInputs = with pkgs; [ podman jq minio-client getent xz ];
        text = builtins.readFile ./backup.sh;
      };
    in
    {
      age.secrets.backup_s3_secret.file = ../../secrets/backup_s3_secret.age;
      systemd.services.custom-backup = {
        startAt = "daily";
        serviceConfig = {
          # TODO: can we use a dynamic user?
          #DynamicUser = true;
          ExecStart = "${backupScript}/bin/backup";
          Environment = [
            "CONFIG_FILE=${pkgs.writeText "backup-config.json" (builtins.toJSON backupConfig)}"
            "S3_BUCKET=backups"
            "S3_ENDPOINT=http://localhost:3900"
          ];
          # Per-job env files (non-null only) plus the shared S3 credentials.
          EnvironmentFile = (builtins.filter (file: file != null)
            (builtins.map (job: job.environmentFile) cfg.jobs)) ++ [
            config.age.secrets.backup_s3_secret.path
          ];
        };
      };
    };
}

View file

@ -0,0 +1,59 @@
# Global options: certificates are stored in garage (S3 on loopback) via the
# certmagic-s3 plugin so every caddy host shares the same cert store.
{
	email noratrieb@proton.me
	auto_https disable_redirects
	storage s3 {
		host "localhost:3900"
		bucket "caddy-store"
		# access_id ENV S3_ACCESS_ID
		# secret_key ENV S3_SECRET_KEY
		# insecure: garage speaks plain HTTP on loopback.
		insecure true
	}
	servers {
		metrics
	}
	log default {
		output stdout
		format json
	}
}
# https://gist.github.com/ryanburnette/d13575c9ced201e73f8169d3a793c1a3
# CORS snippet: import with the allowed origin as args[0] and the
# comma-separated list of allowed request headers as args[1].
(cors) {
	@cors_preflight{args[0]} method OPTIONS
	@cors{args[0]} header Origin {args[0]}
	handle @cors_preflight{args[0]} {
		header {
			Access-Control-Allow-Origin "{args[0]}"
			Access-Control-Allow-Methods "GET, POST, PUT, PATCH, DELETE, OPTIONS"
			Access-Control-Allow-Credentials "false"
			# Fixed: was "${args[1]}" — Caddyfile placeholders are {args[N]},
			# so the extra $ was sent literally, corrupting the header value.
			Access-Control-Allow-Headers "{args[1]}"
			Access-Control-Max-Age "86400"
			defer
		}
		respond "" 204
	}
	handle @cors{args[0]} {
		header {
			Access-Control-Allow-Origin "{args[0]}"
			Access-Control-Expose-Headers *
			defer
		}
	}
}
# Plain-HTTP catch-all: auto_https redirects are disabled in the globals, so
# answer http:// requests with a hint instead of redirecting.
http:// {
	log
	respond "This is an HTTPS-only server, silly you. Go to https:// instead." 418
}
# HTTP
# Prometheus metrics; port 9010 is only opened on the wireguard interface
# (see the caddy nix module).
:9010 {
	log
	metrics /metrics
}

View file

@ -0,0 +1,116 @@
# Copied from https://github.com/NixOS/nixpkgs/pull/259275 and updated.
# Builds caddy with extra Go-module plugins compiled in (used here for
# certmagic-s3); callers must supply a matching vendorHash.
{ lib
, buildGoModule
, fetchFromGitHub
, gnused
, nixosTests
, caddy
, stdenv
, testers
, installShellFiles
, externalPlugins ? [ ]
, vendorHash ? "sha256-1Api8bBZJ1/oYk4ZGIiwWCSraLzK9L+hsKXkFtk6iVM="
}:
let
  # Plugin import paths injected into cmd/caddy/main.go.
  attrsToModules = attrs:
    builtins.map ({ name, repo, version }: "${repo}") attrs;
  # "repo@version" strings passed to `go get` during vendoring.
  attrsToSources = attrs:
    builtins.map ({ name, repo, version }: "${repo}@${version}") attrs;
in
buildGoModule rec {
  pname = "caddy";
  version = "2.8.4";
  # Upstream dist repo, used for the systemd unit files below.
  dist = fetchFromGitHub {
    owner = "caddyserver";
    repo = "dist";
    rev = "v${version}";
    hash = "sha256-O4s7PhSUTXoNEIi+zYASx8AgClMC5rs7se863G6w+l0=";
  };
  src = fetchFromGitHub {
    owner = "caddyserver";
    repo = "caddy";
    rev = "v${version}";
    hash = "sha256-CBfyqtWp3gYsYwaIxbfXO3AYaBiM7LutLC7uZgYXfkQ=";
  };
  inherit vendorHash;
  subPackages = [ "cmd/caddy" ];
  ldflags = [
    "-s"
    "-w"
    "-X github.com/caddyserver/caddy/v2.CustomVersion=${version}"
  ];
  # matches upstream since v2.8.0
  tags = [ "nobadger" ];
  nativeBuildInputs = [ gnused installShellFiles ];
  # During the vendor (goModules) build: register the plugins and vendor them.
  modBuildPhase = ''
    for module in ${builtins.toString (attrsToModules externalPlugins)}; do
      sed -i "/standard/a _ \"$module\"" ./cmd/caddy/main.go
    done
    for plugin in ${builtins.toString (attrsToSources externalPlugins)}; do
      go get $plugin
    done
    go generate
    go mod vendor
  '';
  # Keep the modified go.mod/go.sum together with the vendored sources.
  modInstallPhase = ''
    mv -t vendor go.mod go.sum
    cp -r --reflink=auto vendor "$out"
  '';
  # Repeat the main.go plugin injection for the real build, which starts from
  # a fresh src checkout.
  preBuild = ''
    chmod -R u+w vendor
    [ -f vendor/go.mod ] && mv -t . vendor/go.{mod,sum}
    go generate
    for module in ${builtins.toString (attrsToModules externalPlugins)}; do
      sed -i "/standard/a _ \"$module\"" ./cmd/caddy/main.go
    done
  '';
  postInstall = ''
    install -Dm644 ${dist}/init/caddy.service ${dist}/init/caddy-api.service -t $out/lib/systemd/system
    substituteInPlace $out/lib/systemd/system/caddy.service \
      --replace-fail "/usr/bin/caddy" "$out/bin/caddy"
    substituteInPlace $out/lib/systemd/system/caddy-api.service \
      --replace-fail "/usr/bin/caddy" "$out/bin/caddy"
  '' + lib.optionalString (stdenv.buildPlatform.canExecute stdenv.hostPlatform) ''
    # Generating man pages and completions fail on cross-compilation
    # https://github.com/NixOS/nixpkgs/issues/308283
    $out/bin/caddy manpage --directory manpages
    installManPage manpages/*
    installShellCompletion --cmd caddy \
      --bash <($out/bin/caddy completion bash) \
      --fish <($out/bin/caddy completion fish) \
      --zsh <($out/bin/caddy completion zsh)
  '';
  passthru.tests = {
    inherit (nixosTests) caddy;
    version = testers.testVersion {
      command = "${caddy}/bin/caddy version";
      package = caddy;
    };
  };
  meta = with lib; {
    homepage = "https://caddyserver.com";
    description = "Fast and extensible multi-platform HTTP/1-2-3 web server with automatic HTTPS";
    license = licenses.asl20;
    mainProgram = "caddy";
    maintainers = with maintainers; [ Br1ght0ne emilylange techknowlogick ];
  };
}

View file

@ -0,0 +1,13 @@
# Post-process a static site for caddy's file_server: prepare.py writes
# pre-compressed .gz/.zst/.br variants and ".sha256" ETag sidecars.
{ pkgs, lib, name, src ? null, ... }: pkgs.stdenv.mkDerivation {
  inherit name src;
  # NOTE(review): python311 in buildInputs relies on the python setup hook
  # putting the zstandard/brotli site-packages on PYTHONPATH for the bare
  # `python311` invocation below — works today, but python311.withPackages
  # would make the dependency explicit.
  buildInputs = with pkgs; [ python311 python311Packages.zstandard python311Packages.brotli ];
  buildPhase = ''
    mkdir -p $out
    cp -r $src/* $out/
    chmod -R +w $out
    ${lib.getExe pkgs.python311} ${./prepare.py} $out
    chmod -R -w $out
  '';
}

View file

@ -0,0 +1,60 @@
import os
import sys
import gzip
import brotli
import zstandard
import hashlib
def usage():
    # Print CLI usage; the caller follows this with exit(1).
    print("usage: prepare.py [SRC]")
def write_etag(path, content):
    """Write the quoted SHA-256 hex digest of `content` to `path + ".sha256"`.

    The quotes are part of the file contents: the sidecar is served verbatim
    as an HTTP ETag header value, which is a quoted string.
    """
    digest = hashlib.sha256(content).hexdigest()
    target = path + ".sha256"
    with open(target, "w") as sidecar:
        print(f"Writing ETag {target}")
        sidecar.write(f'"{digest}"')
def main():
    """Walk SRC (sys.argv[1]) and, for every regular file, write pre-compressed
    .gz/.zst/.br variants plus quoted-SHA-256 ".sha256" ETag sidecars.

    Compressed variants and existing sidecars are skipped, so re-running over
    the script's own output is idempotent.
    """
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)  # was bare exit(1); sys.exit is the explicit form for scripts
    src_dir = sys.argv[1]
    # Each module must expose compress(bytes) -> bytes.
    # Hoisted out of the file loop — the table is loop-invariant.
    compressions = [
        (".gz", gzip),
        (".zst", zstandard),
        (".br", brotli),
    ]
    for root, dirs, files in os.walk(src_dir):
        for file in files:
            path = os.path.join(root, file)
            # Ignore etags
            if path.endswith(".sha256") or path.endswith(".b3sum"):
                continue
            # Ignore already compressed files
            if path.endswith(".gz") or path.endswith(".zst") or path.endswith(".br"):
                continue
            with open(path, "rb") as f:
                content = f.read()
            for ext, alg in compressions:
                new_path = path + ext
                with open(new_path, "wb") as out:
                    print(f"Writing {new_path}")
                    compressed = alg.compress(content)
                    out.write(compressed)
                write_etag(new_path, compressed)
            write_etag(path, content)


if __name__ == "__main__":
    main()

View file

@ -0,0 +1,14 @@
<!-- Static fallback page; the caddy module serves it for direct hits on
     <host>.infra.noratrieb.dev. -->
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>nora's server</title>
</head>
<body>
<h1>congrats, you landed on my server (100% NixOS) directly!?</h1>
<p>sorry, but there isn't anything cool here. this is <b>my</b> infra, you are not allowed here.</p>
<p>if you do want to be allowed here, then uh.. still no.</p>
<p>:3</p>
</body>
</html>

View file

@ -0,0 +1,95 @@
# caddy module: custom caddy build with the certmagic-s3 plugin (certs shared
# via garage), plus generation of the per-host Caddyfile.
{ pkgs, config, lib, name, website, slides, blog, ... }:
let
  caddy = pkgs.callPackage ./caddy-build.nix {
    externalPlugins = [
      {
        name = "certmagic-s3";
        repo = "github.com/noratrieb-mirrors/certmagic-s3";
        version = "e48519f95173e982767cbb881d49335b6a00a599";
      }
    ];
    vendorHash = "sha256-KP9bYitM/Pocw4DxOXPVBigWh4IykNf8yKJiBlTFZmI=";
  };
  # Hosts that serve the public website and have a per-host Caddyfile.
  # Hoisted into one binding: this condition was duplicated inline twice below.
  servesWebsite = name == "vps1" || name == "vps3" || name == "vps4";
in
{
  environment.systemPackages = [ caddy ];
  networking.firewall.interfaces.wg0.allowedTCPPorts = [ 9010 ]; # metrics
  networking.firewall = {
    allowedTCPPorts = [
      80 # HTTP
      443 # HTTPS
    ];
    allowedUDPPorts = [
      443 # HTTP/3 via QUIC
    ];
  };
  age.secrets.caddy_s3_key_secret.file = ../../secrets/caddy_s3_key_secret.age;
  # S3 credentials for the certmagic-s3 cert store.
  systemd.services.caddy.serviceConfig.EnvironmentFile = config.age.secrets.caddy_s3_key_secret.path;
  systemd.services.caddy.after = [ "garage.service" ]; # the cert store depends on garage
  services.caddy = {
    enable = true;
    package = caddy;
    # Final Caddyfile = shared base + per-host debugging vhost +
    # (on website hosts) the website vhosts and the host-specific Caddyfile.
    configFile = pkgs.writeTextFile {
      name = "Caddyfile";
      text = (
        builtins.readFile ./base.Caddyfile +
        ''
          ${config.networking.hostName}.infra.noratrieb.dev {
            log
            encode zstd gzip
            header -Last-Modified
            root * ${import ./caddy-static-prepare {
              name = "debugging-page";
              src = ./debugging-page;
              inherit pkgs lib;
            }}
            file_server {
              etag_file_extensions .sha256
              precompressed zstd gzip br
            }
          }
          ${
            if servesWebsite then ''
              noratrieb.dev {
                log
                encode zstd gzip
                header -Last-Modified
                root * ${import ./caddy-static-prepare {
                  name = "website";
                  src = website { inherit pkgs slides blog; };
                  inherit pkgs lib;
                }}
                file_server {
                  etag_file_extensions .sha256
                  precompressed zstd gzip br
                }
              }
              files.noratrieb.dev {
                log
                encode zstd gzip
                reverse_proxy * localhost:3902
              }
            '' else ""
          }
          ${
            if servesWebsite then
              builtins.readFile ./${name}.Caddyfile else ""
          }
        ''
      );
      # Fail the build early if the generated Caddyfile doesn't validate.
      checkPhase = ''
        ${lib.getExe caddy} --version
        ${lib.getExe caddy} validate --adapter=caddyfile --config=$out
      '';
    };
  };
}

View file

@ -0,0 +1,119 @@
# vps1-specific sites: app reverse proxies + legacy-domain redirects.
www.noratrieb.dev {
log
redir https://noratrieb.dev{uri} permanent
}
uptime.noratrieb.dev {
log
encode zstd gzip
reverse_proxy * localhost:5010
}
hugo-chat.noratrieb.dev {
log
encode zstd gzip
reverse_proxy * localhost:5002
}
api.hugo-chat.noratrieb.dev {
log
# CORS snippet (defined in the base Caddyfile) allowing the frontend origin.
import cors https://hugo-chat.noratrieb.dev "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type"
encode zstd gzip
reverse_proxy * localhost:5001
}
bisect-rustc.noratrieb.dev {
log
encode zstd gzip
reverse_proxy * localhost:5005
}
docker.noratrieb.dev {
log
# no encode: registry blobs are already compressed
reverse_proxy * localhost:5000
}
git.noratrieb.dev {
log
encode zstd gzip
reverse_proxy * localhost:5015
}
olat.noratrieb.dev {
log
encode zstd gzip
reverse_proxy * localhost:5011
}
# unsure if necessary... something was misconfigured in the past here...
olat.noratrieb.dev:8088 {
log
encode zstd gzip
reverse_proxy * localhost:5011
}
upload.files.noratrieb.dev {
log
encode zstd gzip
# we need HTTP/2 here because the server doesn't work with HTTP/1.1
# because it will send early 401 responses during the upload without consuming the body
reverse_proxy * h2c://localhost:3050
}
################################################################
# redirects
blog.noratrieb.dev {
log
redir https://noratrieb.dev/blog{uri} permanent
}
# legacy nilstrieb.dev domains redirect to their noratrieb.dev counterparts
nilstrieb.dev {
log
redir https://noratrieb.dev{uri} permanent
}
www.nilstrieb.dev {
log
redir https://noratrieb.dev{uri} permanent
}
blog.nilstrieb.dev {
log
redir https://noratrieb.dev/blog{uri} permanent
}
# Legacy-domain redirect to the current host.
# FIX: previously redirected to https://bisect-rustc.dev/blog{uri} (a stray
# copy of the blog redirect, pointing at a domain not owned here); every other
# *.nilstrieb.dev block redirects to its *.noratrieb.dev counterpart.
bisect-rustc.nilstrieb.dev {
	log
	redir https://bisect-rustc.noratrieb.dev{uri} permanent
}
# remaining legacy nilstrieb.dev -> noratrieb.dev redirects
docker.nilstrieb.dev {
log
redir https://docker.noratrieb.dev{uri} permanent
}
hugo-chat.nilstrieb.dev {
log
redir https://hugo-chat.noratrieb.dev{uri} permanent
}
api.hugo-chat.nilstrieb.dev {
log
redir https://api.hugo-chat.noratrieb.dev{uri} permanent
}
uptime.nilstrieb.dev {
log
redir https://uptime.noratrieb.dev{uri} permanent
}
olat.nilstrieb.dev {
log
redir https://olat.noratrieb.dev{uri} permanent
}
# mirrors the olat.noratrieb.dev:8088 vhost above
olat.nilstrieb.dev:8088 {
log
redir https://olat.noratrieb.dev{uri} permanent
}

View file

@ -0,0 +1,5 @@
# Grafana web UI (runs on this host, see the monitoring module).
grafana.noratrieb.dev {
log
encode zstd gzip
reverse_proxy * localhost:3000
}

View file

@ -0,0 +1,5 @@
# does-it-build service (systemd unit on this host).
# NOTE(review): proxies to :3000, the same port grafana uses on its host —
# confirm does-it-build actually listens on 3000 here.
does-it-build.noratrieb.dev {
log
encode zstd gzip
reverse_proxy * localhost:3000
}

View file

@ -0,0 +1,9 @@
# Default settings for Contabo VPS.
# Shared hardware/bootloader profile imported by every Contabo-hosted machine.
{ ... }: {
# Contabo VPSes boot via legacy GRUB from the single virtual disk.
boot.loader.grub.device = "/dev/sda";
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ];
boot.initrd.kernelModules = [ "nvme" ];
fileSystems."/" = { device = "/dev/sda3"; fsType = "ext4"; };
# colmena/morph-style deployment tag to target all Contabo hosts at once.
deployment.tags = [ "contabo" ];
}

View file

@ -0,0 +1,163 @@
# Baseline configuration applied to every host: SSH hardening, agenix,
# honeypot services (pretense, quotdd), and monitoring (node exporter,
# promtail shipping the journal to loki on vps3).
{ pkgs, lib, config, name, pretense, quotdd, nixpkgs-path, ... }: {
deployment.targetHost = "${config.networking.hostName}.infra.noratrieb.dev";
imports = [
"${builtins.fetchTarball "https://github.com/ryantm/agenix/archive/de96bd907d5fbc3b14fc33ad37d1b9a3cb15edc6.tar.gz"}/modules/age.nix" # main 2024-07-26
];
nix = {
nixPath = [ "nixpkgs=${nixpkgs-path}" ];
};
# basic debugging/admin tools available on every host
environment.systemPackages = with pkgs; [
vim
wget
curl
traceroute
dnsutils
nftables
];
networking.hostName = name;
time.timeZone = "Europe/Zurich";
users.users.root.openssh.authorizedKeys.keys = [ ''ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG0n1ikUG9rYqobh7WpAyXrqZqxQoQ2zNJrFPj12gTpP nilsh@PC-Nils'' ];
boot.tmp.cleanOnBoot = true;
zramSwap.enable = true;
services.openssh = {
enable = true;
openFirewall = true;
banner = "meoooooow!! 😼 :3\n";
hostKeys = [
{
path = "/etc/ssh/ssh_host_ed25519_key";
type = "ed25519";
}
{
# P256
path = "/etc/ssh/ssh_host_ecdsa_key";
type = "ecdsa";
}
{
bits = 4096;
path = "/etc/ssh/ssh_host_rsa_key";
type = "rsa";
}
];
settings = {
# key-only auth; fail2ban below handles the remaining brute-force noise
PasswordAuthentication = false;
};
};
services.fail2ban = {
enable = true;
};
system.nixos.distroName = "NixOS (gay 🏳)";
# pretense: fake-service honeypot that logs connection attempts and exposes
# prometheus metrics on :9150.
systemd.services.pretense = {
description = "pretense connection logger";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
DynamicUser = true;
ExecStart = "${lib.getExe (pretense {inherit pkgs;})}";
# needed to bind ports < 1024 (e.g. telnet/23) without running as root
AmbientCapabilities = "CAP_NET_BIND_SERVICE";
Environment = [
"PRETENSE_PORTS=23,3306,5432,1521" # telnet,mysql,postgres,oracle
"PRETENSE_METRICS_PORT=9150"
];
};
};
# quotdd: RFC 865 quote-of-the-day server on port 17.
systemd.services.quotdd = {
description = "quotdd Quote of The Day Daemon";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
DynamicUser = true;
ExecStart = "${lib.getExe (quotdd {inherit pkgs;})}";
AmbientCapabilities = "CAP_NET_BIND_SERVICE";
Environment = [ ];
};
};
networking.firewall.allowedTCPPorts = [
23 # telnet, pretense
3306 # mysql, pretense
5432 # postgres, pretense
1521 # oracle, pretense
17 # quote of the day, quotdd
];
# monitoring
networking.firewall.interfaces.wg0.allowedTCPPorts = [
9100 # node exporter
9150 # pretense exporter
];
services.prometheus.exporters = {
node = {
enable = true;
};
};
# Ship the systemd journal to loki (vps3) over the wireguard mesh; enrich
# failed-SSH-login lines with geoip data.
services.promtail = {
enable = true;
configuration = {
server = {
# no local HTTP server needed; promtail only pushes
disable = true;
};
clients = [
{
url = "http://vps3.local:3100/loki/api/v1/push";
}
];
scrape_configs = [
{
job_name = "journal";
journal = {
max_age = "24h";
labels = {
job = "systemd-journal";
node = name;
};
};
pipeline_stages = [{
match = {
selector = "{unit = \"sshd.service\"} |= \"Invalid user\"";
stages = [
{ regex = { expression = "Invalid user.*from (?P<ip>.*) port.*"; }; }
{
geoip = {
db = pkgs.fetchurl
{
# Note: You cannot use this for your own usage, this is only for me.
url = "https://github.com/noratrieb-mirrors/maxmind-geoip/releases/download/20240922/GeoLite2-City.mmdb";
sha256 = "sha256-xRGf2JEaEHpxEkIq3jJnZv49lTisFbygbjxiIZHIThg=";
};
source = "ip";
db_type = "city";
};
}
];
};
}];
# map journal metadata onto loki labels
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}
{
source_labels = [ "__journal__hostname" ];
target_label = "host";
}
{
source_labels = [ "__journal_priority_keyword" ];
target_label = "severity";
regex = "(.+)";
}
];
}
];
};
};
}

View file

@ -0,0 +1,48 @@
# Authoritative DNS server (knot) serving the noratrieb.dev and nilstrieb.dev
# zones, with zone files generated at build time via dns.nix.
{ pkgs, lib, networkingConfig, ... }: {
# get the package for the debugging tools
environment.systemPackages = with pkgs; [ knot-dns ];
networking.firewall.allowedUDPPorts = [
53
];
# make the dns.nix library available as pkgs.nix-dns for the zone files
nixpkgs.overlays = [
(final: prev: {
nix-dns = import (pkgs.fetchFromGitHub {
owner = "nix-community";
repo = "dns.nix";
rev = "v1.1.2";
hash = "sha256-EHiDP2jEa7Ai5ZwIf5uld9RVFcV77+2SUxjQXwJsJa0=";
});
})
];
services.knot = {
enable = true;
settingsFile = pkgs.writeTextFile {
name = "knot.conf";
text = ''
server:
listen: 0.0.0.0@53
listen: ::@53
zone:
- domain: noratrieb.dev
storage: /var/lib/knot/zones/
file: ${import ./noratrieb.dev.nix { inherit pkgs lib networkingConfig; }}
- domain: nilstrieb.dev
storage: /var/lib/knot/zones/
file: ${import ./nilstrieb.dev.nix { inherit pkgs lib networkingConfig; }}
log:
- target: syslog
any: info
'';
};
};
networking.firewall.interfaces.wg0.allowedTCPPorts = [ 9433 ]; # metrics
services.prometheus.exporters.knot = {
enable = true;
port = 9433;
};
}

View file

@ -0,0 +1,66 @@
# https://github.com/nix-community/dns.nix
# Legacy nilstrieb.dev zone: all records point at vps1, which serves
# permanent redirects to noratrieb.dev (see vps1.Caddyfile).
{ pkgs, lib, networkingConfig, ... }:
let
data = with pkgs.nix-dns.lib.combinators;
let
hour1 = 3600;
# host name -> { A = [...]; AAAA = [...]; } from the shared networking config
hostsToDns = builtins.mapAttrs
(name: { publicIPv4, publicIPv6, ... }:
lib.optionalAttrs (publicIPv4 != null) { A = [ (a publicIPv4) ]; } //
lib.optionalAttrs (publicIPv6 != null) { AAAA = [ (aaaa publicIPv6) ]; })
networkingConfig;
# vps2 is not in networkingConfig; hardcoded legacy address
vps2 = {
A = [ "184.174.32.252" ];
};
in
with hostsToDns;
# point nilstrieb.dev to vps1 (retired)
vps1 // {
TTL = hour1;
SOA = {
nameServer = "ns1.nilstrieb.dev.";
adminEmail = "void@nilstrieb.dev";
serial = 2024072601;
};
CAA = [
{ issuerCritical = false; tag = "issue"; value = "letsencrypt.org"; }
{ issuerCritical = false; tag = "issue"; value = "sectigo.com"; }
];
NS = [
"ns1.nilstrieb.dev."
"ns2.nilstrieb.dev."
];
subdomains = {
ns1 = dns1;
ns2 = dns2;
localhost.A = [ (a "127.0.0.1") ];
# --- retired:
bisect-rustc = vps1;
blog = vps1;
docker = vps1;
www = vps1;
uptime = vps1;
hugo-chat = vps1 // {
subdomains.api = vps1;
};
olat = vps1;
# ---
# infra (legacy)
inherit vps2;
pronouns.TXT = [
"she/her"
];
};
};
in
pkgs.writeTextFile {
name = "nilstrieb.dev.zone";
text = pkgs.nix-dns.lib.toString "nilstrieb.dev" data;
}

View file

@ -0,0 +1,108 @@
# https://github.com/nix-community/dns.nix
# Primary noratrieb.dev zone. The apex is served round-robin by vps1/3/4;
# most apps still live on vps1 only.
{ pkgs, lib, networkingConfig, ... }:
let
data = with pkgs.nix-dns.lib.combinators;
let
hour1 = 3600;
# host name -> { A = [...]; AAAA = [...]; } from the shared networking config
hostsToDns = builtins.mapAttrs
(name: { publicIPv4, publicIPv6, ... }:
lib.optionalAttrs (publicIPv4 != null) { A = [ (a publicIPv4) ]; } //
lib.optionalAttrs (publicIPv6 != null) { AAAA = [ (aaaa publicIPv6) ]; })
networkingConfig;
# merge several hosts' A/AAAA records into one round-robin record set
combine = hosts: {
A = lib.lists.flatten (map (host: if builtins.hasAttr "A" host then host.A else [ ]) hosts);
AAAA = lib.lists.flatten (map (host: if builtins.hasAttr "AAAA" host then host.AAAA else [ ]) hosts);
};
in
with hostsToDns;
# vps{1,3,4} contains root noratrieb.dev
combine [ vps1 vps3 vps4 ] // {
TTL = hour1;
SOA = {
nameServer = "ns1.noratrieb.dev.";
adminEmail = "void@noratrieb.dev";
serial = 2024072601;
};
NS = [
"ns1.noratrieb.dev."
"ns2.noratrieb.dev."
];
CAA = [
{ issuerCritical = false; tag = "issue"; value = "letsencrypt.org"; }
{ issuerCritical = false; tag = "issue"; value = "sectigo.com"; }
];
TXT = [
"protonmail-verification=09106d260e40df267109be219d9c7b2759e808b5"
"v=spf1 include:_spf.protonmail.ch ~all"
];
MX = [
(mx.mx 10 "mail.protonmail.ch.")
(mx.mx 20 "mailsec.protonmail.ch.")
];
subdomains = {
# --- NS records
ns1 = dns1;
ns2 = dns2;
# --- website stuff
blog = vps1;
www = vps1;
files = combine [ vps1 vps3 vps4 ] // {
subdomains = {
upload = vps1;
};
};
# --- apps
bisect-rustc = vps1;
docker = vps1;
hugo-chat = vps1 // {
subdomains.api = vps1;
};
uptime = vps1;
does-it-build = vps4;
git = vps1;
olat = vps1;
std.CNAME = [ (cname "noratrieb.github.io.") ];
# --- fun shit
localhost.A = [ (a "127.0.0.1") ];
newtest.TXT = [ "uwu it works" ];
pronouns.TXT = [
"she/her"
];
sshhoneypot = vps5;
# --- infra
grafana = vps3;
infra.subdomains = hostsToDns;
# --- other verification
_discord.TXT = [ "dh=e0f7e99c70c4ce17f7afcce3be8bfda9cd363843" ];
_atproto.TXT = [ "did=did:plc:pqyzoyxk7gfcbxk65mjyncyl" ];
# --- email
_domainkey.subdomains = {
protonmail.CNAME = [ (cname "protonmail.domainkey.deenxxi4ieo32na6brazky2h7bt5ezko6vexdbvbzzbtj6oj43kca.domains.proton.ch.") ];
protonmail2.CNAME = [ (cname "protonmail2.domainkey.deenxxi4ieo32na6brazky2h7bt5ezko6vexdbvbzzbtj6oj43kca.domains.proton.ch.") ];
protonmail3.CNAME = [ (cname "protonmail3.domainkey.deenxxi4ieo32na6brazky2h7bt5ezko6vexdbvbzzbtj6oj43kca.domains.proton.ch.") ];
};
_dmarc.TXT = [
"v=DMARC1; p=quarantine"
];
};
};
in
pkgs.writeTextFile
{
name = "noratrieb.dev.zone";
text = pkgs.nix-dns.lib.toString "noratrieb.dev" data;
}

View file

@ -0,0 +1,34 @@
# garage
## layout
- co-ka -> Contabo Karlsruhe
- co-du -> Contabo Düsseldorf
- he-nu -> Hetzner Nürnberg
## buckets
- `caddy-store`: Store for Caddy webservers
- key `caddy` RW
- `docker-registry`
- key `docker-registry` RW
- `loki`
- key `loki` RW
- `backups`
- key `backups` RW
- `forgejo`
- key `forgejo` RW
- `files.noratrieb.dev`
- key `upload-files` RW
## keys
- `caddy`: `GK25e33d4ba20d54231e513b80`
- `docker-registry`: `GK48011ee5b5ccbaf4233c0e40`
- `loki`: `GK84ffae2a0728abff0f96667b`
- `backups`: `GK8cb8454a6f650326562bff2f`
- `forgejo`: `GKc8bfd905eb7f85980ffe84c9`
- `upload-files`: `GK607464882f6e29fb31e0f553`
- `admin`: `GKaead6cf5340e54a4a19d9490`
- RW permissions on ~every bucket

View file

@ -0,0 +1,49 @@
# Garage S3 cluster node: replication factor 3 across the mesh, sqlite
# metadata, admin API on :3903 for prometheus scraping.
{ config, pkgs, name, ... }: {
  # RPC secret + admin tokens, decrypted via agenix into an env file.
  age.secrets.garage_secrets.file = ../../secrets/garage_secrets.age;
  environment.systemPackages = with pkgs; [
    minio-client # `mc`, for poking at the S3 API by hand
  ];
  networking.firewall.interfaces.wg0.allowedTCPPorts = [
    3901 # RPC
    3903 # admin for metrics
  ];
  services.garage = {
    enable = true;
    package = pkgs.garage_1_1_0;
    settings = {
      metadata_dir = "/var/lib/garage/meta";
      data_dir = "/var/lib/garage/data";
      db_engine = "sqlite";
      metadata_auto_snapshot_interval = "6h";
      replication_factor = 3;
      # arbitrary, but a bit higher as disk space matters more than time. she says, cluelessly.
      # FIX: garage's TOML key is snake_case `compression_level`; the previous
      # hyphenated `compression-level` did not match the real setting.
      compression_level = 5;
      rpc_bind_addr = "[::]:3901";
      # peers find this node via its wireguard-mesh hostname
      rpc_public_addr = "${name}.local:3901";
      s3_api = {
        s3_region = "garage";
        api_bind_addr = "[::]:3900";
        root_domain = ".s3.garage.localhost";
      };
      s3_web = {
        bind_addr = "[::]:3902";
        root_domain = ".web.garage.localhost";
        index = "index.html";
      };
      admin = {
        api_bind_addr = "[::]:3903";
      };
    };
    environmentFile = config.age.secrets.garage_secrets.path;
  };
}

View file

@ -0,0 +1,8 @@
# Container runtime backing virtualisation.oci-containers on this host.
{ ... }: {
virtualisation.podman = {
enable = true;
};
# allow DNS from container networks despite the host firewall, see:
# https://github.com/NixOS/nixpkgs/issues/226365
networking.firewall.interfaces."podman+".allowedUDPPorts = [ 53 5353 ];
# credential for pulling images from the private docker.noratrieb.dev registry
age.secrets.docker_registry_password.file = ../../secrets/docker_registry_password.age;
}

View file

@ -0,0 +1,165 @@
# Monitoring stack (runs on vps3): prometheus scraping all hosts over the
# wireguard mesh, grafana for dashboards, loki (storage in garage S3) for logs.
{ config, lib, ... }: {
services.prometheus = {
enable = true;
globalConfig = { };
# all targets are *.local names resolved via the wireguard mesh hosts file
scrapeConfigs = [
{
job_name = "prometheus";
static_configs = [
{ targets = [ "localhost:9090" ]; }
];
}
{
job_name = "node";
static_configs = [
{ targets = [ "dns1.local:9100" ]; }
{ targets = [ "dns2.local:9100" ]; }
{ targets = [ "vps1.local:9100" ]; }
{ targets = [ "vps2.local:9100" ]; }
{ targets = [ "vps3.local:9100" ]; }
{ targets = [ "vps4.local:9100" ]; }
{ targets = [ "vps5.local:9100" ]; }
];
}
{
job_name = "caddy";
static_configs = [
{ targets = [ "vps1.local:9010" ]; }
{ targets = [ "vps2.local:9010" ]; }
{ targets = [ "vps3.local:9010" ]; }
{ targets = [ "vps4.local:9010" ]; }
{ targets = [ "vps5.local:9010" ]; }
];
}
{
job_name = "docker-registry";
static_configs = [
{ targets = [ "vps1.local:9011" ]; }
];
}
{
job_name = "garage";
static_configs = [
{ targets = [ "vps1.local:3903" ]; }
{ targets = [ "vps2.local:3903" ]; }
{ targets = [ "vps3.local:3903" ]; }
{ targets = [ "vps4.local:3903" ]; }
{ targets = [ "vps5.local:3903" ]; }
];
}
{
job_name = "knot";
static_configs = [
{ targets = [ "dns1.local:9433" ]; }
{ targets = [ "dns2.local:9433" ]; }
];
}
{
job_name = "pretense";
static_configs = [
{ targets = [ "dns1.local:9150" ]; }
{ targets = [ "dns2.local:9150" ]; }
{ targets = [ "vps1.local:9150" ]; }
{ targets = [ "vps2.local:9150" ]; }
{ targets = [ "vps3.local:9150" ]; }
{ targets = [ "vps4.local:9150" ]; }
{ targets = [ "vps5.local:9150" ]; }
];
}
];
};
# Grafana admin password is passed via environment (GF_* variables).
age.secrets.grafana_admin_password.file = ../../secrets/grafana_admin_password.age;
systemd.services.grafana.serviceConfig.EnvironmentFile = config.age.secrets.grafana_admin_password.path;
services.grafana = {
enable = true;
settings = {
security = {
admin_user = "admin";
};
server = {
# external URL; caddy on this host reverse-proxies to localhost:3000
root_url = "https://grafana.noratrieb.dev";
};
};
provision = {
enable = true;
datasources.settings = {
apiVersion = 1;
datasources = [
{
name = "Prometheus";
type = "prometheus";
access = "proxy";
url = "http://vps3.local:9090";
jsonData = {
httpMethod = "POST";
prometheusType = "Prometheus";
};
}
{
name = "loki";
type = "loki";
access = "proxy";
url = "http://vps3.local:3100";
}
];
};
};
};
networking.firewall.interfaces.wg0.allowedTCPPorts = [ 3100 ]; # loki
# S3 credentials for loki's garage bucket; expanded via -config.expand-env.
age.secrets.loki_env.file = ../../secrets/loki_env.age;
systemd.services.loki.serviceConfig.EnvironmentFile = config.age.secrets.loki_env.path;
services.loki = {
enable = true;
extraFlags = [ "-config.expand-env=true" /*"-print-config-stderr"*/ ];
configuration = {
# single-node loki, only reachable over the wireguard mesh
auth_enabled = false;
server = {
http_listen_port = 3100;
};
common = {
ring = {
instance_addr = "127.0.0.1";
kvstore.store = "inmemory";
};
replication_factor = 1;
path_prefix = "/var/lib/loki";
};
schema_config = {
configs = [
{
from = "2020-05-15";
store = "tsdb";
object_store = "s3";
schema = "v13";
index = {
prefix = "index_";
period = "24h";
};
}
];
};
storage_config = {
tsdb_shipper = {
active_index_directory = "/var/lib/loki/index";
cache_location = "/var/lib/loki/cache";
};
# chunks live in the local garage node's "loki" bucket
aws = {
access_key_id = "\${ACCESS_KEY}";
secret_access_key = "\${SECRET_KEY}";
endpoint = "127.0.0.1:3900";
s3forcepathstyle = true;
region = "garage";
insecure = true;
s3 = "s3://\${ACCESS_KEY}:\${SECRET_KEY}@127.0.0.1:3900/loki";
};
};
};
};
# loki refuses to start if its data directories are missing/unwritable
system.activationScripts.makeLokiDir = lib.stringAfter [ "var" ] ''
mkdir -p /var/lib/loki/{index,cache}
chown ${config.services.loki.user}:${config.services.loki.group} -R /var/lib/loki
'';
}

View file

@ -0,0 +1,58 @@
# Private Docker registry (docker.noratrieb.dev): stores blobs in garage S3,
# htpasswd auth, prometheus metrics on :9011 over the mesh.
{ config, lib, ... }: {
age.secrets = {
registry_htpasswd = {
file = ../../secrets/registry_htpasswd.age;
owner = config.users.users.docker-registry.name;
};
registry_s3_key_secret = {
file = ../../secrets/registry_s3_key_secret.age;
owner = config.users.users.docker-registry.name;
};
};
networking.firewall.interfaces.wg0.allowedTCPPorts = [ 9011 ]; # metrics
systemd.services.docker-registry.serviceConfig.EnvironmentFile = config.age.secrets.registry_s3_key_secret.path;
services.dockerRegistry = {
enable = true;
# no local filesystem storage; everything goes to S3 below
storagePath = null;
port = 5000;
extraConfig = {
log = {
accesslog.disabled = false;
level = "info";
formatter = "text";
fields.service = "registry";
};
# the NixOS module defaults to a redis cache; not wanted here
redis = lib.mkForce null;
storage = {
s3 = {
regionendpoint = "http://127.0.0.1:3900";
forcepathstyle = true; # ensure it doesn't try docker-registry.127.0.0.1 as the host
region = "garage";
bucket = "docker-registry";
# accesskey = ""; ENV REGISTRY_STORAGE_S3_ACCESSKEY
# secretkey = ""; ENV REGISTRY_STORAGE_S3_SECRETKEY
secure = false;
};
# serve blobs through the registry instead of redirecting to S3 URLs
redirect.disable = true;
};
http = {
host = "https://docker.noratrieb.dev";
draintimeout = "60s";
debug = {
addr = ":9011";
prometheus = {
enabled = true;
path = "/metrics";
};
};
};
auth.htpasswd = {
# TODO: ugh :(
realm = "nilstrieb-registry";
path = config.age.secrets.registry_htpasswd.path;
};
};
};
}

View file

@ -0,0 +1,49 @@
# Full-mesh wireguard VPN between all hosts; every peer gets a stable
# $HOST.local name mapped to its mesh-internal IP.
{ name, config, networkingConfig, ... }:
let
wgSettings = (builtins.getAttr name networkingConfig).wg;
listenPort = 51820;
in
{
# Map from $HOST.local to the private IP.
networking.hosts =
let
hostsEntries = map
(host:
let hostConfig = builtins.getAttr host networkingConfig; in
# hosts without a `wg` attr are not in the mesh; drop them below
if builtins.hasAttr "wg" hostConfig then {
name = hostConfig.wg.privateIP;
value = [ "${host}.local" ];
} else null)
(builtins.attrNames networkingConfig);
wgHostEntries = builtins.filter (entry: entry != null) hostsEntries;
in
builtins.listToAttrs wgHostEntries;
networking.firewall.allowedUDPPorts = [
listenPort
];
# per-host wireguard private key, decrypted via agenix
age.secrets.wg_private.file = ../../secrets/wg_private_${name}.age;
networking.wg-quick.interfaces = {
wg0 = {
address = [ "${wgSettings.privateIP}/24" ];
inherit listenPort;
privateKeyFile = config.age.secrets.wg_private.path;
peers = map
(peer:
let peerConfig = (builtins.getAttr peer networkingConfig).wg;
in {
inherit (peerConfig) publicKey;
endpoint = "${peer}.infra.noratrieb.dev:${toString listenPort}";
allowedIPs = [ "${peerConfig.privateIP}/32" ];
# sometimes there's some weirdness....??
persistentKeepalive = 25;
}
)
wgSettings.peers;
};
};
deployment.tags = [ "wg-mesh" ];
}

9
nix/my-projects.json Normal file
View file

@ -0,0 +1,9 @@
{
"website": "57c4a239da5d17eafde4ade165f3c6706639a9b4",
"blog": "ea2758dd10f29e8d66ca3f54d7303f2ac20005d2",
"slides": "0401f35c22b124b69447655f0c537badae9e223c",
"pretense": "270b01fc1118dfd713c1c41530d1a7d98f04527d",
"quotdd": "e922229e1d9e055be35dabd112bafc87a0686548",
"does-it-build": "81790825173d87f89656f66f12a123bc99e2f6f1",
"upload.files.noratrieb.dev": "0124fa5ba5446cb463fb6b3c4f52e7e6b84e5077"
}

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 qM6TYg UtoSFhZQ2PW1y3ifXgSdQQswoi5kdRg2gvczlEateC4
ir2FpFkYo17MGBy+C4thM4lit7vn2CiBi09DcTb6ubs
--- YvRhsfFzedjeKssmOTzHvKkvIG0zXVVCIJsRNc/LTVg
 €KîÞ$é†Prm;Û·ûÎªæ ¹Œö+é ÚqE@<40>Àv]¢Ôòm =Í™'Sm

12
nix/secrets/encrypt.sh Executable file
View file

@ -0,0 +1,12 @@
#!/usr/bin/env bash
# Re-encrypt every plaintext secret from ../../secrets-git-crypt into an
# agenix-managed .age file next to this script.
set -euxo pipefail

script_dir=$(realpath "$(dirname "$0")")
cd "$script_dir"

for plaintext in ../../secrets-git-crypt/*; do
  # "foo.ext" -> "foo_ext.age" (first dot replaced, matching secrets.nix names)
  target="$(basename "$plaintext" | sed 's/\./_/').age"
  rm -f "$target"
  agenix -e "$target" < "$plaintext"
done

View file

@ -0,0 +1,7 @@
age-encryption.org/v1
-> ssh-ed25519 qM6TYg GNYf0FjEDEqCe09mS9Hl7OIIjvhKTu8urwUPtY+yyB0
xmAtm4n3s0rfq3S5OKFEG2k/noXFTKMt8hiW5QrD9SU
--- HGBYxXQGM254m2YP5twgjgDme80f0uOL2m4uKy19ZBs
ÖÂ(
Õ×åÇÄT
‚®à±Öì{ÙõF“ü-\ƒ6{mítÏæÊMÑ-óX{‡%bQd]E³Éàü]i¸úãË}F»2¸$7¤ö#k4“;8ZžGþ_oÛ –¼

View file

@ -0,0 +1,13 @@
age-encryption.org/v1
-> ssh-ed25519 qM6TYg F9aj1EmsmRSXt1m3a41zpuwFmDBOuuaIrHkqP7PTVno
tVs8Oxa9gV/HdUf0hN/JLuWhbrXI9BXIrsh5HnsKBQI
-> ssh-ed25519 pP9cdg dQdPm3OfbWl5Y8kJxmsUZ4rwpUo8w3+P3CHCiXw9VCw
9yWbGgzgBz9GICAgYiOyPtMjDk/tBb4vsOveTuYP9bw
-> ssh-ed25519 XzACZQ 4lldtotM16DN/75dRX3QEmOzfIEySHcNOlFWqymI+Rs
oOaD7dZu0xC0R7CrVpfwoBU7eSgaWyJmAZ4WptCQdes
-> ssh-ed25519 51bcvA k9eq2Tc3A9MztsdTvt3sDYUj/usYBJMp9IJQZAR67Ac
ezccfIhPZaHKsVcUrxJL7u3jSA/kCTqLmWuQfxrFQBo
-> ssh-ed25519 vT7ExA BOCylq1RqaburnXxfsl3xqAmGSJnIxVhXK8H2xeFynk
OWhqsbJgHWlo3hsRZVQgEaArK32OI25N4Poi2qJ9wQs
--- bBQkNfDI0onJOyxOJIN3Yl2jkK5iRgYbK67RWsipXOE
3‡ýåA9â¯ÒAÕînÛ¯t•y®ßÚCj-îž{ÏŇâ)ô6¬DfØOÆQ¹Ü}'_n†øÈã‡>UPêNæDRŸÀÁª¨ûÊÆþ-<2D>¾„…éÂ"‡´úÛâšÙ?À>)E0<7F>‡v(~7 C¾O\UJJüŽ$SÂ8èá`€F«˜ÄíšQ§0uÙ3õmH•Ž~P÷Ž£ŒÅLqfõ~ºi¸Æn]=rSre#²wGŒ ³¥@ß|X#éØ÷’Â

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 XzACZQ g3qlnIBoRdlhvAhDd1oLC7sdWAYGw5FobFAbOp0Eamo
FGoPMBeNp63zkvTml9cnXspAS65/G2+3hzaeMu/ack8
--- /cGmX9i8KBgLSiv0HC7QGJoF5+C6wBHbBOhoIw5iRIE
í?Þ³¸[%N+ueeá8YÀЕFÞÇkM<6B>¯x&k+jŒì­ö¹œƒfW*U4½Ìß&“d˜ymNb¬úÀ?AcœiÝ€‡„á:n}<$]˜Ã‰Œ•0E¤má=/U6-j½

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 qM6TYg ZIBHuyNI3wIg1GaFtgZM+ubYEM2yoaM0cbG+Pei+chY
Bp4xfIz7PzmFADD+w8fnZ73KwAojT22WADuUA3kQc8Q
--- HvjuHpMC7XvjiM/y0zgOyg080PO3BbwnSWNgbZSIUWc
Í!„C¸¼›Y>coŒ+5\‘ëÇãÚµÍjG1sF ÎPÌÝ·°Óߊ*3Ö³³œ,,ý«U¾(^;Ãøbg€egÓtÐ:–³¨Ý®`áûa_>"eù=hC¡<43>ÛËÃ_ @¹ÖÓÚ³\SCoŒ[£Ì4x&êÄÿ9€Y Œæ<C592>ÅÏ @í­Ûƒ'Kd#aä ˆ¹^Öt°Ä…

View file

@ -0,0 +1,7 @@
age-encryption.org/v1
-> ssh-ed25519 qM6TYg i9WKTDnkYrTPkHzEDzbpwE0UzYvsHGNdezC43k9N6xA
mQdIAyNO+1spsyKXdu4VxF18Dlh6ORkIn8qQVew6b0E
--- JA923cG0dvBxGC7zsjdKFKZLcHvTj3PgyISIFpEsKBE
;:Á
¸â^}cÌÖÎËuœî«öd£
»Ú•LmÂTzzM0RÝa=õi Íí!Uiþíö=I%@ÙÄ…pŠ÷¬ú܉KûÑÄ[­ÖFÐm<C390>/ajx^¨cDÁ0,Ü…Ûu,Ù<>Qþßž-ϼVë

5
nix/secrets/loki_env.age Normal file
View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 XzACZQ 4ra1tOKgtlquGn8NV4e5WVP9/x3hfV86Bq7xSv3bFmI
6aPQO3Sc++l2NpmmRhPo4RcdL3bsRLcbqHF4bWfYqJg
--- b739OmteTR/Z3J3HZqcmqKYvMucyNSbTabqopToJHpY
0¤¾C¦üŠŠ€,‡]Àwj¶£ŒÕÞ<4jŠsï@ÌeW9j¸¾w|¦Y2ÛJ{ƒÕ« “»­Té3„ƒ÷<C692>ëN”áŒý6ž)Ò $&;pÑtôC9&òÓy$÷JŸ°A—Îc†I½éáGéh:ÈO±ÇÊ<C387>HøVn%Ș}r3H¸‡®§a8ÐÀ9©p5

View file

@ -0,0 +1,7 @@
age-encryption.org/v1
-> ssh-ed25519 qM6TYg FPuST3lWjHKcylkh0mlRbQm8lM28wce4Bb2/rp1zu2k
cmA9aRF8zDe6YYmBCH7pOtl1FflKxwAiHtMYTQ0OWMk
-> ssh-ed25519 XzACZQ 2M5O5Rj2LAS1T9UXRYeUZrq3iBiJu/0TPOtz5yC+nyE
A1JFvr1iVj2Mc4F7/yjGxikmdAbofTuOMvI8QtyzTr4
--- 7JakO0Kuuskiup7D+cYP7OKQtld7h7salUMRoOGa88Q
´ük·5 <20> »JçOU/<2F>St!ôûk2v§òm]ß o¾ü5V Å Ò·BE!×QtÂJ!|[ÉçÀÅ5(FxÁ<zk.‰Õ;.} eѨ=˜ |,$IƒÒšÈ[ᩨ‰dªªY,

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 qM6TYg 0lWcSdSricBNu8i0oMnNe0gOsoDrY9DfPvmCIS63ohc
fY0M+k7xXU5nlLTSbJQF7iDevujQVxZ2lLca9CiBTaI
--- 5ObZSaeWsTlkqKq5D8vWKsrY8WCku2ndSlrjBKRtQE8
ÕóˆI™þye$Q˜÷|<7C>îÂÏÂ<C38F>h'Q1Q¥·1éCõÕòÞ€mQ:ØQ.¿ýÎS¼îžE¨=cm… ¥äŠ@ß-¤9Öj®Œó7fǺFÚTÜ"<22>oâù"|¼0€Dξ‰å™‘ÊöWÅm*ß̬õ~5â'ç›{ jÝlu„¿Á”u Òßy+„¢ö

Binary file not shown.

Binary file not shown.

Binary file not shown.

35
nix/secrets/secrets.nix Normal file
View file

@ -0,0 +1,35 @@
# agenix secret declarations: maps each encrypted *.age file to the host SSH
# public keys allowed to decrypt it. Edit a secret with `agenix -e <file>.age`.
let
# host SSH ed25519 public keys, one per machine
dns1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINBKoyDczFntyQyWj47Z8JeewKcCobksd415WM1W56eS";
dns2 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINZ1yLdDhI2Vou/9qrPIUP8RU8Sg0WxLI2njtP5hkdL7";
vps1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII4Xj3TsDPStoHquTfOlyxShbA/kgMfQskKN8jpfiY4R";
vps2 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKzt3OZAOG2sih8T9Bhoqg8ANBP5ZX60z0xmUW4cBWvX";
vps3 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHvupo7d9YMZw56qhjB+tZPijxiG1dKChLpkOWZN0Y7C";
vps4 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMpoLgBTWj1BcNxXVdM26jDBZl+BCtUTj20Wv4sZdCHz";
vps5 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBWbIznvWQSqRF1E9Gv9y7JXMy3LZxMAWj6K0Nq91kyZ";
in
{
"widetom_bot_token.age".publicKeys = [ vps1 ];
"widetom_config_toml.age".publicKeys = [ vps1 ];
"docker_registry_password.age".publicKeys = [ vps1 ];
"hugochat_db_password.age".publicKeys = [ vps1 ];
"openolat_db_password.age".publicKeys = [ vps1 ];
"minio_env_file.age".publicKeys = [ vps1 vps3 ];
# cluster-wide secrets readable by every garage/caddy node
"garage_secrets.age".publicKeys = [ vps1 vps2 vps3 vps4 vps5 ];
"caddy_s3_key_secret.age".publicKeys = [ vps1 vps2 vps3 vps4 vps5 ];
"registry_htpasswd.age".publicKeys = [ vps1 ];
"registry_s3_key_secret.age".publicKeys = [ vps1 ];
"grafana_admin_password.age".publicKeys = [ vps3 ];
"loki_env.age".publicKeys = [ vps3 ];
"backup_s3_secret.age".publicKeys = [ vps1 vps2 vps3 vps4 vps5 ];
"s3_mc_admin_client.age".publicKeys = [ vps1 vps2 vps3 vps4 vps5 ];
"killua_env.age".publicKeys = [ vps1 ];
"forgejo_s3_key_secret.age".publicKeys = [ vps1 ];
"upload_files_s3_secret.age".publicKeys = [ vps1 ];
# wireguard private keys are strictly per-host
"wg_private_dns1.age".publicKeys = [ dns1 ];
"wg_private_dns2.age".publicKeys = [ dns2 ];
"wg_private_vps1.age".publicKeys = [ vps1 ];
"wg_private_vps2.age".publicKeys = [ vps2 ];
"wg_private_vps3.age".publicKeys = [ vps3 ];
"wg_private_vps4.age".publicKeys = [ vps4 ];
"wg_private_vps5.age".publicKeys = [ vps5 ];
}

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 qM6TYg Tq8qyikECRKhPhMbKFDd+YZIGkx9uj3vOWk7QRHEkn8
wDbkM7KZWGDF3mECEa1MPPTC5F7uxe8nGtIZZkVCWU0
--- hpRMWveZaPAIS44Jr6rRGHMOQfRi7nFpN0nxHU6fPOQ
t4¼`Æò³:,P»ùÊ<C3B9>µN„?‘ij\¬È±éµ¿­uAH_Ä?PgÎ#¨ ³T+¯êRÁ-ëȺXÆ,!YeZF m»d¢À“ºø¥\4ÍbDAk×Lžk¤1RzÊÜõ˜6xo(Â8ÄgzVÍ+s|³‚ .ÃT;<3B>O¶¨M6 <0B>z¶A¨Qƒ¥öV®™¡Ÿ~„ôª§mÛN«ŠXÐI qk²ËtÔ#óÄJyºrSðuÊ?ÉîÚN Õˆbø!KsyÜ Ï, AyfWÄÑÀ##"Ë`¢™ænPÎX,$z1ðŽ (PÈÍ <0A> Ä"yû€<C3BB>|Èsð<73>TŸÂýxåBFtl!6Û‰¤ìÔ0Ùos*.H/Üoëä5<C3A4>Û­ÐlçÀ

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 LZU5Eg C3IfbvL4t0pOHEb3Bc54+r6DZESgN6K6zPDhBlDumXk
UwOtrqp8I90Vux6L7CsV5K+2SDFB8LBiyLO8ud7IsQU
--- 2tIecoG70broXFTtgjCUMcvk2RdKqpe5tihO6meI8DY
泓、kユレウ& ミ`!ヘ藪_致`<60>マ-。ヤEp^<5E>ザ#:ゥ壌]ム優mサヒy<EFBE8B>厥^O†+t8ァ€<EFBDA7>.コ鉷奘; ョ

View file

@ -0,0 +1,6 @@
age-encryption.org/v1
-> ssh-ed25519 5bWSnQ wqkRMdob+7G2mTNKySF2kiGhOKt4GLN/ne+4lM3pIwA
Iz2Brik6I6YHjVxQcoDL0UTJOWcjuiErf5kCeWpnaV0
--- 1ZkP0GiP78eGKl8te1w+o5I5kEbyPaiJFq7WGH4k1LE
á61zIìTÂU/ò5ã'|Œ½ûÊÌþìhÕ>z±ñ¶Ýr^Éwanog´lùX„º,kܶG
cÊõ¸æPÇ!Rh×»fW¥éhä §

Binary file not shown.

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 pP9cdg GI2CXAYTJWUqmab/Fnl/cFZVCCBxYZX/snQ+w0aPjSk
8D6TxN4VYH14GQJ/XhUqyfKNLjM8f3LDmykLAvtl+IM
--- 6ru8v60LKlJjpy2PnmcwBdV09KMEh+neITYyuFscSIQ
F Й¿y#<ﯗÖmàߘžº¦¼Q2Õ^T2Lâ9µ]LÄžµhž[b¼rߤ!ï³jEnS?¾ìjCRà%„ÓsŸ;mœƒ\R

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 XzACZQ pOD3jNWIufLkEVtkFJu6W0QjdzPJTK+t1MwgACv1zXU
EJQ+9xPw6MnB6nJW6nDBUlzfHyY9XlfBIQlgje+FVE4
--- BmTwJED+mJ/Qr0WFDELozwR2BgGDkHDcR2I9eSxuVn8
KêÃ~alNh.€½ù «kiAÛF*Ã/MY±ñ¡Zd†¬p”AÉ+-²Ù¬A<C2AC>¹Ü¢*S¥Z¶ï„ Nê­F­fˆbô3tª×rûÇy

View file

@ -0,0 +1,6 @@
age-encryption.org/v1
-> ssh-ed25519 51bcvA mzB9FcwUgPczK4/Rd2DZvCYoQfjT4qE+Z7HE9yHjgGU
sPDlr+YNhvbjYagyJb/kua9dWeG9tSt6KNjKh+/p+ps
--- uZVoWpqKjapTtWRGpc7cUoifwOVFfd5DU+9pQpwruuo
Fv6¾œÚËï,ø»K¶¶Ó†(ÍÝèkÙ~Y4Á.`z(]w2²MV "¼%À³JUÚ$ô•È«ÁǸCïG
_:F¸Ý§ S

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 vT7ExA WsT1cFerSGwOnhrLBTN62zydQVC1oPQxXtwQxGUSY1w
Je1zd3NJ16yaOHQD8iPX7eaPJV3WH6Z3eiDkFip/2FY
--- J6ZhIFcXF12n+pV4JEaAut/QB2c5ycYSIGo6j3nLICQ
SńOĆŤ<EFBFBD>žsIµ˝öüÜLJăŢ i—=Ům|,gőnYÖDv·ćA âd{Á·á q)~Ă3Ó!ó8¶ ·«ŢěńxPçÚńL7™"

View file

@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 qM6TYg n/6/3HfVk0IWfGRbgBB7qLkEXylLgYDxNzbLTaJWyhs
jNP6viJqbOgpNke072hDeaGmApVc51wAN/O+8Gc58U4
--- WoF4XMNOMMwKJ16Q7QrH97cGdyJ4nB4Dw04dyznfmL8
þÿ#ÙÖõ"ØLËÆi"W€µ<E282AC><>AEŒèû-?Ø•´ìæ´~Z¿\±éÞðgO¨&Ùõ¥´õÊx¤»³vÈç —¼þ¹¢&w]ý"¢¿S2VɯÁ/”É

Binary file not shown.