work more on helperbot (complete more of backup steps, add docker cleanup, add certificate pulling)

Ruben 2025-07-07 11:38:46 -05:00
commit c78e81188c

helperbot

@@ -12,7 +12,7 @@
# =============================================================================
# exit immediately if an error occurs somewhere to prevent Fucked Up Shit
# exit immediately if an error occurs somewhere to prevent things from getting Worse
set -e
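# (a stricter variant some scripts pair with this, sketched for reference — also
# treat unset variables as errors and fail a pipeline on its first failing command:)
# set -euo pipefail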
# ╭──────────────────────────────────────╮
@@ -32,12 +32,24 @@ if [ -n "$1" ]; then
synth_args_exist=1
fi
# check that everything we need is installed
function check_applications {
local all_required_applications=("bash" "docker" "backblaze-b2")
for application in "${all_required_applications[@]}"; do
if [ ! -x "$(command -v $application)" ]; then
echo "${red}Error${normal}: "$application" is not installed. Please install it and try again. Beep!"
exit 1
fi
done
}
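# (for a one-off check of a single tool, the same test can be written inline, e.g.:)
# command -v docker >/dev/null 2>&1 || { echo "docker is not installed"; exit 1; }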
# attempt to detect the system based on hostname
function detect_system {
local valid_systems=("phosphorus" "neptunium" "cerium" "synthnix")
local current_hostname=$(hostname)
for variable in "${all_known_variables[@]}"; do
for system in "${valid_systems[@]}"; do
if [ "$current_hostname" = "$system" ]; then
synth_current_system=$system
echo "Detected ${blue}${system}${normal}."
@@ -100,8 +112,11 @@ function info_help {
echo "${bold}-v${normal}, ${bold}--vacuum${normal}"
echo " Vacuum the postgresql databases."
echo
echo "${bold}--update-email-certs${normal}"
echo " Pull the email/XMPP certificates from Caddy into ${underline}/etc/certs${normal}."
echo "${bold}--docker-cleanup${normal}"
echo " Cleans up and purges all unused Docker images, networks, containers and volumes."
echo
echo "${bold}--update-certs${normal}"
echo " Pull in various certificates from Caddy into ${underline}/etc/certs${normal} required for other services."
echo
echo "${cyan}${bold}${underline}Fediverse:${normal}"
echo "${bold}--sync-blocklists${normal}"
@@ -123,34 +138,14 @@ function invalid_command {
echo "${red}Error:${normal} Invalid option \""$1"\"."
echo "\"helperbot is very confused... >~<\""
echo
echo "Run with --help to see all options."
}
# attempt to detect the system based on hostname
function detect_system {
local valid_systems=("phosphorus" "neptunium" "cerium" "synthnix")
local current_hostname=$(hostname)
for system in "${valid_systems[@]}"; do
if [ "$current_hostname" = "$system" ]; then
synth_current_system=$system
echo "Detected ${blue}${system}${normal}."
return 0
fi
done
# report if no valid system was found
echo "${red}Failed to detect system.${normal}"
echo "We're most likely being run in an environment we don't know of."
echo "Exiting..."
exit 1
echo "Run with ${bold}--help${normal} to see all options."
}
# root check
function root_check {
if [[ ${UID} != 0 ]]; then
echo "${red}helperbot must be run as root or with sudo permissions to perform this action!${normal} Beep!"
exit 1
return 1
fi
}
@@ -238,6 +233,11 @@ function system_upgrade {
# ╰────────────────╯
# mostly just symlinks to commands because i think it looks less ugly (and easier to update)
# TODO:
# https://askubuntu.com/questions/193055/how-to-make-files-accessible-only-by-root
# make secrets on server only accessible by root.
# maybe make a command to autocheck this?? idk
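# (a possible autocheck for the TODO above, sketched and untested: list any file
# under /etc/secrets that is group/world-readable or not owned by root)
# find /etc/secrets -type f \( -perm /044 -o ! -user root \) -print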
# psql vacuuming
# reusable step to vacuum databases - postgres_vacuum [postgres-db-1] [user_and_db_name] [password]
function postgres_vacuum {
@@ -246,7 +246,7 @@ function postgres_vacuum {
export $(grep -v '^#' /etc/secrets/postgres.env | xargs)
else
echo "${red}postgres_vacuum:${normal} Postgresql Secrets don't exist. Exiting..."
exit 1
return 1
fi
# vacuum
docker exec -it "$1" /bin/bash -c "POSTGRES_PASSWORD="$3" psql -U "$2" -d "$2" -c 'VACUUM ANALYZE;'"
@@ -256,7 +256,17 @@ function postgres_vacuum {
# postgres_vacuum_self
function postgres_vacuum_self {
# load postgres passwords
if [ -f /etc/secrets/postgres.env ]; then
export $(grep -v '^#' /etc/secrets/postgres.env | xargs)
else
echo "${red}postgres_vacuum_self:${normal} Postgresql Secrets don't exist. Exiting..."
return 1
fi
# vacuum self
docker exec -it postgres-db-1 /bin/bash -c "psql -U postgres -c 'VACUUM ANALYZE;'"
# unset secrets
unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
}
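# (the load/unset dance above repeats across several functions; one possible
# refactor is a pair of helpers like these — names made up, sketch only:)
# function secrets_load {
#     if [ ! -f "$1" ]; then echo "${red}secrets:${normal} "$1" doesn't exist."; return 1; fi
#     export $(grep -v '^#' "$1" | xargs)
# }
# function secrets_unload {
#     unset $(grep -v '^#' "$1" | sed -E 's/(.*)=.*/\1/' | xargs)
# }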
# psql backup
@@ -282,7 +292,7 @@ function b2_upload {
export $(grep -v '^#' /etc/secrets/b2.env | xargs)
else
echo "${red}b2_upload:${normal} B2 Secrets don't exist. Exiting..."
exit 1
return 1
fi
# upload file specified
backblaze-b2 authorize-account $B2_KEYID $B2_SECRET
@@ -302,7 +312,6 @@ function system_backup {
backup_local_folder=/srv/docker
backup_working_directory=/var/backups/phosphorus
backup_output_tar=phosphorus.tar
backup_media_output_tar=fedi_media_backups.tar
# =============================================================================
# initial steps - cleanup then create
rm -fr $backup_working_directory/*
@@ -312,7 +321,12 @@ function system_backup {
echo "${blue}Calling in vacuuming...${normal}"
system_vacuum
# =============================================================================
# backup files - sharkey
# backup files - postgres (we just want to keep a copy of the compose file)
echo "${blue}Pulling in Postgres configurations...${normal}"
mkdir -p $backup_working_directory/postgres
cp -r $backup_local_folder/postgres/compose.yaml $backup_working_directory/postgres
# =============================================================================
# sharkey
echo "${blue}Pulling in Sharkey...${normal}"
mkdir -p $backup_working_directory/sharkey/.config
# database
@@ -321,7 +335,7 @@ function system_backup {
redis_snapshot sharkey-redis-1
cp -r $backup_local_folder/sharkey/redis $backup_working_directory/sharkey
# configs, extra
cp -r $backup_local_folder/sharkey/compose.yaml $backup_working_directory/sharkey
cp $backup_local_folder/sharkey/compose.yaml $backup_working_directory/sharkey
cp -r $backup_local_folder/sharkey/.config $backup_working_directory/sharkey
# =============================================================================
# iceshrimp
@@ -330,7 +344,7 @@ function system_backup {
# database
postgres_backup postgres-db-1 iceshrimp iceshrimp $backup_working_directory
# configs, extra
cp -r $backup_local_folder/iceshrimp/compose.yaml $backup_working_directory/iceshrimp
cp $backup_local_folder/iceshrimp/compose.yaml $backup_working_directory/iceshrimp
cp -r $backup_local_folder/iceshrimp/config $backup_working_directory/iceshrimp
# =============================================================================
# mastodon
@@ -342,7 +356,7 @@ function system_backup {
redis_snapshot mastodon-redis-1
cp -r $backup_local_folder/mastodon/redis $backup_working_directory/mastodon
# configs, extra
cp -r $backup_local_folder/mastodon/compose.yaml $backup_working_directory/mastodon
cp $backup_local_folder/mastodon/compose.yaml $backup_working_directory/mastodon
cp -r $backup_local_folder/mastodon/.config $backup_working_directory/mastodon
# =============================================================================
# pds
@@ -353,7 +367,7 @@ function system_backup {
cp -r $backup_local_folder/pds/pds $backup_working_directory/pds
docker compose -f $backup_local_folder/pds/compose.yaml up -d
# configs, extra
cp -r $backup_local_folder/pds/compose.yaml $backup_working_directory/pds
cp $backup_local_folder/pds/compose.yaml $backup_working_directory/pds
# =============================================================================
# pull in any other common configs and secrets
echo "${blue}Pulling in other configurations...${normal}"
@@ -368,9 +382,9 @@ function system_backup {
zstd -z -T3 -9 --rm "$backup_working_directory/$backup_output_tar" # compress the archive
# TODO: it may be possible to combine these steps so tar automatically compresses the archive with zstd instead of doing it separately
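# (one way to fold the two steps into one, if the system's tar is GNU tar 1.31+;
# the zstd level and thread count are passed through the environment here:)
# ZSTD_CLEVEL=9 ZSTD_NBTHREADS=3 tar --zstd -cf "$backup_working_directory/$backup_output_tar.zst" $backup_working_directory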
# =============================================================================
# upload backup to backblaze - secrets used here are fetched from b2.env
# upload backup to backblaze
echo "${blue}Uploading backup...${normal}"
b2_upload $backup_working_directory $backup_output_tar
b2_upload ${backup_working_directory} ${backup_output_tar}.zst
# =============================================================================
# cleanup
echo "${blue}Cleaning up...${normal}"
@@ -380,7 +394,117 @@ function system_backup {
unset $(grep -v '^#' /etc/secrets/b2.env | sed -E 's/(.*)=.*/\1/' | xargs)
unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
elif [ "$synth_current_system" = "neptunium" ]; then # neptunium
postgres_vacuum_self
# variables - could probably be set locally but unsure how much this will dynamically change between systems
backup_local_folder=/srv/docker
backup_working_directory=/var/backups/neptunium
backup_output_tar=neptunium.tar
# =============================================================================
# initial steps - cleanup then create
rm -fr $backup_working_directory/*
mkdir -p $backup_working_directory
# =============================================================================
# call in database vacuuming function
echo "${blue}Calling in vacuuming...${normal}"
system_vacuum
# =============================================================================
# backup files - postgres (we just want to keep a copy of the compose file)
echo "${blue}Pulling in Postgres configurations...${normal}"
mkdir -p $backup_working_directory/postgres
cp -r $backup_local_folder/postgres/compose.yaml $backup_working_directory/postgres
# =============================================================================
# backup files - zitadel
echo "${blue}Pulling in ZITADEL...${normal}"
mkdir -p $backup_working_directory/zitadel
# database
postgres_backup postgres-db-1 zitadel zitadel $backup_working_directory
# configs, extra
cp $backup_local_folder/zitadel/compose.yaml $backup_working_directory/zitadel
# =============================================================================
# freshrss
echo "${blue}Pulling in FreshRSS...${normal}"
mkdir -p $backup_working_directory/freshrss
# database
postgres_backup postgres-db-1 freshrss freshrss $backup_working_directory
cp -r $backup_local_folder/freshrss/data $backup_working_directory/freshrss
# configs, extra
cp -r $backup_local_folder/freshrss/extensions $backup_working_directory/freshrss
cp $backup_local_folder/freshrss/compose.yaml $backup_working_directory/freshrss
# =============================================================================
# vaultwarden
echo "${blue}Pulling in Vaultwarden...${normal}"
mkdir -p $backup_working_directory/vaultwarden/.config
# data - similar case to the pds, there isn't a native way to make a backup
docker compose -f $backup_local_folder/vaultwarden/compose.yaml down
cp -r $backup_local_folder/vaultwarden/vw-data $backup_working_directory/vaultwarden
docker compose -f $backup_local_folder/vaultwarden/compose.yaml up -d
# configs, extra
cp $backup_local_folder/vaultwarden/compose.yaml $backup_working_directory/vaultwarden
# =============================================================================
# mailserver - TEMP: eventually we'll migrate to mailu and this will need to be changed!!!!!!
echo "${blue}Pulling in mailserver...${normal}"
mkdir -p $backup_working_directory/mailserver/docker-data
# data - once again - no native way to make a backup
docker compose -f $backup_local_folder/mailserver/compose.yaml down
cp -r $backup_local_folder/mailserver/docker-data/dms $backup_working_directory/mailserver/docker-data
docker compose -f $backup_local_folder/mailserver/compose.yaml up -d
# configs, extra
cp $backup_local_folder/mailserver/compose.yaml $backup_working_directory/mailserver
cp $backup_local_folder/mailserver/mailserver.env $backup_working_directory/mailserver
# =============================================================================
# ejabberd
echo "${blue}Pulling in ejabberd...${normal}"
mkdir -p $backup_working_directory/ejabberd
# database
postgres_backup postgres-db-1 ejabberd ejabberd $backup_working_directory
cp -r $backup_local_folder/ejabberd/files $backup_working_directory/ejabberd
# configs, extra
cp $backup_local_folder/ejabberd/compose.yaml $backup_working_directory/ejabberd
cp -r $backup_local_folder/ejabberd/conf $backup_working_directory/ejabberd
# =============================================================================
# forgejo
echo "${blue}Pulling in Forgejo...${normal}"
mkdir -p $backup_working_directory/forgejo
# database
postgres_backup postgres-db-1 forgejo forgejo $backup_working_directory
cp -r $backup_local_folder/forgejo/forgejo $backup_working_directory/forgejo
# configs, extra
cp $backup_local_folder/forgejo/compose.yaml $backup_working_directory/forgejo
# =============================================================================
# ask-js
echo "${blue}Pulling in Ask-js...${normal}"
mkdir -p $backup_working_directory/ask-js
# database
postgres_backup postgres-db-1 askjs ask-js $backup_working_directory
# configs, extra
cp $backup_local_folder/ask-js/compose.yaml $backup_working_directory/ask-js
cp $backup_local_folder/ask-js/config.json $backup_working_directory/ask-js
# =============================================================================
# pull in any other common configs and secrets
echo "${blue}Pulling in other configurations...${normal}"
mkdir -p $backup_working_directory/other/etc/caddy
mkdir -p $backup_working_directory/other/etc/secrets
mkdir -p $backup_working_directory/other/var/www/mta-sts/.well-known/
cp /etc/caddy/Caddyfile $backup_working_directory/other/etc/caddy/Caddyfile
cp -r /etc/secrets/* $backup_working_directory/other/etc/secrets/
cp /var/www/mta-sts/.well-known/mta-sts.txt $backup_working_directory/other/var/www/mta-sts/.well-known
# =============================================================================
# archive and compress everything
echo "${blue}Compressing everything into one archive...${normal}"
tar -cf "$backup_working_directory/$backup_output_tar" $backup_working_directory # create the archive
zstd -z -T3 -9 --rm "$backup_working_directory/$backup_output_tar" # compress the archive
# TODO: it may be possible to combine these steps so tar automatically compresses the archive with zstd instead of doing it separately
# =============================================================================
# upload backup to backblaze
echo "${blue}Uploading backup...${normal}"
b2_upload ${backup_working_directory} ${backup_output_tar}.zst
# =============================================================================
# cleanup
echo "${blue}Cleaning up...${normal}"
rm -fr ${backup_working_directory}/${backup_output_tar}.zst $backup_working_directory/*
# =============================================================================
# unload secrets - we already unload them for each vacuum/upload step, but we want to ensure they are gone
unset $(grep -v '^#' /etc/secrets/b2.env | sed -E 's/(.*)=.*/\1/' | xargs)
unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
elif [ "$synth_current_system" = "cerium" ]; then # cerium
postgres_vacuum_self
elif [ "$synth_current_system" = "synthnix" ]; then # synthnix
@@ -406,13 +530,6 @@ function backup_create_copy {
# ╰─────────────╯
function system_vacuum {
echo "${blue}vacuum:${normal} Running database vacuums for ${green}${synth_current_system}${normal}."
# external files containing secrets
if [ -f /etc/secrets/postgres.env ]; then
export $(grep -v '^#' /etc/secrets/postgres.env | xargs)
else
echo "${red}vacuum:${normal} Secrets don't exist. Exiting..."
exit 1
fi
# vacuum
if [ "$synth_current_system" = "phosphorus" ]; then # phosphorus
postgres_vacuum_self
@@ -421,20 +538,69 @@ function system_vacuum {
postgres_vacuum postgres-db-1 mastodon ${MASTODON_POSTGRES_PASSWORD}
elif [ "$synth_current_system" = "neptunium" ]; then # neptunium
postgres_vacuum_self
postgres_vacuum postgres-db-1 forgejo ${FORGEJO_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 ejabberd ${EJABBERD_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 askjs ${ASKJS_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 freshrss ${FRESHRSS_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 zitadel ${ZITADEL_POSTGRES_PASSWORD}
elif [ "$synth_current_system" = "cerium" ]; then # cerium
postgres_vacuum_self
echo "${blue}vacuum:${normal} ${green}${synth_current_system}${normal} doesn't have anything to vacuum."
elif [ "$synth_current_system" = "synthnix" ]; then # synthnix
# as synthnix doesn't really include much and serves as a place for members
# we just need to back up the home directory here
#
# WIP
echo "wip"
echo "${blue}vacuum:${normal} ${green}${synth_current_system}${normal} doesn't have anything to vacuum."
fi
# unload secrets - if we pass that they do exist, no need to check if they exist here again
# unload secrets - they already should be, but we want to ensure they are gone
unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
echo "${green}Vacuuming complete! Beep!~${normal}${normal}"
}
# ╭────────────────╮
# │ docker cleanup │
# ╰────────────────╯
function docker_cleanup {
# check if docker exists on the system
if [ ! -x "$(command -v docker)" ]; then
echo "${red}docker-cleanup:${normal} $synth_current_system does not include Docker."
return 1
fi
# prune everything that isn't running/not tied to any existing container
# this is usually dangerous but everything we have is already running 24/7 or has (important) data stored outside of an erasable volume
echo "${blue}docker-cleanup:${normal} Cleaning up Docker..."
docker image prune -af
docker volume prune -af
docker container prune -f
docker network prune -f
echo "${green}Done. Beep!${normal}"
}
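# (roughly equivalent in a single command, if preferred — with --volumes it also
# removes unused volumes, so it carries the same caveat as above:)
# docker system prune -af --volumes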
# ╭─────────────────────╮
# │ update certificates │
# ╰─────────────────────╯
# (for context: certificates are handled automatically by caddy. we just pull them out of caddy's special home directory to make some of them accessible to other services we run like email and xmpp)
function update_certificates {
# internal values - caddy's home may change at random
local caddy_home_directory=/var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory
local known_domains=("synth.download" "mx1.synth.download" "xmpp.synth.download" "muc.xmpp.synth.download" "upload.xmpp.synth.download" "proxy.xmpp.synth.download" "pubsub.xmpp.synth.download")
local certs_location=/etc/certs
echo "${blue}update-certs:${normal} Pulling certificates..."
mkdir -p $certs_location # it already should exist - but lets make sure
if [ -d "$caddy_home_directory" ]; then
for domain in "${known_domains[@]}"; do
cp $caddy_home_directory/$domain/$domain.crt $certs_location/$domain.crt
cp $caddy_home_directory/$domain/$domain.key $certs_location/$domain.key
done
# ensure the directory and pulled certs are readable by the services that consume them
chmod 755 $certs_location
chmod 755 $certs_location/*
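# (if the private keys shouldn't stay world-readable, a tighter split is possible —
# sketch only; the consuming services would then need access via owner or group:)
# chmod 644 $certs_location/*.crt
# chmod 640 $certs_location/*.key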
# done
echo "${green}Certificates pulled! beep!~${normal}"
return 0
else
echo "${red}update-certs:${normal} Failed to detect Caddy's home directory."
return 1
fi
}
# ╭────────────────────────────────────╮
# │ functions and variables - end here │
# ╰────────────────────────────────────╯
@@ -448,6 +614,9 @@ function system_vacuum {
# display the header
header
# check that everything we need is installed
#check_applications
# evaluate arguments and set environment variables to enable each command and see what should be executed
while [ -n "$1" ]; do
case "$1" in
@@ -472,6 +641,18 @@ while [ -n "$1" ]; do
detect_system
fi
system_vacuum;;
--docker-cleanup) # docker cleanup
root_check
if [ ! -v synth_current_system ]; then
detect_system
fi
docker_cleanup;;
--update-certs) # update certificates
root_check
if [ ! -v synth_current_system ]; then
detect_system
fi
update_certificates;;
*) # invalid option was given
invalid_command $1
exit 1;;