#!/usr/bin/env bash
# ╭────────────────────────────────────────────────────╮
# │ _ _ _ _ owo │
# │ | |__ ___| |_ __ ___ _ __| |__ ___ | |_ │
# │ | '_ \ / _ \ | '_ \ / _ \ '__| '_ \ / _ \| __| │
# │ | | | | __/ | |_) | __/ | | |_) | (_) | |_ │
# │ |_| |_|\___|_| .__/ \___|_| |_.__/ \___/ \__| │
# │ |_| │
# ╰────────────────────────────────────────────────────╯
# helperbot - synth.download's all-in-one script for managing everything. beep!
# =============================================================================
# exit immediately if an error occurs somewhere to prevent things from getting Worse
# may remove at some point (it interferes with `return 1` from functions and kills the
# entire script instead of just skipping the failed step), but unsure.
set -e
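# a softer alternative (a sketch, not enabled here): drop set -e and just report failures
# instead of dying on them, e.g.
#   set -o errtrace  # so the ERR trap also fires inside functions
#   trap 'echo "helperbot: command failed on line $LINENO" >&2' ERR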
# ╭──────────────────────────────────────╮
# │ functions and variables - start here │
# ╰──────────────────────────────────────╯
# beware of function spam following this point :3
# unset everything - ensure we're working with a clean state
all_known_variables=("synth_current_system" "synth_args_exist")
for variable in "${all_known_variables[@]}"; do
unset "$variable"
done
# set variable to determine if we actually received any arguments
if [ -n "$1" ]; then
synth_args_exist=1
fi
# check that all required utils are installed
function check_applications {
local all_required_applications=("bash" "wget" "curl" "jq" "unzip" "tar" "zstd" "backblaze-b2") # curl/jq/unzip are needed by the fediverse helpers
for application in "${all_required_applications[@]}"; do
if [ ! -x "$(command -v "$application")" ]; then
echo "${red}Error${normal}: ${application} is not installed. Please install it and try again. Beep!"
exit 1
fi
done
}
# attempt to detect the system based on hostname
function detect_system {
local valid_systems=("phosphorus" "neptunium" "cerium" "synthnix")
local current_hostname=$(hostname)
for system in "${valid_systems[@]}"; do
if [ "$current_hostname" = "$system" ]; then
synth_current_system=$system
echo "Detected ${blue}${system}${normal}."
return 0
fi
done
# report if no valid system was found
echo "${red}Failed to detect system.${normal}"
echo "We're most likely being run in an environment we don't know of."
echo "Exiting..."
exit 1
}
# define text formatting for output (only when stdout is a terminal)
if [[ -t 1 ]]; then
red=$(tput setaf 1);
green=$(tput setaf 2);
yellow=$(tput setaf 3);
blue=$(tput setaf 4);
pink=$(tput setaf 5);
cyan=$(tput setaf 6);
gray=$(tput setaf 8);
bold=$(tput bold)
underline=$(tput smul)
normal=$(tput sgr0)
fi
# =============================================================================
# ╭───────────────────╮
# │ defining messages │
# ╰───────────────────╯
# yes, i know spamming echo is a bad way of doing this, but the more efficient approaches (like a heredoc) would force me to remove the indenting, which makes this code harder for me to read 💔
# header
function header {
echo "╭────────────────╮"
echo "│ helperbot! owo │"
echo "╰────────────────╯"
echo
sleep 1 # grace period
}
# help info
function info_help {
echo "${pink}${bold}${underline}Usage:${normal} ${bold}helperbot${normal} [-h|-u|-b|-v] [--update-email-certs|--sync-blocklists|--update-frontends]"
echo
echo "${blue}${bold}${underline}Options:${normal}"
echo "${bold}-h${normal}, ${bold}--help${normal}"
echo " Show this help page."
echo
echo "${green}${bold}${underline}System maintenance:${normal}"
echo "${bold}-u${normal}, ${bold}--upgrade${normal}"
echo " Perform a full system upgrade, including containers and services."
echo
echo "${bold}-b${normal}, ${bold}--backup${normal}"
echo " Perform a backup of all known services."
echo
echo "${bold}-v${normal}, ${bold}--vacuum${normal}"
echo " Vacuum the postgresql databases."
echo
echo "${bold}--docker-cleanup${normal}"
echo " Cleans up and purges all unused Docker images, networks, containers and volumes."
echo
echo "${bold}--update-certs${normal}"
echo " Pull in various certificates from Caddy into ${underline}/etc/certs${normal} required for other services."
echo
echo "${cyan}${bold}${underline}Fediverse:${normal}"
echo "${bold}--sync-blocklists${normal}"
echo " Import blocklist from Sharkey -> Iceshrimp"
echo
echo "${bold}--update-frontends${normal}"
echo " Update standalone fediverse frontends."
echo
echo "helperbot automatically knows what to do for some actions based on this system's hostname. Beep!"
echo
echo "${yellow}${bold}This script is still generally a work-in-progress.${normal}"
echo "Report breakage or suggestions or improvments or whatever to here:"
echo "${blue}https://forged.synth.download/synth.download/synth.download${normal}"
echo
}
# invalid command message
function invalid_command {
echo "${red}Error:${normal} Invalid option \""$1"\"."
echo "\"helperbot is very confused... >~<\""
echo
echo "Run with ${bold}--help${normal} to see all options."
}
# root check
function root_check {
if [[ ${UID} != 0 ]]; then
echo "${red}helperbot must be run as root or with sudo permissions to perform this action!${normal} Beep!"
return 1
fi
}
# ╭─────────────────╮
# │ upgrade related │
# ╰─────────────────╯
# base system upgrade - generic steps for debian/ubuntu based systems
function base_system_upgrade {
echo "${cyan}Upgrading base system.${normal}"
echo "${blue}Doing standard apt upgrade...${normal}"
apt update
apt upgrade -y
echo "${blue}Try upgrading distro base...${normal}"
apt dist-upgrade -y
echo "${blue}Apt cleanup...${normal}"
apt clean
echo "${green}Base system upgraded!.${normal}"
}
# docker container updates
# reusable steps to update containers - update_docker_container [/srv/docker] [name_of_service_or_folder] [compose.yaml]
function update_docker_container {
if [ -d "$1/$2" ]; then
# pull the container
cd "$1"/"$2" && docker compose -f "$1/$2/$3" pull
docker compose -f "$1/$2/$3" down && docker compose -f "$1/$2/$3" up -d
else
echo "${red}docker:${normal} Folder $1/$2 does not exist."
fi
}
# ╭──────────────╮
# │ upgrade step │
# ╰──────────────╯
function system_upgrade {
#timestamp=$(date +'%Y%m%d%H%M%S')
#synth_upgrade_log=/tmp/upgrade-output-${timestamp}.txt
echo "${blue}upgrade:${normal} Running full system upgrade for ${green}${synth_current_system}${normal}."
#echo "Upgrade will be logged into ${yellow}${synth_upgrade_log}${normal} if needed." # logging doesn't work properly - check on later
if [ "$synth_current_system" = "phosphorus" ]; then # phosphorus
# apt/system related upgrade
base_system_upgrade
# docker
update_docker_container "/srv/docker" "sharkey" "compose.yaml"
update_docker_container "/srv/docker" "iceshrimp" "compose.yaml"
update_docker_container "/srv/docker" "mastodon" "compose.yaml"
update_docker_container "/srv/docker" "pds" "compose.yaml"
# done
echo "${green}System upgrade finished! beep!~${normal}"
elif [ "$synth_current_system" = "neptunium" ]; then # neptunium
# apt/system related upgrade
base_system_upgrade
# docker
update_docker_container "/srv/docker" "mailserver" "compose.yaml"
update_docker_container "/srv/docker" "ejabberd" "compose.yaml"
update_docker_container "/srv/docker" "zitadel" "compose.yaml"
update_docker_container "/srv/docker" "forgejo" "compose.yaml"
update_docker_container "/srv/docker" "forgejo" "compose-runner.yaml"
update_docker_container "/srv/docker" "freshrss" "compose.yaml"
update_docker_container "/srv/docker" "vaultwarden" "compose.yaml"
update_docker_container "/srv/docker" "ask-js" "compose.yaml"
# done
echo "${green}System upgrade finished! beep!~${normal}"
elif [ "$synth_current_system" = "cerium" ]; then # cerium
# apt/system related upgrade
base_system_upgrade
# docker
update_docker_container "/srv/docker" "redlib" "compose.yaml"
update_docker_container "/srv/docker" "safetwitch" "compose.yaml"
# done
echo "${green}System upgrade finished! beep!~${normal}"
echo "${red}Rebooting system.${normal}"
sleep 1 && systemctl reboot
elif [ "$synth_current_system" = "synthnix" ]; then # synthnix
# apt/system related upgrade
base_system_upgrade
# done
echo "${green}System upgrade finished! beep!~${normal}"
fi
}
# ╭────────────────╮
# │ backup related │
# ╰────────────────╯
# mostly just symlinks to commands because i think it looks less ugly (and easier to update)
# TODO:
# https://askubuntu.com/questions/193055/how-to-make-files-accessible-only-by-root
# make secrets on server only accessible by root.
# maybe make a command to autocheck this?? idk
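# a minimal sketch for the TODO above (assumes /etc/secrets is a flat directory of env files):
#   chown -R root:root /etc/secrets
#   chmod 700 /etc/secrets
#   chmod 600 /etc/secrets/*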
# psql vacuuming
# reusable step to vacuum databases - postgres_vacuum [postgres-db-1] [user_and_db_name] [password]
function postgres_vacuum {
# load postgres passwords
if [ -f /etc/secrets/postgres.env ]; then
export $(grep -v '^#' /etc/secrets/postgres.env | xargs)
else
echo "${red}postgres_vacuum:${normal} Postgresql Secrets don't exist. Exiting..."
return 1
fi
# vacuum
# psql reads PGPASSWORD - POSTGRES_PASSWORD is only used by the container's init scripts, so pass the password under the name psql expects
docker exec -it "$1" /bin/bash -c "PGPASSWORD=$3 psql -U $2 -d $2 -c 'VACUUM ANALYZE;'"
# unset secrets
unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
}
# postgres_vacuum_self
function postgres_vacuum_self {
# load postgres passwords
if [ -f /etc/secrets/postgres.env ]; then
export $(grep -v '^#' /etc/secrets/postgres.env | xargs)
else
echo "${red}postgres_vacuum_self:${normal} Postgresql Secrets don't exist. Exiting..."
return 1
fi
# vacuum self
docker exec -it postgres-db-1 /bin/bash -c "psql -U postgres -c 'VACUUM ANALYZE;'"
# unset secrets
unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
}
# psql backup
# reusable step to backup databases - postgres_backup [postgres-db-1] [user_and_db_name] [output_name] [$backup_working_directory]
function postgres_backup {
# a dump doesn't need a password here - the postgres image trusts local (unix-socket) connections inside the container
docker exec "$1" /bin/bash -c "pg_dump $2 --username $2 > $3.sql"
docker cp "$1":/"$3".sql "$4"/"$3"/"$3".sql
docker exec "$1" /bin/bash -c "rm $3.sql"
}
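# (a possible refinement, not wired in: pg_dump -Fc writes a compressed custom-format dump
#  that pg_restore can restore selectively; plain .sql is kept here for simplicity)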
# redis snapshot
# tells redis to make a snapshot - redis_snapshot [whatever-redis-1]
function redis_snapshot {
docker exec $1 redis-cli SAVE
}
# b2 upload
# load secrets then start uploading to backblaze b2 - b2_upload [$backup_working_directory] [$backup_output_tar.zst]
function b2_upload {
# load in secrets from external file
if [ -f /etc/secrets/b2.env ]; then
export $(grep -v '^#' /etc/secrets/b2.env | xargs)
else
echo "${red}b2_upload:${normal} B2 Secrets don't exist. Exiting..."
return 1
fi
# upload file specified
backblaze-b2 authorize-account "$B2_KEYID" "$B2_SECRET"
backblaze-b2 upload-file "$B2_BACKUP_BUCKET" "$1/$2" "$2" # callers already pass the .zst name, so don't append another extension
backblaze-b2 clear-account # just to ensure we won't stay authenticated afterwards
# clear out secrets
unset $(grep -v '^#' /etc/secrets/b2.env | sed -E 's/(.*)=.*/\1/' | xargs)
}
# ╭─────────────╮
# │ backup step │
# ╰─────────────╯
# TODO: it's probably possible to throw some of these steps into an array and loop over them, if that's even a good idea
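# e.g. (a sketch, not wired in): the plain "db dump + compose file" services could be data-driven:
#   for svc in zitadel freshrss forgejo; do
#     mkdir -p "$backup_working_directory/$svc"
#     postgres_backup postgres-db-1 "$svc" "$svc" "$backup_working_directory"
#     cp "$backup_local_folder/$svc/compose.yaml" "$backup_working_directory/$svc"
#   done
# services with redis snapshots or raw data directories would still need their own steps.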
function system_backup {
echo "${blue}backup:${normal} Running full system backup for ${green}${synth_current_system}${normal}."
if [ "$synth_current_system" = "phosphorus" ]; then # phosphorus
# variables - could probably be set locally but unsure how much this will dynamically change between systems
backup_local_folder=/srv/docker
backup_working_directory=/var/backups/phosphorus
backup_output_tar=phosphorus.tar
# =============================================================================
# initial steps - cleanup then create
rm -fr $backup_working_directory/*
mkdir -p $backup_working_directory
# =============================================================================
# call in database vacuuming function
echo "${blue}Calling in vacuuming...${normal}"
system_vacuum
# =============================================================================
# backup files - postgres (we just want to keep a copy of the compose file)
echo "${blue}Pulling in Postgres configurations...${normal}"
mkdir -p $backup_working_directory/postgres
cp -r $backup_local_folder/postgres/compose.yaml $backup_working_directory/postgres
# =============================================================================
# sharkey
echo "${blue}Pulling in Sharkey...${normal}"
mkdir -p $backup_working_directory/sharkey/.config
# database
postgres_backup postgres-db-1 misskey sharkey $backup_working_directory
# redis
redis_snapshot sharkey-redis-1
cp -r $backup_local_folder/sharkey/redis $backup_working_directory/sharkey
# configs, extra
cp $backup_local_folder/sharkey/compose.yaml $backup_working_directory/sharkey
cp -r $backup_local_folder/sharkey/.config $backup_working_directory/sharkey
# =============================================================================
# iceshrimp
echo "${blue}Pulling in Iceshrimp...${normal}"
mkdir -p $backup_working_directory/iceshrimp/config
# database
postgres_backup postgres-db-1 iceshrimp iceshrimp $backup_working_directory
# configs, extra
cp $backup_local_folder/iceshrimp/compose.yaml $backup_working_directory/iceshrimp
cp -r $backup_local_folder/iceshrimp/config $backup_working_directory/iceshrimp
# =============================================================================
# mastodon
echo "${blue}Pulling in Mastodon...${normal}"
mkdir -p $backup_working_directory/mastodon/.config
# database
postgres_backup postgres-db-1 mastodon mastodon $backup_working_directory
# redis
redis_snapshot mastodon-redis-1
cp -r $backup_local_folder/mastodon/redis $backup_working_directory/mastodon
# configs, extra
cp $backup_local_folder/mastodon/compose.yaml $backup_working_directory/mastodon
cp -r $backup_local_folder/mastodon/.config $backup_working_directory/mastodon
# =============================================================================
# pds
echo "${blue}Pulling in PDS...${normal}"
mkdir -p $backup_working_directory/pds
# there isn't a native way to "backup" the pds, so we shut it off and copy it
docker compose -f $backup_local_folder/pds/compose.yaml down
cp -r $backup_local_folder/pds/pds $backup_working_directory/pds
docker compose -f $backup_local_folder/pds/compose.yaml up -d
# configs, extra
cp $backup_local_folder/pds/compose.yaml $backup_working_directory/pds
# =============================================================================
# pull in any other common configs and secrets
echo "${blue}Pulling in other configurations...${normal}"
mkdir -p $backup_working_directory/other/etc/caddy
mkdir -p $backup_working_directory/other/etc/secrets
cp /etc/caddy/Caddyfile $backup_working_directory/other/etc/caddy/Caddyfile
cp -r /etc/secrets/* $backup_working_directory/other/etc/secrets/
# =============================================================================
# archive and compress everything
echo "${blue}Compressing everything into one archive...${normal}"
tar -cf "$backup_working_directory/$backup_output_tar" $backup_working_directory # create the archive
zstd -z -T3 -9 --rm "$backup_working_directory/$backup_output_tar" # compress the archive
# TODO: it may be possible to combine these steps so tar automatically compresses the archive with zstd instead of doing it separately
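# e.g. GNU tar can invoke zstd itself (a sketch, not enabled here):
#   tar --zstd -cf "$backup_working_directory/$backup_output_tar.zst" -C "$backup_working_directory" .
# (tar warns that the archive lives inside the tree being archived and skips it)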
# =============================================================================
# upload backup to backblaze
echo "${blue}Uploading backup...${normal}"
b2_upload ${backup_working_directory} ${backup_output_tar}.zst
# =============================================================================
# cleanup
echo "${blue}Cleaning up...${normal}"
rm -fr ${backup_working_directory}/${backup_output_tar}.zst $backup_working_directory/*
# =============================================================================
# unload secrets - we already unload them for each vacuum/upload step, but we want to ensure they're gone
unset $(grep -v '^#' /etc/secrets/b2.env | sed -E 's/(.*)=.*/\1/' | xargs)
unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
elif [ "$synth_current_system" = "neptunium" ]; then # neptunium
# variables - could probably be set locally but unsure how much this will dynamically change between systems
backup_local_folder=/srv/docker
backup_working_directory=/var/backups/neptunium
backup_output_tar=neptunium.tar
# =============================================================================
# initial steps - cleanup then create
rm -fr $backup_working_directory/*
mkdir -p $backup_working_directory
# =============================================================================
# call in database vacuuming function
echo "${blue}Calling in vacuuming...${normal}"
system_vacuum
# =============================================================================
# backup files - postgres (we just want to keep a copy of the compose file)
echo "${blue}Pulling in Postgres configurations...${normal}"
mkdir -p $backup_working_directory/postgres
cp -r $backup_local_folder/postgres/compose.yaml $backup_working_directory/postgres
# =============================================================================
# backup files - zitadel
echo "${blue}Pulling in ZITADEL...${normal}"
mkdir -p $backup_working_directory/zitadel
# database
postgres_backup postgres-db-1 zitadel zitadel $backup_working_directory
# configs, extra
cp $backup_local_folder/zitadel/compose.yaml $backup_working_directory/zitadel
# =============================================================================
# freshrss
echo "${blue}Pulling in FreshRSS...${normal}"
mkdir -p $backup_working_directory/freshrss
# database
postgres_backup postgres-db-1 freshrss freshrss $backup_working_directory
cp -r $backup_local_folder/freshrss/data $backup_working_directory/freshrss
# configs, extra
cp -r $backup_local_folder/freshrss/extensions $backup_working_directory/freshrss
cp $backup_local_folder/freshrss/compose.yaml $backup_working_directory/freshrss
# =============================================================================
# vaultwarden
echo "${blue}Pulling in Vaultwarden...${normal}"
mkdir -p $backup_working_directory/vaultwarden/.config
# data - similar case to the pds, there isn't a native way to make a backup
docker compose -f $backup_local_folder/vaultwarden/compose.yaml down
cp -r $backup_local_folder/vaultwarden/vw-data $backup_working_directory/vaultwarden
docker compose -f $backup_local_folder/vaultwarden/compose.yaml up -d
# configs, extra
cp $backup_local_folder/vaultwarden/compose.yaml $backup_working_directory/vaultwarden
# =============================================================================
# mailserver - TEMP: eventually we'll migrate to mailu and this will need to be changed!!!!!!
echo "${blue}Pulling in mailserver...${normal}"
mkdir -p $backup_working_directory/mailserver/docker-data
# data - once again - no native way to make a backup
docker compose -f $backup_local_folder/mailserver/compose.yaml down
cp -r $backup_local_folder/mailserver/docker-data/dms $backup_working_directory/mailserver/docker-data
docker compose -f $backup_local_folder/mailserver/compose.yaml up -d
# configs, extra
cp $backup_local_folder/mailserver/compose.yaml $backup_working_directory/mailserver
cp $backup_local_folder/mailserver/mailserver.env $backup_working_directory/mailserver
# =============================================================================
# ejabberd
echo "${blue}Pulling in ejabberd...${normal}"
mkdir -p $backup_working_directory/ejabberd
# database
postgres_backup postgres-db-1 ejabberd ejabberd $backup_working_directory
cp -r $backup_local_folder/ejabberd/files $backup_working_directory/ejabberd
# configs, extra
cp $backup_local_folder/ejabberd/compose.yaml $backup_working_directory/ejabberd
cp -r $backup_local_folder/ejabberd/conf $backup_working_directory/ejabberd
# =============================================================================
# forgejo
echo "${blue}Pulling in Forgejo...${normal}"
mkdir -p $backup_working_directory/forgejo
# database
postgres_backup postgres-db-1 forgejo forgejo $backup_working_directory
cp -r $backup_local_folder/forgejo/forgejo $backup_working_directory/forgejo
# configs, extra
cp $backup_local_folder/forgejo/compose.yaml $backup_working_directory/forgejo
# =============================================================================
# ask-js
echo "${blue}Pulling in Ask-js...${normal}"
mkdir -p $backup_working_directory/ask-js
# database
postgres_backup postgres-db-1 askjs ask-js $backup_working_directory
# configs, extra
cp $backup_local_folder/ask-js/compose.yaml $backup_working_directory/ask-js
cp $backup_local_folder/ask-js/config.json $backup_working_directory/ask-js
# =============================================================================
# pull in any other common configs and secrets
echo "${blue}Pulling in other configurations...${normal}"
mkdir -p $backup_working_directory/other/etc/caddy
mkdir -p $backup_working_directory/other/etc/secrets
mkdir -p $backup_working_directory/other/var/www/mta-sts/.well-known/
cp /etc/caddy/Caddyfile $backup_working_directory/other/etc/caddy/Caddyfile
cp -r /etc/secrets/* $backup_working_directory/other/etc/secrets/
cp /var/www/mta-sts/.well-known/mta-sts.txt $backup_working_directory/other/var/www/mta-sts/.well-known
# =============================================================================
# archive and compress everything
echo "${blue}Compressing everything into one archive...${normal}"
tar -cf "$backup_working_directory/$backup_output_tar" $backup_working_directory # create the archive
zstd -z -T3 -9 --rm "$backup_working_directory/$backup_output_tar" # compress the archive
# TODO: it may be possible to combine these steps so tar automatically compresses the archive with zstd instead of doing it separately
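# (see the tar --zstd sketch in the phosphorus branch above)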
# =============================================================================
# upload backup to backblaze
echo "${blue}Uploading backup...${normal}"
b2_upload ${backup_working_directory} ${backup_output_tar}.zst
# =============================================================================
# cleanup
echo "${blue}Cleaning up...${normal}"
rm -fr ${backup_working_directory}/${backup_output_tar}.zst $backup_working_directory/*
# =============================================================================
# unload secrets - we already unload them for each vacuum/upload step, but we want to ensure they're gone
unset $(grep -v '^#' /etc/secrets/b2.env | sed -E 's/(.*)=.*/\1/' | xargs)
unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
elif [ "$synth_current_system" = "cerium" ]; then # cerium
postgres_vacuum_self
elif [ "$synth_current_system" = "synthnix" ]; then # synthnix
# as synthnix doesn't really include much and serves as a place for members
# we just need to back up the home directory here
#
# WIP
echo "wip"
fi
echo "${green}System backup finished! beep!~${normal}"
}
# ╭─────────────╮
# │ vacuum step │
# ╰─────────────╯
function system_vacuum {
echo "${blue}vacuum:${normal} Running database vacuums for ${green}${synth_current_system}${normal}."
# vacuum
if [ "$synth_current_system" = "phosphorus" ]; then # phosphorus
postgres_vacuum_self
postgres_vacuum postgres-db-1 misskey ${SHARKEY_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 iceshrimp ${ICESHRIMP_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 mastodon ${MASTODON_POSTGRES_PASSWORD}
elif [ "$synth_current_system" = "neptunium" ]; then # neptunium
postgres_vacuum_self
postgres_vacuum postgres-db-1 forgejo ${FORGEJO_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 ejabberd ${EJABBERD_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 askjs ${ASKJS_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 freshrss ${FRESHRSS_POSTGRES_PASSWORD}
postgres_vacuum postgres-db-1 zitadel ${ZITADEL_POSTGRES_PASSWORD}
elif [ "$synth_current_system" = "cerium" ]; then # cerium
echo "${blue}vacuum:${normal} ${green}${synth_current_system}${normal} doesn't have anything to vacuum."
elif [ "$synth_current_system" = "synthnix" ]; then # synthnix
echo "${blue}vacuum:${normal} ${green}${synth_current_system}${normal} doesn't have anything to vacuum."
fi
# unload secrets - they already should be unset, but let's make sure
unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
echo "${green}Vacuuming complete! Beep!~${normal}${normal}"
}
# ╭────────────────╮
# │ docker cleanup │
# ╰────────────────╯
function docker_cleanup {
# check if docker exists on the system
if [ ! -x "$(command -v docker)" ]; then
echo "${red}docker-cleanup:${normal} $synth_current_system does not include Docker."
return 1
fi
# prune everything that isn't running/not tied to any existing container
# this is usually dangerous but everything we have is already running 24/7 or has (important) data stored outside of an erasable volume
echo "${blue}docker-cleanup:${normal} Cleaning up Docker..."
docker image prune -af
docker volume prune -af
docker container prune -f
docker network prune -f
echo "${green}Done. Beep!${normal}"
}
# ╭─────────────────────╮
# │ update certificates │
# ╰─────────────────────╯
# (for context: certificates are handled automatically by caddy. we just pull them out of caddy's special home directory to make some of them accessible to other services we run like email and xmpp)
function update_certificates {
# internal values - caddy's home may change at random
local caddy_home_directory=/var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory
local known_domains=("synth.download" "mx1.synth.download" "xmpp.synth.download" "muc.xmpp.synth.download" "upload.xmpp.synth.download" "proxy.xmpp.synth.download" "pubsub.xmpp.synth.download")
local certs_location=/etc/certs
echo "${blue}update-certs:${normal} Pulling certificates..."
mkdir -p $certs_location # it already should exist - but lets make sure
if [ -d "$caddy_home_directory" ]; then
for domain in "${known_domains[@]}"; do
cp $caddy_home_directory/$domain/$domain.crt $certs_location/$domain.crt
cp $caddy_home_directory/$domain/$domain.key $certs_location/$domain.key
done
# ensure permissions are set correctly
chmod 755 $certs_location
chmod 755 $certs_location/*
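# NOTE (an assumption, not enforced here): 755 leaves the *.key files world-readable.
# if the consuming services can share a group, something tighter like
#   chgrp certaccess $certs_location/*.key && chmod 640 $certs_location/*.key
# may be preferable ("certaccess" is a hypothetical group name).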
# done
echo "${green}Certificates pulled! beep!~${normal}"
return 0
else
echo "${red}update-certs:${normal} Failed to detect Caddy's home directory."
return 1
fi
}
# ╭───────────────────╮
# │ fediverse related │
# ╰───────────────────╯
# sync blocklists from sharkey to iceshrimp
function fedi_sync_blocklists {
local sharkey_instance=localhost:60628
local iceshrimp_instance=localhost:24042
# this command will only work on phosphorus, where the instances are directly being run from
# (it doesn't *have* to, but it's much faster to do it over localhost than domain)
if [[ "$synth_current_system" != "phosphorus" ]]; then
echo "${red}sync-blocklists:${normal} Sorry, this command will only work on phosphorus."
return 1
fi
echo "${blue}sync-blocklists:${normal} Syncing blocks from Sharkey to Iceshrimp..."
if [[ ${UID} == 0 ]]; then
echo
echo "${yellow}${bold}Notice:${normal} This command has been ran as root! For the sake of safety and Security™ reasons, please run this command as your standard user."
echo "If not already configured for yourself, please put a file under ${bold}\$XDG_DATA_HOME/fedi-tokens.env${normal} (directly) which should include the following, replacing the information as needed:"
echo "${gray}──────────────────────────────────────────────${normal}"
echo "MK_TOKEN=your_admin_misskey_token_here"
echo "SHRIMP_TOKEN=your_admin_iceshrimp_token_here"
return 1
else
if [ -f "$XDG_DATA_HOME/fedi-tokens.env" ]; then
# load keys
export $(grep -v '^#' $XDG_DATA_HOME/fedi-tokens.env | xargs)
# grab the instance data from sharkey
json_data=$(curl -s "http://${sharkey_instance}/api/admin/meta" -H "Content-Type: application/json" -X POST -d "{\"i\": \"$MK_TOKEN\"}")
# throw into a loop to block all instances on iceshrimp
echo "$json_data" | jq -r -c '.blockedHosts[]' | while read -r host; do
curl -w "HTTP %{response_code} " "http://${iceshrimp_instance}/api/iceshrimp/admin/instances/$host/block?imported=true&reason=Synced%20from%20booping.synth.download" -H "Host: beeping.synth.download" -H "Authorization: Bearer $SHRIMP_TOKEN" -X POST
echo "Blocked host: $host"
done
# unset keys
unset $(grep -v '^#' "$XDG_DATA_HOME"/fedi-tokens.env | sed -E 's/(.*)=.*/\1/' | xargs) # same file we loaded from, not /etc/secrets
# done
echo "${green}Done syncing blocks.${normal}"
else
echo "${red}sync-blocklists:${normal} $XDG_DATA_HOME/fedi-tokens.env doesn't exist."
return 1
fi
fi
}
# update standalone frontends
function fedi_update_frontends {
local mastodon_fe_url=https://github.com/sneexy-boi/synth.download/releases/latest/download/masto-fe.zip
local akkoma_fe_url=https://github.com/sneexy-boi/synth.download/releases/latest/download/akkoma-fe.zip
local phanpy_url=https://github.com/cheeaun/phanpy/releases/latest/download/phanpy-dist.zip
local pl_fe_url=https://pl.mkljczk.pl/pl-fe.zip
local frontend_folder=/var/www/fedi-frontends
# frontends are on neptunium so it's less hassle to forward with caddy
if [[ "$synth_current_system" != "neptunium" ]]; then
echo "${red}sync-blocklists:${normal} Sorry, this command will only work on neptunium."
return 1
fi
# _repeat_process [$frontend_fe_url] [file_name] [folder_name]
function _repeat_process {
wget "$1" -O /tmp/"$2".zip
unzip -o /tmp/"$2" -d $frontend_folder/"$3"
rm /tmp/"$2"
}
# TODO: it's probably possible to turn this into an array loop of some sort
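# e.g. (a sketch, not wired in): "url|zip_name|folder" tuples would collapse the repeated steps below:
#   local targets=(
#     "$mastodon_fe_url|masto-fe|chuckya-fe"
#     "$akkoma_fe_url|akkoma-fe|akkoma-fe"
#     "$phanpy_url|phanpy|phanpy"
#     "$pl_fe_url|pl-fe|pl-fe"
#   )
#   for t in "${targets[@]}"; do
#     IFS='|' read -r url file folder <<< "$t"
#     _repeat_process "$url" "$file" "$folder"
#   done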
echo "${blue}update-frontends:${normal} Updating standalone frontends..."
if [ -f "$frontend_folder" ]; then
# update mastodon
echo "${blue}Updating Mastodon...${normal}"
_repeat_process $mastodon_fe_url masto-fe chuckya-fe
echo "${green}Okay.${normal}"
# update akkoma
echo "${blue}Updating Akkoma...${normal}"
_repeat_process $akkoma_fe_url akkoma-fe akkoma-fe
echo "${green}Okay.${normal}"
# update phanpy
echo "${blue}Updating Phanpy...${normal}"
_repeat_process $phanpy_url phanpy phanpy
echo "${green}Okay.${normal}"
# update pl-fe
echo "${blue}Updating pl-fe...${normal}"
_repeat_process $pl_fe_url pl-fe pl-fe
echo "${green}Okay.${normal}"
else
echo "${red}update-frontends:${normal} $frontend_folder doesn't exist."
return 1
fi
}
# ╭────────────────────────────────────╮
# │ functions and variables - end here │
# ╰────────────────────────────────────╯
# =============================================================================
# ╭──────────────╮
# │ main program │
# ╰──────────────╯
# display the header
header
# check that everything we need is installed
#check_applications
# evaluate arguments and set environment variables to enable each command and see what should be executed
while [ -n "$1" ]; do
case "$1" in
-h | --help) # display help info
info_help
exit 0;;
-u | --upgrade) # upgrade system
root_check
if [ ! -v synth_current_system ]; then
detect_system
fi
system_upgrade;;
-b | --backup) # backup system
root_check
if [ ! -v synth_current_system ]; then
detect_system
fi
system_backup;;
-v | --vacuum) # vacuum database
root_check
if [ ! -v synth_current_system ]; then
detect_system
fi
system_vacuum;;
--docker-cleanup) # docker cleanup
root_check
if [ ! -v synth_current_system ]; then
detect_system
fi
docker_cleanup;;
--update-certs) # email certificates
root_check
if [ ! -v synth_current_system ]; then
detect_system
fi
update_certificates;;
--sync-blocklists) # fediverse: sync sharkey -> iceshrimp blocklists
if [ ! -v synth_current_system ]; then
detect_system
fi
fedi_sync_blocklists;;
--update-frontends) # fediverse: update standalone frontends
root_check
if [ ! -v synth_current_system ]; then
detect_system
fi
fedi_update_frontends;;
*) # invalid option was given
invalid_command "$1"
exit 1;;
esac
shift 1
done
# show help if we didn't receive any commands either
if [ ! -v synth_args_exist ]; then
info_help
exit 0
fi
# unset everything
for variable in "${all_known_variables[@]}"; do
unset "$variable"
done