#!/usr/bin/env bash
|
|
|
|
# ╭────────────────────────────────────────────────────╮
|
|
# │ _ _ _ _ owo │
|
|
# │ | |__ ___| |_ __ ___ _ __| |__ ___ | |_ │
|
|
# │ | '_ \ / _ \ | '_ \ / _ \ '__| '_ \ / _ \| __| │
|
|
# │ | | | | __/ | |_) | __/ | | |_) | (_) | |_ │
|
|
# │ |_| |_|\___|_| .__/ \___|_| |_.__/ \___/ \__| │
|
|
# │ |_| │
|
|
# ╰────────────────────────────────────────────────────╯
|
|
# helperbot - synth.download's all in one script for managing everything. beep!
|
|
|
|
# =============================================================================
|
|
|
|
# exit immediately if an error occurs somewhere to prevent things from getting Worse
# may remove at some point (it interferes with return 1 and kills the entire script instead of skipping), but unsure.
# NOTE(review): because of set -e, any helper below that does `return 1` when
# called at top level terminates the whole script, not just that action.
set -e
|
|
|
|
# ╭──────────────────────────────────────╮
|
|
# │ functions and variables - start here │
|
|
# ╰──────────────────────────────────────╯
|
|
# beware of function spam following this point :3
|
|
|
|
# unset everything - ensure we're working with a clean state
# (every global this script may set lives in this list; add new ones here)
all_known_variables=("synth_current_system" "synth_args_exist")

for variable in "${all_known_variables[@]}"; do
  unset "$variable" # quoted so odd values can never word-split or glob
done

# set variable to determine if we actually received any arguments
# ($# is robust even when the first argument is an empty string,
# which `[ -n "$1" ]` would misreport as "no arguments")
if [ "$#" -gt 0 ]; then
  synth_args_exist=1
fi
|
|
|
|
# check that all required utils are installed
# Exits the script with status 1 (naming the first missing tool) so nothing
# below ever runs with a missing dependency.
function check_applications {
  local all_required_applications=("bash" "wget" "tar" "unzip" "zstd" "backblaze-b2" "jq")
  local application

  for application in "${all_required_applications[@]}"; do
    # `command -v` is the portable "is this installed?" test; the expansion is
    # quoted so a weird entry can never split or glob
    if ! command -v "$application" > /dev/null 2>&1; then
      echo "${red}Error${normal}: $application is not installed. Please install it and try again. Beep!"
      exit 1
    fi
  done
}
|
|
|
|
# attempt to detect the system based on hostname
# On success, sets the global synth_current_system and returns 0.
# On an unknown hostname it reports and exits the whole script (status 1).
function detect_system {
  local valid_systems=("phosphorus" "neptunium")
  local current_hostname
  current_hostname=$(hostname) # split from `local` so a hostname failure isn't masked
  local system

  for system in "${valid_systems[@]}"; do
    if [ "$current_hostname" = "$system" ]; then
      synth_current_system=$system
      echo "Detected ${blue}${system}${normal}."
      return 0
    fi
  done

  # report if no valid system was found
  echo "${red}Failed to detect system.${normal}"
  echo "We're most likely being run in an environment we don't know of."
  echo "Exiting..."
  exit 1
}
|
|
|
|
# defining text formatting for text output
# Only emit escape codes when stdout is a terminal; otherwise all of these
# stay unset and expand to empty strings in the messages below.
if [[ -t 1 ]]; then
  red=$(tput setaf 1)
  green=$(tput setaf 2)
  yellow=$(tput setaf 3)
  blue=$(tput setaf 4)
  pink=$(tput setaf 5)
  cyan=$(tput setaf 6)
  gray=$(tput setaf 8)
  bold=$(tput bold)
  underline=$(tput smul)
  normal=$(tput sgr 0)
fi
|
|
|
|
# =============================================================================
|
|
|
|
# ╭───────────────────╮
|
|
# │ defining messages │
|
|
# ╰───────────────────╯
|
|
# yes i know spamming echo is a bad way of doing this but other actually efficient requires me to remove indenting which makes it harder to read this code for me 💔
|
|
|
|
# header
# Prints the helperbot banner, then pauses briefly before any real work.
function header {
  printf '%s\n' \
    "╭────────────────╮" \
    "│ helperbot! owo │" \
    "╰────────────────╯" \
    ""
  sleep 1 # grace period
}
|
|
|
|
# help info
# Prints the full usage/help text. Colour variables expand to nothing when
# stdout isn't a terminal, so this is safe to pipe.
# NOTE(review): the Usage line advertises --update-email-certs but the option
# documented below is --update-certs — confirm which spelling the argument
# parser actually accepts and align them.
function info_help {
  cat <<EOF
${pink}${bold}${underline}Usage:${normal} ${bold}helperbot${normal} [-h|-u|-b|-v] [--update-email-certs|--sync-blocklists|--update-frontends]

${blue}${bold}${underline}Options:${normal}
${bold}-h${normal}, ${bold}--help${normal}
 Show this help page.

${green}${bold}${underline}System maintenance:${normal}
${bold}-u${normal}, ${bold}--upgrade${normal}
 Perform a full system upgrade, including containers and services.

${bold}-b${normal}, ${bold}--backup${normal}
 Perform a backup of all known services.

${bold}-v${normal}, ${bold}--vacuum${normal}
 Vacuum the postgresql databases.

${bold}-r${normal}, ${bold}--reboot${normal}
 Restart the system. Used if needed during command chains.

${bold}--psql${normal}
 Enter the postgresql shell.

${bold}--docker-cleanup${normal}
 Cleans up and purges all unused Docker images, networks, containers and volumes.

${bold}--update-certs${normal}
 Pull in various certificates from Caddy into ${underline}/etc/certs${normal} required for other services.

${bold}--update-helperbot${normal}
 Create a backup of the current helperbot into ${underline}/usr/local/bin/helperbot.bak${normal} and does an in-place upgrade of itself.

${cyan}${bold}${underline}Caddy:${normal}
${bold}-cr${normal}, ${bold}--caddy-reload${normal}
 (Live-)Reload the Caddy configurations.

${bold}-cre${normal}, ${bold}--caddy-restart${normal}
 Restart the Caddy container.

${cyan}${bold}${underline}Fediverse:${normal}
${bold}--sync-blocklists${normal}
 Import blocklist from Sharkey -> Iceshrimp

${bold}--update-frontends${normal}
 Update standalone fediverse frontends.

helperbot automatically knows what to do for some actions based on this system's hostname. Beep!

${yellow}${bold}This script is still generally a work-in-progress.${normal}
Report breakage or suggestions or improvements or whatever to here:
${blue}https://forged.synth.download/synth.download/synth.download${normal}

EOF
}
|
|
|
|
# invalid command message
# $1 is the unrecognized option, echoed back verbatim inside quotes.
function invalid_command {
  printf '%s\n' "${red}Error:${normal} Invalid option \"$1\"."
  printf '%s\n' "\"helperbot is very confused... >~<\""
  printf '\n'
  printf '%s\n' "Run with ${bold}--help${normal} to see all options."
}
|
|
|
|
# root check
# Returns 0 when running as root, otherwise prints a reminder and returns 1
# so callers can gate privileged actions on it.
function root_check {
  if (( UID == 0 )); then
    return 0
  fi
  echo "${red}helperbot must be run as root or with sudo permissions to perform this action!${normal} Beep!"
  return 1
}
|
|
|
|
# ╭─────────────────╮
|
|
# │ upgrade related │
|
|
# ╰─────────────────╯
|
|
|
|
# base system upgrade - generic steps for debian/ubuntu based systems
# Runs a full apt cycle: refresh indexes, upgrade packages, attempt a
# dist-upgrade, then drop the downloaded package cache.
# Requires root and network access; under set -e any apt failure aborts the
# whole script.
function base_system_upgrade {
  echo "${cyan}Upgrading base system.${normal}"
  echo "${blue}Doing standard apt upgrade...${normal}"
  apt update
  apt upgrade -y
  echo "${blue}Try upgrading distro base...${normal}"
  # dist-upgrade may add/remove packages where plain upgrade would hold them
  apt dist-upgrade -y
  echo "${blue}Apt cleanup...${normal}"
  apt clean
  echo "${green}Base system upgraded!.${normal}"
}
|
|
|
|
# docker container updates
# reusable steps to update containers - update_docker_container [/srv/docker] [name_of_service_or_folder] [compose.yaml]
#   $1 - base directory holding the service folders (e.g. /srv/docker)
#   $2 - service/folder name
#   $3 - compose file name inside that folder
# Pulls the latest images for the stack. A missing folder is reported (to
# stderr) but non-fatal, so one absent service doesn't stop the upgrade run.
# Note: the `cd` deliberately persists, matching the original behavior that
# later steps in the caller rely on explicit `cd`s anyway.
function update_docker_container {
  local base_dir=$1 service=$2 compose_file=$3

  if [ -d "$base_dir/$service" ]; then
    # cd first so relative paths inside the compose file (env files, bind
    # mounts) resolve the same way they would for a manual pull
    cd "$base_dir/$service" && docker compose -f "$base_dir/$service/$compose_file" pull
  else
    echo "${red}docker:${normal} Folder $base_dir/$service does not exist." >&2
  fi
}
|
|
|
|
# deno install - just runs the demo script which will install/update deno, only used for neptunium currently for pds-dash
# Pipes Deno's official install.sh to sh with DENO_INSTALL=/usr/local so the
# binary lands on the system PATH. No-op on every system except neptunium.
function deno_install {
  if [ "$synth_current_system" = "neptunium" ]; then # only used on neptunium
    echo "${blue}Running Deno installer.${normal}"
    # network access required; a failed download aborts via set -e / curl -f
    curl -fsSL https://deno.land/install.sh | sudo DENO_INSTALL=/usr/local sh -s -- -y # we assume deno has some error checking and with our script config will automatically stop for any issues
    echo "${green}Okay.${normal}"
  fi
}
|
|
|
|
# ╭──────────────╮
# │ upgrade step │
# ╰──────────────╯
# Full-system upgrade driver. Branches on the detected host:
#   phosphorus - apt upgrade, then pull every fediverse/docker stack and run
#                Mastodon's two-phase database migrations.
#   neptunium  - apt upgrade, Deno, all service containers, then rebuild the
#                static sites (main site, pds-dash, pdsadmin-web).
# Requires root, network access, and docker; under set -e any failing step
# aborts the whole script.
function system_upgrade {
  #timestamp=$(date +'%Y%m%d%H%M%S')
  #synth_upgrade_log=/tmp/upgrade-output-${timestamp}.txt
  echo "${blue}upgrade:${normal} Running full system upgrade for ${green}${synth_current_system}${normal}."
  #echo "Upgrade will be logged into ${yellow}${synth_upgrade_log}${normal} if needed." # logging doesn't work properly - check on later

  if [ "$synth_current_system" = "phosphorus" ]; then # phosphorus
    # apt/system related upgrade
    base_system_upgrade

    # docker
    update_docker_container "/srv/docker" "aode" "compose.yaml"
    update_docker_container "/srv/docker" "sharkey" "compose.yaml"
    update_docker_container "/srv/docker" "iceshrimp" "compose.yaml"

    # mastodon - requires db migrations to be executed manually
    # (pre-deployment migrations, restart, then post-deployment migrations,
    # per Mastodon's documented zero-downtime upgrade flow)
    update_docker_container "/srv/docker" "mastodon" "compose.yaml"
    cd /srv/docker/mastodon && \
      docker compose run --rm -e SKIP_POST_DEPLOYMENT_MIGRATIONS=true web bundle exec rails db:migrate && \
      docker compose down && docker compose up -d && \
      docker compose run --rm web bundle exec rails db:migrate && \
      docker compose down && docker compose up -d
    # ========
    update_docker_container "/srv/docker" "pds" "compose.yaml"

    # done
    echo "${green}System upgrade finished! beep!~${normal}"
    echo "${yellow}Restart the system/selective services to complete upgrades.${normal}"
  elif [ "$synth_current_system" = "neptunium" ]; then # neptunium
    # apt/system related upgrade
    base_system_upgrade
    deno_install

    # docker
    update_docker_container "/srv/docker" "ask-js" "compose.yaml"
    update_docker_container "/srv/docker" "caddy" "compose.yaml"
    update_docker_container "/srv/docker" "ejabberd" "compose.yaml"
    update_docker_container "/srv/docker" "forgejo" "compose.yaml"
    update_docker_container "/srv/docker" "freshrss" "compose.yaml"
    update_docker_container "/srv/docker" "mailserver" "compose.yaml"
    update_docker_container "/srv/docker" "redlib" "compose.yaml"
    #update_docker_container "/srv/docker" "safetwitch" "compose.yaml"
    update_docker_container "/srv/docker" "vaultwarden" "compose.yaml"
    update_docker_container "/srv/docker" "zitadel" "compose.yaml"
    update_docker_container "/srv/docker/forgejo" "runner" "compose.yaml"

    # sites/others
    echo "${blue}Pulling main site...${normal}"
    if [ ! -x "$(command -v npm)" ]; then # npm is required - don't continue if it's not installed
      echo "${red}Error(site)${normal}: npm is not installed. Please install it and try again. Beep!"
      exit 1
    fi
    cd /var/www/site && git pull && \
      npm install && \
      npm run build-clean

    # pds-dash
    echo "${blue}Updating pds-dash...${normal}"
    cd /var/www/pds-dash && git pull && \
      deno install && \
      deno task build

    # pdsadmin-web
    echo "${blue}Updating pdsadmin-web...${normal}"
    if [ ! -x "$(command -v pnpm)" ]; then # pnpm is used here - npm DOES work, but rather use what the devs want
      echo "${red}Error(pdsadmin-web)${normal}: pnpm is not installed. Please install it and try again. Beep!"
      exit 1
    fi
    cd /var/www/pdsadmin-web && git pull && \
      pnpm i && \
      pnpm build

    # done
    echo "${green}System upgrade finished! beep!~${normal}"
    echo "${yellow}Restart the system/selective services to complete upgrades.${normal}"
  fi
}
|
|
|
|
# ╭────────────────╮
|
|
# │ backup related │
|
|
# ╰────────────────╯
|
|
# mostly just symlinks to commands because i think it looks less ugly (and easier to update)
|
|
|
|
# TODO:
|
|
# https://askubuntu.com/questions/193055/how-to-make-files-accessible-only-by-root
|
|
# make secrets on server only accessible by root.
|
|
# maybe make a command to autocheck this?? idk
|
|
|
|
# psql vacuuming
# reusable step to vacuum databases - postgres_vacuum [postgres-db-1] [user_and_db_name] [password]
#   $1 - postgres container name
#   $2 - role name, also used as the database name
#   $3 - that role's password
# Loads /etc/secrets/postgres.env for the duration of the call, runs
# VACUUM ANALYZE inside the container, then unsets the loaded variables.
function postgres_vacuum {
  # load postgres passwords
  # (export every KEY=VAL pair from the env file; the unquoted expansion is
  # intentional so each pair becomes its own argument to export)
  if [ -f /etc/secrets/postgres.env ]; then
    export $(grep -v '^#' /etc/secrets/postgres.env | xargs)
  else
    echo "${red}postgres_vacuum:${normal} Postgresql Secrets don't exist. Exiting..."
    return 1
  fi

  # vacuum
  # NOTE(review): POSTGRES_PASSWORD is the image's *init-time* variable, not
  # one psql reads when connecting (that would be PGPASSWORD). This presumably
  # works because the container allows local trust auth — confirm.
  docker exec "$1" /bin/bash -c "POSTGRES_PASSWORD="$3" psql -U "$2" -d "$2" -c 'VACUUM ANALYZE;'"

  # unset secrets
  # (strip the =VALUE part, keep the KEY names, unset them all)
  unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
}
|
|
|
|
# postgres_vacuum_self
# Vacuums the cluster-wide databases as the postgres superuser inside the
# hardcoded postgres-db-1 container.
# NOTE(review): the secrets loaded here are never referenced by the psql
# command below (no password is passed) — the load/unset pair appears to be
# copied from postgres_vacuum; confirm whether it's actually needed.
function postgres_vacuum_self {
  # load postgres passwords
  # (unquoted expansion intentional: one exported KEY=VAL per word)
  if [ -f /etc/secrets/postgres.env ]; then
    export $(grep -v '^#' /etc/secrets/postgres.env | xargs)
  else
    echo "${red}postgres_vacuum_self:${normal} Postgresql Secrets don't exist. Exiting..."
    return 1
  fi

  # vacuum self (superuser connection; relies on local trust auth in-container)
  docker exec postgres-db-1 /bin/bash -c "psql -U postgres -c 'VACUUM ANALYZE;'"

  # unset secrets
  unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
}
|
|
|
|
# psql backup
# reusable step to backup databases - postgres_backup [postgres-db-1] [user_and_db_name] [output_name] [$backup_working_directory]
#   $1 - postgres container name
#   $2 - database name (also the role used for the dump)
#   $3 - output file base name (without .sql)
#   $4 - backup working directory on the host; the dump lands in $4/$3/$3.sql
function postgres_backup {
  local container=$1 database=$2 output_name=$3 working_dir=$4

  # for some reason, doing a dump *doesn't* require a password apparently -
  # presumably local trust auth inside the container. The whole command is one
  # quoted string so the container-side shell receives it intact even if an
  # argument ever contains whitespace.
  docker exec "$container" /bin/bash -c "pg_dump $database --username $database > /mnt/exp/$output_name.sql"

  # /srv/docker/postgres/exp is the host side of the container's /mnt/exp
  # bind mount (hardcoded value should be fine but maybe make this dynamic?)
  mv "/srv/docker/postgres/exp/$output_name.sql" "$working_dir/$output_name/$output_name.sql"
}
|
|
|
|
# redis snapshot
# tells redis to make a snapshot - redis_snapshot [whatever-redis-1] [port]
#   $1 - container name
#   $2 - redis port inside the container
# SAVE is synchronous: it blocks until the RDB file is written, so the data
# directory is safe to copy as soon as this returns.
function redis_snapshot {
  docker exec "$1" redis-cli -p "$2" SAVE
}
|
|
|
|
# b2 upload
# load secrets then start uploading to backblaze b2 - b2_upload [$backup_working_directory] [$backup_output_tar]
#   $1 - directory containing the file to upload
#   $2 - file name (also used as the remote object name)
# Reads B2_KEYID / B2_SECRET / B2_BACKUP_BUCKET from /etc/secrets/b2.env for
# the duration of the call, then clears both the session and the variables.
function b2_upload {
  # load in secrets from external file
  # (unquoted expansion intentional: one exported KEY=VAL per word)
  if [ -f /etc/secrets/b2.env ]; then
    export $(grep -v '^#' /etc/secrets/b2.env | xargs)
  else
    echo "${red}b2_upload:${normal} B2 Secrets don't exist. Exiting..."
    return 1
  fi

  # upload file specified (credentials/paths quoted so empty or odd values
  # can't silently shift arguments)
  backblaze-b2 authorize-account "$B2_KEYID" "$B2_SECRET"
  backblaze-b2 upload-file "$B2_BACKUP_BUCKET" "$1/$2" "$2"
  backblaze-b2 clear-account # just to ensure we won't stay authenticated afterwards

  # clear out secrets
  unset $(grep -v '^#' /etc/secrets/b2.env | sed -E 's/(.*)=.*/\1/' | xargs)
}
|
|
|
|
# ╭─────────────╮
# │ backup step │
# ╰─────────────╯
# TODO: it's probably possible to throw some of these steps into an array in loop or something, if that's even a good idea
# Full backup driver. Stages per-service data/config copies plus database
# dumps under a working directory, tars and zstd-compresses the lot, uploads
# the archive to Backblaze B2, then wipes the working directory.
# Services without a native backup mechanism are briefly stopped while their
# data directories are copied (pds, aode, vaultwarden, mailserver,
# mollysocket), so expect short outages during a run.
function system_backup {
  echo "${blue}backup:${normal} Running full system backup for ${green}${synth_current_system}${normal}."

  if [ "$synth_current_system" = "phosphorus" ]; then # phosphorus
    # variables - could probably be set locally but unsure how much this will dynamically change between systems
    backup_local_folder=/srv/docker
    backup_working_directory=/var/backups/phosphorus
    backup_output_tar=phosphorus.tar

    # =============================================================================
    # initial steps - cleanup then create
    rm -fr $backup_working_directory/*
    mkdir -p $backup_working_directory

    # =============================================================================
    # backup files - postgres (we just want to keep a copy of the compose file)
    echo "${blue}Pulling in Postgres configurations...${normal}"
    mkdir -p $backup_working_directory/postgres
    cp -r $backup_local_folder/postgres/compose.yaml $backup_working_directory/postgres

    # =============================================================================
    # sharkey
    echo "${blue}Pulling in Sharkey...${normal}"
    mkdir -p $backup_working_directory/sharkey
    # database (note: the role/db is named "misskey", not "sharkey")
    postgres_backup postgres-db-1 misskey sharkey $backup_working_directory
    # dragonflydb (SAVE blocks until the RDB file is written, then copy it)
    redis_snapshot sharkey-dfdb-1 6379
    cp -r $backup_local_folder/sharkey/dfdb $backup_working_directory/sharkey/dfdb
    # configs, extra
    cp $backup_local_folder/sharkey/compose.yaml $backup_working_directory/sharkey
    cp $backup_local_folder/sharkey/default.yml $backup_working_directory/sharkey
    cp $backup_local_folder/sharkey/.env.secrets $backup_working_directory/sharkey
    # service configs
    cp -r $backup_local_folder/sharkey/api $backup_working_directory/sharkey/api
    cp -r $backup_local_folder/sharkey/worker $backup_working_directory/sharkey/worker
    cp -r $backup_local_folder/sharkey/activity $backup_working_directory/sharkey/activity
    cp -r $backup_local_folder/sharkey/media $backup_working_directory/sharkey/media

    # =============================================================================
    # iceshrimp
    echo "${blue}Pulling in Iceshrimp...${normal}"
    mkdir -p $backup_working_directory/iceshrimp/config
    # database
    postgres_backup postgres-db-1 iceshrimp iceshrimp $backup_working_directory
    # configs, extra
    cp $backup_local_folder/iceshrimp/compose.yaml $backup_working_directory/iceshrimp
    cp -r $backup_local_folder/iceshrimp/config $backup_working_directory/iceshrimp

    # =============================================================================
    # mastodon
    echo "${blue}Pulling in Mastodon...${normal}"
    mkdir -p $backup_working_directory/mastodon/.config
    # database
    postgres_backup postgres-db-1 mastodon mastodon $backup_working_directory
    # dragonflydb
    redis_snapshot mastodon-dragonfly-1 6379
    cp -r $backup_local_folder/mastodon/dragonfly $backup_working_directory/mastodon/dragonfly
    # configs, extra
    cp $backup_local_folder/mastodon/compose.yaml $backup_working_directory/mastodon
    cp $backup_local_folder/mastodon/.env $backup_working_directory/mastodon
    cp $backup_local_folder/mastodon/.env.secrets $backup_working_directory/mastodon

    # =============================================================================
    # pds
    echo "${blue}Pulling in PDS...${normal}"
    mkdir -p $backup_working_directory/pds
    # there isn't a native way to "backup" the pds, so we shut it off and copy it
    docker compose -f $backup_local_folder/pds/compose.yaml down
    cp -r $backup_local_folder/pds/pds $backup_working_directory/pds
    docker compose -f $backup_local_folder/pds/compose.yaml up -d
    # configs, extra
    cp $backup_local_folder/pds/compose.yaml $backup_working_directory/pds

    # =============================================================================
    # aoderelay
    echo "${blue}Pulling in AodeRelay...${normal}"
    mkdir -p $backup_working_directory/aode
    # also no native way to "backup" the data, so repeat the steps as like the pds
    docker compose -f $backup_local_folder/aode/compose.yaml down
    cp -r $backup_local_folder/aode/relay $backup_working_directory/aode
    docker compose -f $backup_local_folder/aode/compose.yaml up -d
    # configs, extra
    cp $backup_local_folder/aode/compose.yaml $backup_working_directory/aode

    # =============================================================================
    # pull in any other common configs and secrets
    echo "${blue}Pulling in other configurations...${normal}"
    mkdir -p $backup_working_directory/other/etc/secrets
    cp -r /etc/secrets/* $backup_working_directory/other/etc/secrets/

    # =============================================================================
    # archive and compress everything
    # NOTE(review): the tar target lives *inside* the directory being archived;
    # GNU tar skips the archive file itself (with a warning) — confirm that's
    # the tar in use here. Paths are also stored absolute.
    echo "${blue}Compressing everything into one archive...${normal}"
    tar -cf "$backup_working_directory/$backup_output_tar" $backup_working_directory # create the archive
    zstd -z -T3 -9 --rm "$backup_working_directory/$backup_output_tar" # compress the archive
    # TODO: it may be possible to combine these steps so tar automatically compresses the archive with zstd instead of doing it separately

    # =============================================================================
    # upload backup to backblaze
    echo "${blue}Uploading backup...${normal}"
    b2_upload ${backup_working_directory} ${backup_output_tar}.zst

    # =============================================================================
    # cleanup
    echo "${blue}Cleaning up...${normal}"
    rm -fr ${backup_working_directory}/${backup_output_tar}.zst $backup_working_directory/*
  elif [ "$synth_current_system" = "neptunium" ]; then # neptunium
    # variables - could probably be set locally but unsure how much this will dynamically change between systems
    backup_local_folder=/srv/docker
    backup_working_directory=/var/backups/neptunium
    backup_output_tar=neptunium.tar

    # =============================================================================
    # initial steps - cleanup then create
    rm -fr $backup_working_directory/*
    mkdir -p $backup_working_directory

    # =============================================================================
    # call in database vacuuming function
    echo "${blue}Calling in vacuuming...${normal}"
    system_vacuum

    # =============================================================================
    # backup files - postgres (we just want to keep a copy of the compose file)
    echo "${blue}Pulling in Postgres configurations...${normal}"
    mkdir -p $backup_working_directory/postgres
    cp -r $backup_local_folder/postgres/compose.yaml $backup_working_directory/postgres

    # =============================================================================
    # backup files - zitadel
    echo "${blue}Pulling in ZITADEL...${normal}"
    mkdir -p $backup_working_directory/zitadel
    # database
    postgres_backup postgres-db-1 zitadel zitadel $backup_working_directory
    # configs, extra
    cp $backup_local_folder/zitadel/compose.yaml $backup_working_directory/zitadel
    cp $backup_local_folder/zitadel/.env $backup_working_directory/zitadel

    # =============================================================================
    # freshrss
    echo "${blue}Pulling in FreshRSS...${normal}"
    mkdir -p $backup_working_directory/freshrss
    # database
    postgres_backup postgres-db-1 freshrss freshrss $backup_working_directory
    cp -r $backup_local_folder/freshrss/data $backup_working_directory/freshrss
    # configs, extra
    cp -r $backup_local_folder/freshrss/extensions $backup_working_directory/freshrss
    cp $backup_local_folder/freshrss/compose.yaml $backup_working_directory/freshrss
    cp $backup_local_folder/freshrss/.env $backup_working_directory/freshrss

    # =============================================================================
    # vaultwarden
    echo "${blue}Pulling in Vaultwarden...${normal}"
    mkdir -p $backup_working_directory/vaultwarden/.config
    # data - similar case to the pds, there isn't a native way to make a backup
    docker compose -f $backup_local_folder/vaultwarden/compose.yaml down
    cp -r $backup_local_folder/vaultwarden/vw-data $backup_working_directory/vaultwarden
    docker compose -f $backup_local_folder/vaultwarden/compose.yaml up -d
    # configs, extra
    cp $backup_local_folder/vaultwarden/compose.yaml $backup_working_directory/vaultwarden

    # =============================================================================
    # mailserver
    echo "${blue}Pulling in mailserver...${normal}"
    mkdir -p $backup_working_directory/mailserver/docker-data
    # data - once again - no native way to make a backup
    docker compose -f $backup_local_folder/mailserver/compose.yaml down
    cp -r $backup_local_folder/mailserver/docker-data/dms $backup_working_directory/mailserver/docker-data
    docker compose -f $backup_local_folder/mailserver/compose.yaml up -d
    # configs, extra
    cp $backup_local_folder/mailserver/compose.yaml $backup_working_directory/mailserver
    cp $backup_local_folder/mailserver/mailserver.env $backup_working_directory/mailserver

    # =============================================================================
    # ejabberd
    echo "${blue}Pulling in ejabberd...${normal}"
    mkdir -p $backup_working_directory/ejabberd
    # database
    postgres_backup postgres-db-1 ejabberd ejabberd $backup_working_directory
    cp -r $backup_local_folder/ejabberd/files $backup_working_directory/ejabberd
    # configs, extra
    cp $backup_local_folder/ejabberd/compose.yaml $backup_working_directory/ejabberd
    cp -r $backup_local_folder/ejabberd/conf $backup_working_directory/ejabberd

    # =============================================================================
    # forgejo
    echo "${blue}Pulling in Forgejo...${normal}"
    mkdir -p $backup_working_directory/forgejo
    # database
    postgres_backup postgres-db-1 forgejo forgejo $backup_working_directory
    cp -r $backup_local_folder/forgejo/forgejo $backup_working_directory/forgejo
    cp -r $backup_local_folder/forgejo/runner $backup_working_directory/forgejo
    # configs, extra
    cp $backup_local_folder/forgejo/compose.yaml $backup_working_directory/forgejo
    cp $backup_local_folder/forgejo/.env $backup_working_directory/forgejo

    # =============================================================================
    # ask-js
    echo "${blue}Pulling in Ask-js...${normal}"
    mkdir -p $backup_working_directory/ask-js
    # database (role/db is "askjs", output folder is "ask-js")
    postgres_backup postgres-db-1 askjs ask-js $backup_working_directory
    # configs, extra
    cp $backup_local_folder/ask-js/compose.yaml $backup_working_directory/ask-js
    cp $backup_local_folder/ask-js/config.json $backup_working_directory/ask-js

    # =============================================================================
    # mollysocket
    echo "${blue}Pulling in MollySocket...${normal}"
    mkdir -p $backup_working_directory/mollysocket/.config
    # data - similar case to the pds, there isn't a native way to make a backup
    docker compose -f $backup_local_folder/mollysocket/compose.yaml down
    cp -r $backup_local_folder/mollysocket/data $backup_working_directory/mollysocket
    docker compose -f $backup_local_folder/mollysocket/compose.yaml up -d
    # configs, extra
    cp $backup_local_folder/mollysocket/compose.yaml $backup_working_directory/mollysocket

    # =============================================================================
    # pull in any other common configs and secrets
    # (the mta-sts directory is created but nothing is copied into it here —
    # NOTE(review): confirm whether a cp for /var/www/mta-sts went missing)
    echo "${blue}Pulling in other configurations...${normal}"
    mkdir -p $backup_working_directory/system/etc/caddy
    mkdir -p $backup_working_directory/system/etc/secrets
    mkdir -p $backup_working_directory/system/var/www/mta-sts/.well-known/
    cp -r /etc/caddy/* $backup_working_directory/system/etc/caddy/
    cp -r /etc/secrets/* $backup_working_directory/system/etc/secrets/

    # =============================================================================
    # archive and compress everything
    echo "${blue}Compressing everything into one archive...${normal}"
    tar -cf "$backup_working_directory/$backup_output_tar" $backup_working_directory # create the archive
    zstd -z -T3 -9 --rm "$backup_working_directory/$backup_output_tar" # compress the archive
    # TODO: it may be possible to combine these steps so tar automatically compresses the archive with zstd instead of doing it separately

    # =============================================================================
    # upload backup to backblaze
    echo "${blue}Uploading backup...${normal}"
    b2_upload ${backup_working_directory} ${backup_output_tar}.zst

    # =============================================================================
    # cleanup
    echo "${blue}Cleaning up...${normal}"
    rm -fr ${backup_working_directory}/${backup_output_tar}.zst $backup_working_directory/*
  fi
  echo "${green}System backup finished! beep!~${normal}"
}
|
|
|
|
# ╭─────────────╮
# │ vacuum step │
# ╰─────────────╯
# Runs VACUUM ANALYZE over every known database for the detected system.
# NOTE(review): the *_POSTGRES_PASSWORD arguments are expanded here *before*
# postgres_vacuum (re)loads /etc/secrets/postgres.env — and vacuum_self unsets
# those variables on its way out — so they are likely empty at call time.
# It appears harmless because the in-container connection doesn't actually
# need a password, but confirm before relying on $3.
function system_vacuum {
  echo "${blue}vacuum:${normal} Running database vacuums for ${green}${synth_current_system}${normal}."

  # vacuum
  if [ "$synth_current_system" = "phosphorus" ]; then # phosphorus
    postgres_vacuum_self
    postgres_vacuum postgres-db-1 misskey ${SHARKEY_POSTGRES_PASSWORD}
    postgres_vacuum postgres-db-1 iceshrimp ${ICESHRIMP_POSTGRES_PASSWORD}
    postgres_vacuum postgres-db-1 mastodon ${MASTODON_POSTGRES_PASSWORD}
  elif [ "$synth_current_system" = "neptunium" ]; then # neptunium
    postgres_vacuum_self
    postgres_vacuum postgres-db-1 forgejo ${FORGEJO_POSTGRES_PASSWORD}
    postgres_vacuum postgres-db-1 ejabberd ${EJABBERD_POSTGRES_PASSWORD}
    postgres_vacuum postgres-db-1 askjs ${ASKJS_POSTGRES_PASSWORD}
    postgres_vacuum postgres-db-1 freshrss ${FRESHRSS_POSTGRES_PASSWORD}
    postgres_vacuum postgres-db-1 zitadel ${ZITADEL_POSTGRES_PASSWORD}
  fi

  # unload secrets - they already should be, but we want to ensure they are
  unset $(grep -v '^#' /etc/secrets/postgres.env | sed -E 's/(.*)=.*/\1/' | xargs)
  echo "${green}Vacuuming complete! Beep!~${normal}${normal}"
}
|
|
|
|
# ╭────────────────╮
# │ docker cleanup │
# ╰────────────────╯
# Prunes every unused Docker image, volume, container and network, then makes
# sure the custom IPv6 network still exists (prune can remove it when nothing
# is attached).
function docker_cleanup {
  # check if docker exists on the system
  if ! command -v docker > /dev/null 2>&1; then
    echo "${red}docker-cleanup:${normal} $synth_current_system does not include Docker."
    return 1
  fi

  # prune everything that isn't running/not tied to any existing container
  # this is usually dangerous but everything we have is already running 24/7 or has (important) data stored outside of an erasable volume
  echo "${blue}docker-cleanup:${normal} Cleaning up Docker..."
  docker image prune -af
  docker volume prune -af
  docker container prune -f
  docker network prune -f

  # ensure our ipv6 network still exists
  echo "${blue}docker-cleanup:${normal} Ensuring IPv6 network still exists..."
  # -q keeps the match silent; previously the matching line was printed too
  if docker network ls | grep -q 'ip6net'; then
    echo "${green}Docker IPv6 network still exists.${normal}"
  else
    docker network create --ipv6 --subnet fd00:cafe:face:feed::/64 ip6net
  fi

  echo "${green}Done. Beep!${normal}"
}
|
|
|
|
# ╭─────────────────────╮
# │ update certificates │
# ╰─────────────────────╯
# (for context: certificates are handled automatically by caddy. we just pull them out of caddy's special home directory to make some of them accessible to other services we run like email and xmpp)
# Copies each known domain's .crt/.key pair out of Caddy's data directory
# into /etc/certs. Returns 1 if Caddy's home directory can't be found.
function update_certificates {
  # internal values - caddy's home may change at random
  local caddy_home_directory=/srv/docker/caddy/caddy_data/caddy/certificates/acme-v02.api.letsencrypt.org-directory
  local known_domains=("synth.download" "wildcard_.synth.download" "muc.xmpp.synth.download" "upload.xmpp.synth.download" "proxy.xmpp.synth.download" "pubsub.xmpp.synth.download")
  local certs_location=/etc/certs
  local domain

  echo "${blue}update-certs:${normal} Pulling certificates..."

  # bail out early so we don't create/modify anything when caddy moved
  if [ ! -d "$caddy_home_directory" ]; then
    echo "${red}update-certs:${normal} Failed to detect Caddy's home directory."
    return 1
  fi

  mkdir -p "$certs_location" # it already should exist - but lets make sure

  for domain in "${known_domains[@]}"; do
    cp "$caddy_home_directory/$domain/$domain.crt" "$certs_location/$domain.crt"
    cp "$caddy_home_directory/$domain/$domain.key" "$certs_location/$domain.key"
  done

  # ensure permissions are set correctly
  # NOTE(review): 755 leaves the private keys world-readable; presumably
  # required so the mail/xmpp services can read them — confirm, and tighten
  # (e.g. group-based access) if possible.
  chmod 755 "$certs_location"
  chmod 755 "$certs_location"/*

  # done
  echo "${green}Certificates pulled! beep!~${normal}"
  return 0
}
|
|
|
|
# ╭────────────╮
# │ enter psql │
# ╰────────────╯
# Opens an interactive psql superuser shell inside the postgres container,
# transparently going through sudo when not already root.
function enter_psql {
  local runner=(docker)
  if [[ ${UID} != 0 ]]; then
    runner=(sudo docker)
  fi
  "${runner[@]}" exec -it postgres-db-1 /bin/bash -c "psql -U postgres"
}
|
|
|
|
# ╭──────────────────╮
|
|
# │ update helperbot │
|
|
# ╰──────────────────╯
|
|
function update_helperbot {
    # Self-update: back up the installed script, then replace it with the
    # latest version pulled from the git forge.
    local helperbot_location=/usr/local/bin/helperbot
    local helperbot_upstream_url=https://forged.synth.download/synth.download/synth.download/raw/branch/main/helperbot
    local download_target

    echo "${blue}update-helperbot:${normal} Updating helperbot..."
    echo "Creating a backup into ${bold}${helperbot_location}.bak${normal}..."
    # create a backup of ourself
    cp "$helperbot_location" "$helperbot_location.bak"

    # pull in script from git - download into a temporary file first so a
    # failed or partial download can never clobber the installed script
    echo "Updating..."
    download_target=$(mktemp)
    wget "$helperbot_upstream_url" -O "$download_target"
    mv "$download_target" "$helperbot_location"
    # mktemp creates mode 600; restore the usual world-executable bits
    chmod 755 "$helperbot_location"

    # done
    echo "${green}Updated helperbot!${normal}"
}
|
|
|
|
# ╭───────────────────╮
|
|
# │ fediverse related │
|
|
# ╰───────────────────╯
|
|
|
|
# sync blocklists from sharkey to iceshrimp
|
|
function fedi_sync_blocklists {
    # Mirror the instance blocklist from the Sharkey instance onto the
    # Iceshrimp instance, talking to both over localhost. Requires admin API
    # tokens (MK_TOKEN, SHRIMP_TOKEN) in $XDG_DATA_HOME/fedi-tokens.env.
    # Returns 1 on any precondition failure (wrong host, root user, no tokens).
    local sharkey_instance=localhost:60628
    local iceshrimp_instance=localhost:24042
    local token_file="$XDG_DATA_HOME/fedi-tokens.env"

    # this command will only work on phosphorus, where the instances are directly being run from
    # (it doesn't *have* to, but it's much faster to do it over localhost than domain)
    if [[ "$synth_current_system" != "phosphorus" ]]; then
        echo "${red}sync-blocklists:${normal} Sorry, this command will only work on phosphorus."
        return 1
    fi

    echo "${blue}sync-blocklists:${normal} Syncing blocks from Sharkey to Iceshrimp..."
    if [[ ${UID} == 0 ]]; then
        echo
        echo "${yellow}${bold}Notice:${normal} This command has been ran as root! For the sake of safety and Security™ reasons, please run this command as your standard user."
        echo "If not already configured for yourself, please put a file under ${bold}\$XDG_DATA_HOME/fedi-tokens.env${normal} (directly) which should include the following, replacing the information as needed:"
        echo "${gray}──────────────────────────────────────────────${normal}"
        echo "MK_TOKEN=your_admin_misskey_token_here"
        echo "SHRIMP_TOKEN=your_admin_iceshrimp_token_here"
        echo "${gray}──────────────────────────────────────────────${normal}"
        return 1
    else
        if [ -f "$token_file" ]; then
            # load keys
            export $(grep -v '^#' "$token_file" | xargs)
            # grab the instance data from sharkey
            json_data=$(curl -s "http://$sharkey_instance/api/admin/meta" -H "Content-Type: application/json" -X POST -d "{\"i\": \"$MK_TOKEN\"}")
            # throw into a loop to block all instances on iceshrimp
            echo "$json_data" | jq -r -c '.blockedHosts[]' | while read -r host; do
                curl -w "HTTP %{response_code} " "http://$iceshrimp_instance/api/iceshrimp/admin/instances/$host/block?imported=true&reason=Synced%20from%20booping.synth.download" -H "Host: beeping.synth.download" -H "Authorization: Bearer $SHRIMP_TOKEN" -X POST
                echo "Blocked host: $host"
            done
            # unset keys - read the same file we loaded them from (the old
            # code pointed at /etc/secrets here, a stale path, so the tokens
            # were never actually unset)
            unset $(grep -v '^#' "$token_file" | sed -E 's/(.*)=.*/\1/' | xargs)
            # done
            echo "${green}Done syncing blocks.${normal}"
        else
            echo "${red}sync-blocklists:${normal} $XDG_DATA_HOME/fedi-tokens.env doesn't exist."
            return 1
        fi
    fi
}
|
|
|
|
# update standalone frontends
|
|
function fedi_update_frontends {
    # Download and unpack the standalone fediverse web frontends into
    # $frontend_folder. Frontends live on neptunium with the fediverse
    # services, so this refuses to run anywhere else.
    # Returns 1 on the wrong host or if the target folder is missing.
    local chuckya_fe_url=https://github.com/synth-download/actions/releases/download/fedi-frontend/chuckya-fe.zip
    local akkoma_chuckya_url=https://github.com/synth-download/actions/releases/download/fedi-frontend/akkoma-fe-chuckya.zip
    local akkoma_iceshrimp_url=https://github.com/synth-download/actions/releases/download/fedi-frontend/akkoma-fe-iceshrimp.zip
    local phanpy_url=https://github.com/zenfyrdev/phanpy/releases/latest/download/phanpy-dist.zip
    local pl_fe_url=https://pl.mkljczk.pl/pl-fe.zip
    local frontend_folder=/var/www/fedi-frontends

    # frontends are on neptunium along with the fediverse services
    # (the old error message incorrectly said phosphorus)
    if [[ "$synth_current_system" != "neptunium" ]]; then
        echo "${red}update-frontends:${normal} Sorry, this command will only work on neptunium."
        return 1
    fi

    # _repeat_process [$frontend_fe_url] [file_name] [folder_name]
    function _repeat_process {
        wget "$1" -O "/tmp/$2.zip"
        # the old code unzipped "/tmp/$2" (no suffix) and relied on unzip's
        # implicit .zip suffix guessing - use the real filename instead
        unzip -o "/tmp/$2.zip" -d "$frontend_folder/$3"
        rm "/tmp/$2.zip"
        echo "${green}Okay.${normal}"
    }

    # TODO: it's probably possible to turn this into an array loop of some sort
    echo "${blue}update-frontends:${normal} Updating standalone frontends..."
    if [ -d "$frontend_folder" ]; then
        # update mastodon
        echo "${blue}Updating Standalone Chuckya...${normal}"
        _repeat_process "$chuckya_fe_url" chuckya-fe chuckya-fe
        # update akkoma iceshrimp
        echo "${blue}Updating Akkoma for Iceshrimp.NET...${normal}"
        _repeat_process "$akkoma_iceshrimp_url" akkoma-fe-iceshrimp akkoma-fe-iceshrimp
        # update akkoma mastodon
        echo "${blue}Updating Akkoma for Chuckya...${normal}"
        _repeat_process "$akkoma_chuckya_url" akkoma-fe-chuckya akkoma-fe-chuckya
        # update phanpy
        echo "${blue}Updating Phanpy...${normal}"
        _repeat_process "$phanpy_url" phanpy phanpy
        # update pl-fe
        echo "${blue}Updating pl-fe...${normal}"
        _repeat_process "$pl_fe_url" pl-fe pl-fe
    else
        echo "${red}update-frontends:${normal} $frontend_folder doesn't exist."
        return 1
    fi
}
|
|
|
|
# ╭──────────────╮
|
|
# │ reload caddy │
|
|
# ╰──────────────╯
|
|
function caddy_reload {
    # Ask the running Caddy container to reload its configuration in place
    # (no downtime, unlike caddy_restart).
    local container=caddy

    # TODO: should we implement some sort of error checking here? in either case this will fail the entire script anyways if caddy isn't running and verbally reports that, so probably not required.
    echo "${blue}caddy:${normal} Reloading Caddy..."
    docker exec -w /etc/caddy "$container" caddy reload
    echo "${green}Reloaded.${normal}"
}
|
|
|
|
# ╭───────────────╮
|
|
# │ restart caddy │
|
|
# ╰───────────────╯
|
|
function caddy_restart {
    # Fully tear down and re-create the Caddy container through docker compose.
    local caddy_dir=/srv/docker/caddy
    local compose_file="$caddy_dir/compose.yaml"

    # TODO: see note for caddy_reload
    echo "${blue}caddy:${normal} Restarting Caddy..."
    docker compose -f "$compose_file" down
    docker compose -f "$compose_file" up -d
    echo "${green}Restarted.${normal}"
}
|
|
|
|
# ╭────────────────────────────────────╮
|
|
# │ functions and variables - end here │
|
|
# ╰────────────────────────────────────╯
|
|
|
|
# =============================================================================
|
|
|
|
# ╭──────────────╮
|
|
# │ main program │
|
|
# ╰──────────────╯
|
|
|
|
# display the header
header

# check that everything we need is installed
#check_applications

# small helper: populate $synth_current_system once, before running any
# command that needs to know which host we're on (this check+detect pair
# used to be copy-pasted into six case branches below)
function _require_system {
    if [ ! -v synth_current_system ]; then
        detect_system
    fi
}

# evaluate arguments and set environment variables to enable each command and see what should be executed
while [ -n "$1" ]; do
    case "$1" in
        -h | --help) # display help info
            info_help
            exit 0;;
        -u | --upgrade) # upgrade system
            root_check
            _require_system
            system_upgrade;;
        -b | --backup) # backup system
            root_check
            _require_system
            system_backup;;
        -v | --vacuum) # vacuum database
            root_check
            _require_system
            system_vacuum;;
        -r | --reboot) # reboot system
            root_check
            echo "${yellow}Rebooting system...${normal}"
            sleep 1
            systemctl reboot;;
        --psql) # enter the psql shell
            enter_psql;;
        --docker-cleanup) # docker cleanup
            root_check
            _require_system
            docker_cleanup;;
        --update-certs) # email certificates
            root_check
            _require_system
            update_certificates;;
        --update-helperbot) # update helperbot
            root_check
            update_helperbot;;
        -cr | --caddy-reload) # reload caddy
            root_check
            caddy_reload;;
        -cre | --caddy-restart) # restart caddy
            root_check
            caddy_restart;;
        --sync-blocklists) # fediverse: sync sharkey -> iceshrimp blocklists
            _require_system
            fedi_sync_blocklists;;
        --update-frontends) # fediverse: update standalone frontends
            root_check
            _require_system
            fedi_update_frontends;;
        *) # invalid option was given
            invalid_command "$1"
            exit 1;;
    esac
    shift 1
done

# show help if we didn't receive commands either
if [ ! -v synth_args_exist ]; then
    info_help
    exit 0
fi

# unset everything
for variable in "${all_known_variables[@]}"; do
    unset "$variable"
done
|