Reorganize pleroma deployment, add recovery script
This commit is contained in:
parent
0e7d875b7f
commit
f206e87f2a
@ -17,6 +17,11 @@
|
||||
password: "!"
|
||||
home: /opt/pleroma
|
||||
shell: /usr/sbin/nologin
|
||||
- name: Template out service
|
||||
template:
|
||||
src: "pleroma.service"
|
||||
dest: "/etc/systemd/system/pleroma.service"
|
||||
notify: restart pleroma
|
||||
- name: Set up PostgreSQL
|
||||
block:
|
||||
- name: Create DB user
|
||||
@ -68,11 +73,24 @@
|
||||
website_url: "{{ pleroma_url }}"
|
||||
- name: Install Pleroma
|
||||
block:
|
||||
- name: Disable service
|
||||
systemd:
|
||||
name: pleroma
|
||||
state: stopped
|
||||
- name: Get latest release zip
|
||||
get_url:
|
||||
url: "https://git.pleroma.social/api/v4/projects/2/jobs/artifacts/stable/download?job={{ pleroma_arch }}"
|
||||
dest: "/opt/pleroma/release.zip"
|
||||
register: r
|
||||
- name: Template out scripts
|
||||
template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "/opt/pleroma/{{ item.dest }}"
|
||||
owner: pleroma
|
||||
group: pleroma
|
||||
mode: "{{ item.mode }}"
|
||||
loop:
|
||||
- { src: "recover.sh", dest: "recover.sh", mode: "0755" }
|
||||
- name: Install Pleroma
|
||||
block:
|
||||
- name: Unzip release
|
||||
@ -137,11 +155,6 @@
|
||||
args:
|
||||
chdir: /opt/pleroma
|
||||
changed_when: false
|
||||
- name: Template out service
|
||||
template:
|
||||
src: "pleroma.service"
|
||||
dest: "/etc/systemd/system/pleroma.service"
|
||||
notify: restart pleroma
|
||||
- name: Start and enable service
|
||||
systemd:
|
||||
daemon_reload: yes
|
||||
|
59
roles/pleroma/templates/recover.sh
Normal file
59
roles/pleroma/templates/recover.sh
Normal file
@ -0,0 +1,59 @@
|
||||
#! /bin/sh
#
# recover.sh
# Recover a Pleroma server (uploads + database) from our S3 bucket.
# Copyright (C) 2020 Vintage Salt <rehashedsalt@cock.li>
#
# Distributed under terms of the MIT license.
#
# NOTE(review): this file is an Ansible/Jinja2 template; every {{ ... }}
# expression is expanded at deploy time, before the script ever runs.

set -e

# Get to our Pleroma dir
export PLEROMA_DIR="/opt/pleroma"
cd "$PLEROMA_DIR" || exit 50

# Make sure we have a backup. A missing prefix is not an error — it just
# means there is nothing to restore yet.
if ! aws s3 ls "s3://{{ aws_backup_bucket }}/{{ pleroma_url }}/" > /dev/null 2>&1; then
	echo "No backups available"
	exit 0
fi
# If we have existing Pleroma uploads, tar them out of the way first so the
# restore cannot clobber live data (--remove-files empties the tree).
if [ -d /var/lib/pleroma/uploads ]; then
	echo "Backing up current uploads"
	tar czf "recover-uploads-$(date -Iseconds).tar.gz" /var/lib/pleroma/uploads --remove-files --force-local
fi
# If it STILL exists, then we have a problem
if [ -d /var/lib/pleroma/uploads ]; then
	echo "Uploads still found after tarring; bailing"
	exit 51
fi

# Get our latest good uploads backup
backup_up="$(aws s3 ls "s3://{{ aws_backup_bucket }}/{{ pleroma_url }}/" | grep uploads | tail -n 1 | awk '{print $4}')"
# And our latest good DB backup
backup_db="$(aws s3 ls "s3://{{ aws_backup_bucket }}/{{ pleroma_url }}/" | grep pgdump | tail -n 1 | awk '{print $4}')"
# Restoring only half of the pair would leave the instance inconsistent, so
# bail out unless both archives were found in the listing.
if [ -z "$backup_up" ] || [ -z "$backup_db" ]; then
	echo "Incomplete backup set (uploads: '$backup_up', db: '$backup_db'); bailing"
	exit 52
fi
echo "Restoring backup: $backup_up $backup_db"
# Get our backups
aws s3 cp "s3://{{ aws_backup_bucket }}/{{ pleroma_url }}/$backup_up" uploads.tgz
aws s3 cp "s3://{{ aws_backup_bucket }}/{{ pleroma_url }}/$backup_db" db.pgdump.gz
# Decompress
tar xzf uploads.tgz
gunzip db.pgdump.gz
# Find uploads (the tarball stores them under a ./var/... prefix)
uploaddir="$(find ./var -type d -name "uploads" | head -n 1)"
if [ -z "$uploaddir" ]; then
	echo "No uploads directory found inside $backup_up; bailing"
	exit 53
fi
# Ensure we're stopped
systemctl stop pleroma
# Move it into place
mv "$uploaddir" /var/lib/pleroma/
# Assure ownership. Recursive, because the files were extracted as root;
# 'user:group' form is portable ('pleroma.' is a GNU-only spelling).
chown -R pleroma:pleroma /var/lib/pleroma/uploads
# Remove the var dir, if it exists
if [ -d "var" ]; then
	rm -rf "$PLEROMA_DIR/var"
fi
# Drop and recreate our database. IF EXISTS keeps a first-time recovery from
# aborting under 'set -e' when the database has never been created.
# (Was the literal '<pleroma_db>', which is not valid SQL — use the
# template variable, and use it consistently for pg_restore as well.)
sudo -Hu postgres psql -c 'DROP DATABASE IF EXISTS {{ pleroma_db }};'
sudo -Hu postgres psql -c 'CREATE DATABASE {{ pleroma_db }};'
sudo -Hu postgres pg_restore -d "{{ pleroma_db }}" -v -1 db.pgdump
sudo -Hu postgres vacuumdb --all --analyze-in-stages
# Clean up the downloaded artifacts so reruns start fresh
rm -f uploads.tgz db.pgdump
|
Loading…
Reference in New Issue
Block a user