Compare commits
265 Commits
0b838511a3
38edcf6906
2107f823fe
103236b331
517f04de68
05a5551650
7c4ba6b23e
d052683651
a054fb29d8
5fe3396446
01dbb36c37
99f8132f5e
7f56771749
71ceec2ecd
82a43f8610
cfc7b94b7f
2acc2bd1b8
5509bf56a6
dc1ea05fe1
2080cd6b5c
15667e26d3
c953fadd88
ecf00b5f74
52b32bf2cf
f54e099c45
88862fa509
668a8441b7
d31cb4e1dd
223140fd3e
d4a6f23cac
745adfafae
1638436439
d76250e92e
d78d321247
855e26f4d0
53294574b4
9b6a917320
a5891093c9
69b5c5816a
1f05df9e09
1ab5b3fda0
11fa90fdde
a381abb88b
799b5bac29
f762a1fdfc
be0078cbc6
b90a272b6c
8972cf2cf2
f42623d1e3
e43820d75f
bfc432d2e5
be6d51c035
b066e2a7fd
9c15b15507
2affc1a8fe
e07232c448
faea62fedb
823f3297fc
83c1aa9cc2
beadee9668
4bbb4ba16b
db3ddabfe2
0637bc434f
2b686da51a
d84da547cb
c963a5649f
1cb8da6515
6ef5ff5cd2
9ada152e04
20be80b2ce
c628a280ac
2a154699d7
bb4d5548ee
9123c62cff
4ba22dcef7
2ccdcca4f1
8885daa1b2
d2bc8915ca
920d972346
b501cf1cdf
13785d3f43
e38aa3edf9
77722be801
6de9d965ce
7d8d7a781b
2260176040
be7c9313c7
7f906a5983
128d1092dc
5853fd21c3
2981e0bc03
7926287536
e0eb632d63
7c7bada344
58f4464001
7d637ad2d5
7537a2bca9
335660d518
9065584cee
61bf29481d
6edb911936
a78ee05bfd
f38df0c407
67e136494f
c8c5460979
30d2ecef07
be2bf484b4
18048a085b
18d9bec579
9d9096a998
ed62d9f722
389380dd0c
f7fbf43569
07e96002ac
4fa09d1ed1
c9d779b871
57e0d5b369
418b570ea5
63cb53fd16
88214fff2c
2edbd1c9e8
b793ebf587
601d9543ec
c181965242
e6e8427227
7f75bdb5cd
98a77d5f28
834f40d3ad
bfce95d50d
d3ee28fe56
8b0b900375
42f84c2d54
51cf91e0c4
2bfc6f489d
40e165c5a6
8b743f3b9e
a2971b3df4
c2068dc103
5d8238e029
8e6cbb69ff
ddac9fe542
e651396604
09bdb80712
e9eccef348
05b4bcc4f1
eea79389c9
52b9ceb3a3
8cffa77d38
502d7397cd
0ffd8ef535
c9984c448c
f3520c10ae
f8be177789
9b261e5085
2fd9668b51
896143d009
ced9d6b983
6afad6fcd9
07845384ac
6b6e8f7b64
a25c45536e
b5de12a767
4ac296ed41
31818924b3
e4060ca9a0
43ccced1c5
725687e05e
8a64774a77
4cd34284c6
7b004ca82c
d50f6a1135
51bf0e5c62
14799abdcf
ba1530a7c1
11d5b23b50
9c843f0375
506c58a18e
9962c09fb5
c2623c70ee
fad97a4ba0
469a9e7069
7cf0b5da3d
f319ee6ad2
190a88e57c
f323cd8987
6f57f2ed32
72bc460c4f
1440db6afc
fba7d30a40
b58a23e87a
505c20c2b0
a18ec49e20
0940535d2a
424d5cd75c
537f2c9824
a40a30eec4
7d2afdfaef
ef036fca76
b53ce3efaa
63fc4417db
4c4108ab0a
658888bda8
5651f6f50a
07ab0b472e
9a39b79895
ee40990c51
fc23453e5a
1e037bf3bc
c8aca49ff6
61c37b4650
ec77cdbc46
7bc017e583
ba37a7b4fa
bc8dd6d2bd
391e424199
f23d6ed738
a0d1ae0a4a
760af8dabe
7a72280c6e
74a6a1ce96
227f0a5df5
db36aa7eae
85c039e4dc
702a4c5f4c
68e8f35064
b250ce9dc8
142e589f84
9dda82edb3
a6b8c7ef64
b19602f205
e0e0d626f9
b6dc8629f9
4d59231fb0
f4117b46f8
5c87b1c0e7
e9971f304e
f5474f6f0e
45904e221d
2efb5b2554
8f2dc691f2
1b119a94e3
5ef2bcd3e7
ca9882adde
e63898f328
12f187e1e2
9cad3d4867
320427cba4
5000876b93
73c108e3ac
e47bfd13d9
baf057cd91
6052aa95ef
43585e1494
22ff009dda
8dfb76578f
de84b7b4f3
b37f882d7d
b54710a72e
f683c16f85
3ca88c4d88
260a797f68
45b4d66673
ba798bf36f
9901171d21
47d25cd91c
@@ -43,6 +43,7 @@ after_script:
Lint:
  stage: lint
  interruptible: yes
  allow_failure: yes
  except:
    - pipelines
    - schedules
@@ -64,31 +65,34 @@ Test:
# PRE-MAIN CONFIGURATION
Local:
  stage: play-pre
  only:
    - pipelines
    - schedules
  script:
    - ansible-playbook --skip-tags no-auto playbooks/site_local.yml --ssh-common-args='-o ProxyCommand="ssh -W %h:%p -q ansible@bastion1.dallas.mgmt.desu.ltd"' --vault-password-file ~/.vault_pass
Pre:
  stage: play-pre
  only:
    - pipelines
    - schedules
  script:
    - ansible-playbook --skip-tags no-auto playbooks/site_pre.yml --ssh-common-args='-o ProxyCommand="ssh -W %h:%p -q ansible@bastion1.dallas.mgmt.desu.ltd"' --vault-password-file ~/.vault_pass

# MAIN CONFIGURATION
Main:
  stage: play-main
  only:
    - pipelines
    - schedules
  retry: 1
  script:
    - ansible-playbook --skip-tags no-auto playbooks/site_main.yml --ssh-common-args='-o ProxyCommand="ssh -W %h:%p -q ansible@bastion1.dallas.mgmt.desu.ltd"' --vault-password-file ~/.vault_pass
Common:
  stage: play-main
  script:
    - ansible-playbook --skip-tags no-auto playbooks/site_common.yml --ssh-common-args='-o ProxyCommand="ssh -W %h:%p -q ansible@bastion1.dallas.mgmt.desu.ltd"' --vault-password-file ~/.vault_pass
Nagios:
  stage: play-main
  retry: 1
  script:
    - ansible-playbook -l vm-general-1.ashburn.mgmt.desu.ltd playbooks/prod_web.yml --tags nagios --ssh-common-args='-o ProxyCommand="ssh -W %h:%p -q ansible@bastion1.dallas.mgmt.desu.ltd"' --vault-password-file ~/.vault_pass

# CLEANUP
Cleanup:
  stage: play-post
  only:
    - pipelines
    - schedules
  script:
    - ansible-playbook --skip-tags no-auto playbooks/site_post.yml --ssh-common-args='-o ProxyCommand="ssh -W %h:%p -q ansible@bastion1.dallas.mgmt.desu.ltd"' --vault-password-file ~/.vault_pass
79 README.md
@@ -1,17 +1,60 @@
# Salt's Ansible Repository
# Desu LTD Ansible

Useful for management across all of 9iron, thefuck, and desu.
Ansible scripts that manage infra for all of Desu LTD

## Initialization

* Clone
* `ansible-galaxy install -r requirements.yml`
Clone the repo, then:

For quick bootstrapping of tools and libraries used in this repo, see [rehashedsalt/ansible-env](https://gitlab.com/rehashedsalt/docker-ansible-env). I use that exact image for CI/CD.
```bash
# Set up execution environment
python3 -m venv venv
. venv/bin/activate
pip3 install -r requirements.txt
# Set up Ansible Galaxy roles
ansible-galaxy install -r requirements.yml
# Set up password
# This one's optional if you want to --ask-vault-pass instead
touch ~/.vault_pass
chmod 0600 ~/.vault_pass
vim ~/.vault_pass
```

## Deployment
Regular runs of this repo are invoked in [rehashedsalt/ansible-env](https://gitlab.com/rehashedsalt/docker-ansible-env). See Obsidian notes for details.

### Linux Machines
## Usage

To run the whole playbook:

```bash
./site.yml
```

To deploy a core service to a single machine while you're working on it:

```bash
./playbooks/site_main.yml -l my.host --tags someservice
```

All `yml` files that can be invoked at the command line are marked executable and have a shebang at the top. If they do not have these features, you're looking at an include or something.
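For reference, the shebang in question is the one the executable playbooks in this changeset carry (see `oneoffs/local_backup.yml` below); a minimal sketch of what the top of any directly-invokable playbook here looks like:

```bash
# First lines of an executable playbook in this repo
# (copied from oneoffs/local_backup.yml in this changeset):
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
# The interpreter line plus the executable bit is what makes `./site.yml` work.
```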
## Structure

The structure of the playbooks in this repo is as follows:

* `site.yml` - Master playbook, calls in:
    * `playbooks/site_local.yml` - Tasks that run solely on the Ansible controller. Mostly used for DNS
    * `playbooks/site_pre.yml` - Basic machine bootstrapping and configuration that must be done before services are deployed. Does things like connect a machine to the management Zerotier network, ensure basic packages, ensure monitoring can hook in, etc.
    * `playbooks/site_main.yml` - Main service deployment is done here. If you're iterating on a service, invoke this one
    * `playbooks/site_post.yml` - Cleanup tasks. Mostly relevant for the regular autoruns. Cleans up old Docker images and reboots boxes

Most services are containerized -- their definitions are in `playbooks/tasks` and are included where relevant.

## Bootstrapping

Each Linux machine will require the following to be fulfilled for Ansible to access it:

@@ -25,24 +68,14 @@ Each Linux machine will require the following to be fulfilled for Ansible to acc

To automate these host-local steps, use the script file `contrib/bootstrap.sh`.

### Windows Machines
## Netbox

lol don't
These playbooks depend heavily on Netbox for:

### All Machines
* Inventory, including primary IP, hostname, etc.

Adding a new server will require these:
* Data on what services to deploy

* The server is accessible from the Ansible host;
* Data on what services to monitor

* The server has been added to NetBox OR in `inventory-hard`

* DNS records for the machine are set; and

From there, running the playbook `site.yml` should get the machine up to snuff.

## Zerotier

A lot of my home-network side of things is connected together via ZeroTier; initial deployment/repairs may require specifying an `ansible_host` for the inventory item in question to connect to it locally. Subsequent plays will require connectivity to my home ZeroTier network.

Cloud-managed devices require no such workarounds.
Thus, if Netbox is inaccessible, a large portion of these scripts will malfunction. If you anticipate Netbox will be unavailable for whatever reason, run `ansible-inventory` by hand and save the output to a file. Macros for things like monitoring will not work, but you'll at least have an inventory and tags.
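A minimal sketch of that fallback, using the `inventories/production-cache` layout this changeset adds (the paths are just this repo's convention):

```bash
# While Netbox is still up, dump the dynamic inventory to a static YAML file...
ansible-inventory -i inventories/production --list -y > inventories/production-cache/hosts.yml
# ...then point later runs at the cached copy when Netbox is down:
./playbooks/site_main.yml -i inventories/production-cache -l my.host
```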
13 ansible.cfg
@@ -1,14 +1,12 @@
[defaults]
# I have a large number of machines, which warrants a large forks setting
# here.
forks = 16
# Tune this higher if you have a large number of machines
forks = 8
# We set gathering to smart here as I'm often executing the site-wide playbook,
# which means a ton of redundant time gathering facts that haven't changed
# otherwise.
gathering = smart
# host_key_checking is disabled because nearly 90% of my Ansible plays are in
# ephemeral environments and I'm constantly spinning machines up and down.
# In theory this is an attack vector that I need to work on a solution for.
host_key_checking = false
# Explicitly set the python3 interpreter for legacy hosts.
interpreter_python = python3
@@ -28,7 +26,7 @@ roles_path = .roles:roles
system_warnings = true
# We set this to avoid circumstances in which we time out waiting for a privesc
# prompt. Zerotier, as a management network, can be a bit slow at times.
timeout = 60
#timeout = 30
# Bad
vault_password_file = ~/.vault_pass

@@ -41,9 +39,8 @@ always = true
become = true

[ssh_connection]
# The number of retries here is insane because of the volatility of my home
# network, where a number of my machines live.
retries = 15
# We set retries to be a fairly higher number, all things considered.
#retries = 3
# These extra args are used for bastioning, where the ephemeral Ansible
# controller remotes into a bastion machine to access the rest of the
# environment.
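The hunk cuts off before the args themselves, so the actual `[ssh_connection]` value is not shown here. For a sense of what they do, this is the command-line equivalent that the CI jobs in this changeset pass explicitly:

```bash
# Assumed equivalent of the bastioning extra args described above;
# the real ansible.cfg value is elided by the hunk.
ansible-playbook playbooks/site_pre.yml \
  --ssh-common-args='-o ProxyCommand="ssh -W %h:%p -q ansible@bastion1.dallas.mgmt.desu.ltd"' \
  --vault-password-file ~/.vault_pass
```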
33 contrib/cache-prod-inventory.sh (new executable file)
@@ -0,0 +1,33 @@
#! /bin/sh
#
# cache-prod-inventory.sh
# Copyright (C) 2025 Jacob Babor <jacob@babor.tech>
#
# Distributed under terms of the MIT license.
#

set -e

proddir="inventories/production"
invdir="inventories/production-cache"

# Sanity check
[ -d "$invdir" ] || {
	echo "Could not find $invdir; are you in the root of the repo?"
	exit 1
}

# Get the new data
[ -e "$invdir"/hosts.yml.new ] && rm "$invdir"/hosts.yml.new
ansible-inventory -i "$proddir" --list -y > "$invdir"/hosts.yml.new || {
	# And handle errors
	echo "Failed to get inventory; see above and $invdir/hosts.yml.new for errors"
	exit 2
}

# Shuffle shit around
# Note: spelled out instead of brace expansion (hosts.yml{,.old}), which is a
# bashism and would break under a POSIX /bin/sh
[ -e "$invdir"/hosts.yml.old ] && rm "$invdir"/hosts.yml.old
[ -e "$invdir"/hosts.yml ] && mv "$invdir"/hosts.yml "$invdir"/hosts.yml.old
[ -e "$invdir"/hosts.yml.new ] && mv "$invdir"/hosts.yml.new "$invdir"/hosts.yml

echo "Inventory cached. Use -i \"$invdir\""
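A usage sketch (the script prints the `-i` hint itself; `site.yml` is this repo's master playbook):

```bash
# Refresh the cached inventory, then run plays against it:
./contrib/cache-prod-inventory.sh
./site.yml -i "inventories/production-cache"
```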
4 contrib/submodule-checkout-masters-and-update.sh (new executable file)
@@ -0,0 +1,4 @@
#! /bin/sh
git submodule update --recursive --remote --init
git submodule -q foreach 'git checkout -q master && git pull'
git status
@@ -1 +0,0 @@
../production/host_vars
1 inventories/production-cache/.gitignore (vendored, new file)
@@ -0,0 +1 @@
hosts.yml*
1 inventories/production-cache/group_vars (new symbolic link)
@@ -0,0 +1 @@
../production/group_vars
@@ -1 +0,0 @@
../production/host_vars
@@ -17,6 +17,102 @@ netbox_token: !vault |
  37323530333463383062396363616263386430356438306133393130626365333932323734383165
  3064663435626339393836353837643730333266366436373033

# Terraria modlists
tml_basic_qol:
  # Better Zoom: Enables zooming out further than 100% for higher-res monitors
  - "2562953970"
  # Smarter Cursor: Cursor be smarter idort
  - "2877850919"
  # Heart Crystal & Life Fruit Glow
  - "2853619836"
  # Ore Excavation (Veinminer)
  - "2565639705"
  # Shared World Map
  - "2815010161"
  # Boss Cursor
  - "2816694149"
  # WMITF (What Mod Is This From (WAILA (WAWLA (WTFAILA))))
  - "2563851005"
  # Multiplayer Boss Fight Stats
  - "2822937879"
  # Census (Shows you all the NPCs and their move-in requirements)
  - "2687866031"
  # Shop Expander (Prevents overloading shops)
  - "2828370879"
  # Boss Checklist
  - "2669644269"
  # Auto Trash
  - "2565540604"
tml_advanced_qol:
  # Quality of Terraria (IT HAS INSTA HOIKS LET'S FUCKING GO)
  # Also adds the "Can be shimmered into" and similar text
  - "2797518634"
  # Chat Source
  - "2566083800"
  # The Shop Market (it's like the Market from that one Minecraft mod)
  - "2572367426"
  # Fishing with Explosives
  - "3238219681"
  # Generated Housing (Adds pregenned home)
  - "3141716573"
  # Happiness Removal
  - "2563345152"
tml_libs:
  # Luminance, library mod
  - "3222493606"
  # Subworld Lib: Required by a few mods (TSA and others)
  - "2785100219"
tml_basics:
  # Magic Storage Starter Kit
  - "2906446375"
  # Magic Storage, absoluteAquarian utilities
  - "2563309347"
  - "2908170107"
  # Wing Slot Extra
  - "2597324266"
  # Better Caves
  - "3158254975"
tml_calamity:
  # Calamity, Calamity Music, CalValEX
  - "2824688072"
  - "2824688266"
  - "2824688804"
tml_calamity_classes:
  # Calamity Ranger Expansion
  - "2860270524"
  # Calamity Whips
  - "2839001756"
tml_calamity_clamity:
  # Clamity (sic), Music
  - "3028584450"
  - "3161277410"
tml_fargos:
  # Luminance, library mod
  - "3222493606"
  # Fargos Mutant Mod. Adds the NPC and infinite items and instas and stuff
  - "2570931073"
  # Fargos Souls, adds... souls
  - "2815540735"
  # Fargos Souls DLC (Calamity compat)
  - "3044249615"
  # Fargos Souls More Cross-Mod (Consolaria, Spirit, Mod of Redemption compat)
  - "3326463997"
tml_touhou:
  # Gensokyo (UN Owen Was Her plays in the distance)
  - "2817254924"
tml_spirit:
  # Spirit Mod
  - "2982372319"
tml_secrets:
  # Secrets of the Shadows
  - "2843112914"
tml_yoyo_revamp:
  # Moomoo's Yoyo Revamp (and Lib)
  - "2977808495"
  - "3069154070"
tml_summoners_association:
  - "2561619075"

# Admin user configuration
adminuser_name: salt
adminuser_ssh_authorized_keys:
@@ -26,30 +122,38 @@ adminuser_ssh_authorized_keys:
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFS78eNBEZ1fWnGt0qyagCRG7P+8i3kYBqTYgou3O4U8 putty-generated on dsk-ryzen-0.desu.ltd
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINq8NPEqSM0w7CkhdhsSgDsrcpgAvVg18oz9OybkqhHg salt@dsk-ryzen-0
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGwFJmaV4JuxOOgF6Bqwo6FaCN5Mpcvd4/Vee7PsMBxu salt@lap-fw-diy-1.ws.mgmt.desu.ltd
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKwcV0mKhhQveIOjFKwt01S8WVtOn3Pfz6qa2P4/JR7S salt@lap-s76-lemp13-0.ws.mgmt.desu.ltd

# For backups
backup_restic_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  65623036653432326435353932623037626532316631613763623237323533363938363462316237
  6363613363346239666630323134643866653436633537300a663732363565383061326135656539
  33313334656330366632613334366664613366313631363964373038396636623735633830386336
  3230316663373966390a663732373134323561313633363435376263643834383739643739303761
  62376231353936333666613661323864343439383736386636356561636463626266
backup_s3_bucket: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61393939633736616361336162633564356434363963303737366236373332653265366132393439
  3333643463306561616261636466303631373866353962310a356561633833633533353937323265
  64656235616637366363323330346134656366663733393462346333613535633838333938653434
  6133326433613239650a386333626339363263323134313830353963326265666336306130656534
  6534
  66316231643933316261303631656432376339663264666661663634616465326537303331626634
  6235616564316638386434366534663639656236393861630a303530333835353432326131653735
  30313734383265376238306333323330366338646636336137653661373365633365393732386466
  3263373233653261330a663435643835643430326464623834303864646363373265336134643136
  6162
backup_s3_aws_access_key_id: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61353734383466366564333832643738313238666235336332303539383639626263633231396261
  6165393062393266343661643466633163383164383032340a333833656566336331323565386162
  35646665353539616538353339616531346564636466643639326366353165313861373761396537
  3731653463643838330a383065313135343763636534656133343666363237356462326236643631
  34366564373661396434663633346635663331393538363362376265653334623538
  62343334333230643465623639633334363331353266366533366464643162333238333363633763
  3431663162666566393738396165396639353230633537610a393863663234626134373962393132
  33356236626337313435383362336233366637646336663465366638343461663533373362316161
  3639313537393734350a636365366137353763333032366338323334333936633330333439376161
  62613232363231346562643064383066393761353566366438363766353536386461
backup_s3_aws_secret_access_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  64316231613337333231383837333930336561633164393762343838646136393165626361346637
  3364643830346533623137643530323438366665393632320a633032336664616261353734343661
  36646565383532616133353530343331663731663965656662363830363063303361373861663762
  3032613362626233350a613464333230363830383334363032303730646134306331383733363036
  34346334306633306664323337643433356336366633396239306539613539633535386238346662
  6232313138393062626631386135383234376361643362353966
  32616664316437316638636263653237386665396632313639363962376361393763373535356130
  6136353736616263326166633261356233383530613462370a353039303261306231366465326662
  39326233306565306639366165393930656461383334383931323263363031623333313462316433
  3635616437373236650a353661343131303332376161316664333833393833373830623130666633
  66356130646434653039363863346630363931383832353637636131626530616434
backup_s3_aws_endpoint_url: "https://s3.us-east-005.backblazeb2.com"

# For zerotier
@@ -68,6 +172,34 @@ zerotier_management_network_id: !vault |
  3430303130303766610a633131656431396332626336653562616363666433366664373635613934
  30316335396166633361666466346232323630396534386332613937366232613965

# For GCI
secret_gci_db_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  62616132613539386133343261393839636630613735323432346530353465383833323665356433
  3139396531383838616534643235313434646638356331630a336339323336343631396364316434
  32303163613863356465353761666666333037396633613461363939333730306362363965373636
  3265343639643432620a303637323461643866313062303838383038363334636666316138326638
  63646662353561353234326536343562666336636135303930663564353939376665
secret_gci_secret_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  33333164393639613865613664316639396338393335643533353237343430613030313234383364
  3239303838373162303031303061663236353736393635390a313534356530333230613037313765
  39313330303039656630316437363535393765326234356463383063316235396463323066393465
  3235636465363833390a636662336361663731343030343163633933363133373533333338386531
  38383331353465363432383564303666373033376434336635303633373836366134626565336232
  39663834656165636365343961663831373834333566623934336132633966353636656263643234
  626264646365633638343230343266393338

# For 5dd
five_dd_db_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  31343335306261333630316366366536356165346437393631643630636436626265616239666562
  3233353738643136356564396339666137353163393465330a306431376364353734346465643261
  64633065383939383562346332323636306565336139343734323861316335333932383863363233
  6130353534363563340a636164666631393132346535393936363963326430643638323330663437
  31396433303762633139376237373236383732623734626538653933366464623135

# For ara
secret_ara_db_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
@@ -86,78 +218,6 @@ secret_ara_secret_key: !vault |
  31346465336361316433383865613233373836643366346538633330616232386132636662643963
  303938396531623561653335646231616239

# For Firefly III
secret_firefly_app_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  36326365626537313464373434303833373261303835643035666431326335633634376364376233
  3664323235383337313266316466363734643331313862630a636164616462623965353331373266
  65653363353039653231316464653366303938656363333239313165313662636163323366303433
  6432633664666339660a383938333531333536666361633762633831363132366563396237346330
  32323266346363656336396264626231653331343862636632646466353236393438363037623466
  6535373866616238323339326338316330383064336138646663
secret_firefly_db_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  31386133326239313637393462633930626634653562303361326634323633363037303862313361
  3133356362333833653636623761386163646435633239370a613632646461303534353134346431
  36613930393235653862376639626238326561633064333565383564626330636639633136643365
  3565316233663262360a353631323762313130326361643532626334363263636539313233646362
  37633961633162353936386366623136633436306235336235363566616563366563
secret_firefly_access_token: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  65663238653330636264353332336237306565373135666462623937363564393366636231333662
  6130333864613462323864333832343261393730383332340a383032353036626630366564396231
  31396233383763653739363939343938663866666664623463616462376337383433646436303932
  6265396236383437380a633432633233663562303062316164343463636565356437353633663964
  32356462393036346433306366613333613236656535643662666237663335646461613434613738
  33626634333235323561633134653362636461306439663834623136376134353733653039653635
  61323863663566336265323831633532396337653432376362366533313064303635366539623033
  38353063366135646566376338333536376335653766666336306664616664616633616562663339
  32373138666262326666616234303938353631333663303834376161396232633635393133313235
  65626337356536383430346538616366336134383731643536656235376563303063306263306562
  62343631613837346138393936366135646636643463333637656137313931346661643261633437
  35343261643339343861636235323331346432656435323564396535376539303764663031393164
  63353932653866366634656631633133633333303564626466333265363138376638636534316166
  36353839383264613634336237343463366662313432376161643532356566626162313362383339
  64663739343365346264316363653939663663656231373262653439653765613764346336306465
  65336561396363323637396432633362376537626361383765326363363635306537613533356436
  62303439656661343337353933643963623730653732393236616533626564386339383965623334
  38366332666131303230636431626237623561623333313236636438613564333631633237663961
  61386435633832656639363962653138363863363861616162326430623133373330336236336232
  34636134333230393064303234343962633166323462363939323732336263346662643066633436
  37666234393733306364346161653138616564646534393266326632666435303531333866633332
  38323638393066623937646237393738343433393637346338356164346439333632343033366233
  66356163326164313735353738386637336365623331383337306538326663373666373639393238
  33363537376633373336376633666136386530633961373430313666313463616637663161303436
  32363265313739646164666534323165373562303766326338623534396434323162623533386337
  33653262663935306365393438613137373162353063306431666439383161613937653062313366
  35376630376530643464363364626561373137646165353464363937613235353635353833386661
  38613862303236316632646532373635303865643531663665386536613233373863346331633138
  33303561303637366138663834633634653861623462666634396237393663613465653032306237
  36303566356163666363653535616632366533633365306265333962303134306132656131316464
  39343864386139616230643238356335653736623064336163393832386332656130306465353566
  37393364323263623838663464346439373038303766643033356137633238343439303234326330
  65373037613435366232306530653734623036353333383935353937376136326237316361303538
  62343033333339613935393061323039396332646537656263386230373664336661653237663365
  66613961366531316631653334373563353032396462303265636464326261353531643132633764
  63663133636264386364393435323736303831313162646336646166396361643834313865303536
  65343734386630326432633930343462643065383535393033663132383933626337613732623536
  64323964396133326432336538616130303631306330343361366339343736373062313861663431
  63303031326561303566303164376531376535646665386263653630303832636661393561373233
  37663039633934666332336132343262626132613764343138376165633637656237353565646536
  34663965626333353034666134363966366531356635323739363331383761396638356265666537
  38326235613035383235396166323663343139663439613834306462666364643530633038373763
  31393431393464393530656435326531656665343362646634303734646436633364366339626139
  35326636343031626631653230633636393561663736623931316637323435626336383430613365
  32663237313161376261656261313737636465316664643531313639356533616265646264393636
  32646465663035336537363236643461666663653838626531333130383261653637313762623735
  35616362343331313035396232656361313032633630656530613833313064376335393365636439
  39646334663436643466633561646364373265366230656662633364646463373435623963306464
  61346164623739303335306138636531333938363566326336393462666132383838613837326664
  34613334306336656564636636393934303963626533616365363634353232326235653735663666
  33623938373530373166386162353635333135613837626437383435656439643064303961326664
  65613139313836663038393164363264383738376564363730616635326233376533313161303564
  66636639663531333166616635396630616237666232343464653139646364653339

# For GVM
secret_gvm_db_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
@@ -222,22 +282,15 @@ secret_gitlab_db_pass: !vault |
  3365636636316534660a373562346462363935393565366636353061343932663763663532383565
  36666438366337303362373838626234363266646132363235323436653131363735

# For Nagios
secret_nagios_admin_pass: !vault |
# For Grafana
secret_grafana_matrix_token: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  64333231393831303031616363363030613464653161313531316465346263313063626638363437
  3965303861646232393663633066363039636637343161340a643162633133336335313632383861
  34616338636630633539353335336631313361656633333539323130626132356263653436343363
  3930323538613137370a373861376566376631356564623665313662636562626234643862343863
  61326232633266633262613931303631396163326266386363366639366639613938
secret_nagios_matrix_token: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  66366665666437643765366533646666386162393038653262333461376566333366363332643135
  6233376362633566303939623832636366333330393238370a323766366164393733383736633435
  37633137626634643530653665613166633439376333633663633561313864396465623036653063
  6433376138386531380a383762393137613738643538343438633730313135613730613139393536
  35666133666262383862663637623738643836383633653864626231623034613662646563623936
  3763356331333561383833386162616664376335333139376363
  62313634386364663564353664656437623863366137343938666635663638313464663838343135
  6361366536363232396434333136653632376539343432390a623033636534313865306465373563
  31343565343937376336393263616134373333336237623166333966633639646535613234316638
  6634313534336635610a373363313737643165346264333736316362316438376662643665333661
  30326666616362366133396562323433323435613232666337336430623230383765346333343232
  3765346238303835633337636233376263366130303436336439

# For Netbox
secret_netbox_user_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
@@ -379,15 +432,6 @@ secret_synapse_db_pass: !vault |
  3663623537333161630a616263656362633461366462613366323262363734353233373330393932
  36653333643632313139396631633962386533323330346639363736353863313763

# For Vaultwarden
secret_vaultwarden_db_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61396131623266353764386535373334653337353337326464353636343863643733663333333531
  6664376235396139616466646462623666663164323461610a336566396135343431356332626337
  32373535343266613565313531653061316438313332333261353435366661353437663361346434
  3536306466306362340a313563333065383733373834393131306661383932643565373161356162
  33643434396436343037656339343336653637356233313034356632626538616366

# For home media stuff
secret_transmission_user_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
@@ -2,35 +2,3 @@

# Docker settings
docker_apt_arch: arm64

# DB secrets
secret_grafana_local_db_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  32326333383035393665316566363266623130313435353165613463336663393634353261623738
  3466636437303938363332633635363666633965386534630a646132666239623666306133313034
  63343030613033653964303330643063326636346263363264333061663964373036386536313333
  6432613734616361380a346138396335366638323266613963623731633437653964326465373538
  63613762633635613232303565383032313164393935303531356666303965663463366335376137
  6135376566336662313734333235633362386132333064303534
secret_netbox_local_db_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  33333232623431393930626435313138643963663731336530663066633563666439383936316538
  6337376232613937303635386235346561326134616265300a326266373834303137623439366438
  33616365353663633434653463643964613231343335326234343331396137363439666138376332
  3564356231336230630a336639656337353538633931623536303430363836386137646563613338
  66326661313064306162363265303636333765383736336231346136383763613131
secret_keepalived_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  65353963616637303932643435643262333438666566333138373539393836636135656162323965
  3036313035343835393439663065326536323464316566340a613966333731356631613536643332
  64613934346234316564613564363863356663653063333432316434353633333138643561316638
  6563386233656364310a626363663234623161363537323035663663383333353138386239623934
  65613231666661633262633439393462316337393532623263363630353133373236
secret_firefly_db_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  31656262333131613762373430323032663634316133346661333762323631323931633633623666
  6665373939396238383965653635653039336635313361350a333133303239323262383938303436
  64396137343737346362646330323662333731376332306663336638333161313835626261343031
  3165643531336534650a393237623435663566346332313838616137343831643030333230356230
  65386234316565666465376538333661623938326234323136303764376239326135
@@ -1 +0,0 @@
zerotier_repo_deb: "deb http://download.zerotier.com/debian/jammy jammy main"
@@ -1,2 +0,0 @@
# vim:ft=ansible
docker_apt_repository: "deb https://download.docker.com/linux/ubuntu focal stable"
12 oneoffs/local_backup.yml (new executable file)
@@ -0,0 +1,12 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
---
# Home desktops
- hosts: localhost
  roles:
    - role: backup
      vars:
        backup_s3backup_tar_args_extra: h
        backup_s3backup_list_extra:
          - /home/salt/.backup/
      tags: [ backup ]
@@ -5,7 +5,4 @@
  become: no
  tasks:
    - name: print os info
      debug: msg="{{ item }}"
      with_items:
        - "{{ ansible_distribution }}"
        - "{{ ansible_distribution_version }}"
      debug: msg="{{ inventory_hostname }} - {{ ansible_distribution }} {{ ansible_distribution_version }}"
@@ -22,7 +22,6 @@
    PermitRootLogin: no
    PrintMotd: no
    PubkeyAuthentication: yes
    Subsystem: "sftp /usr/lib/openssh/sftp-server"
    UsePAM: yes
    X11Forwarding: no
# We avoid running on "atomic_container" distros since they already ship
@@ -3,7 +3,6 @@
---
# Home desktops
- hosts: device_roles_bastion
  gather_facts: no
  roles:
    - role: backup
      vars:
@@ -4,26 +4,23 @@
# Home desktops
- hosts: device_roles_workstation
  roles:
    - role: backup
      vars:
        backup_s3backup_exclude_list_extra:
          # This isn't prefixed with / because, on ostree systems, this is in /var/home
          - "home/*/.var/app/com.valvesoftware.Steam"
          - "home/*/.var/app/com.visualstudio.code"
          - "home/*/.var/app/com.vscodium.codium"
          - "home/*/.cache"
          - "home/*/.ollama"
          - "home/*/.local/share/containers"
          - "home/*/.local/share/Trash"
      tags: [ backup ]
    - role: desktop
      tags: [ desktop ]
    - role: udev
      vars:
        udev_rules:
          # Switch RCM stuff
          - SUBSYSTEM=="usb", ATTR{idVendor}=="0955", MODE="0664", GROUP="plugdev"
      tags: [ desktop, udev ]
- hosts: lap-fw-diy-1.ws.mgmt.desu.ltd
  roles:
    - role: backup
      vars:
        backup_s3backup_tar_args_extra: h
        backup_s3backup_list_extra:
          - /home/salt/.backup/
      tags: [ backup ]
- hosts: dsk-ryzen-1.ws.mgmt.desu.ltd
  roles:
    - role: desktop
    - role: backup
      vars:
        backup_s3backup_tar_args_extra: h
        backup_s3backup_list_extra:
          - /home/salt/.backup/
      tags: [ backup ]
@@ -2,8 +2,7 @@
# vim:ft=ansible:
---
# Home media storage Pi
- hosts: pi-homeauto-1.home.mgmt.desu.ltd
  gather_facts: no
- hosts: srv-fw-13-1.home.mgmt.desu.ltd
  module_defaults:
    docker_container:
      state: started
@@ -15,10 +14,22 @@
      tags: [ docker ]
  tasks:
    - name: include tasks for apps
      include_tasks: tasks/app/{{ task }}
      include_tasks: tasks/{{ task }}
      with_items:
        - ddns-route53.yml
        - homeassistant.yml
        # Home automation shit
        - app/ddns-route53.yml
        - app/homeassistant.yml
        - app/prometheus-netgear-exporter.yml
        # Media acquisition
        - web/lidarr.yml
        - web/prowlarr.yml
        - web/radarr.yml
        - web/sonarr.yml
        - web/bazarr.yml
        - web/transmission.yml
        # Media presentation
        - web/navidrome.yml
        - web/jellyfin.yml
      loop_control:
        loop_var: task
      tags: [ always ]
@@ -27,18 +38,11 @@
      vars:
        backup_s3backup_list_extra:
          - /data
        backup_time: "Sun *-*-* 02:00:00"
      tags: [ backup ]
    - role: ingress
    - role: ingress-traefik
      vars:
        ingress_container_image: "nginx:latest"
        ingress_container_ports:
          - 80:80
        ingress_container_config_mount: /etc/nginx/conf.d
        ingress_container_persist_dir: /data/nginx
        ingress_listen_args: 80
        ingress_listen_tls: no
        ingress_servers:
          - name: homeauto.local.desu.ltd
            proxy_pass: http://localhost:8123
        ingress_container_tls: no
        ingress_container_dashboard: no
      tags: [ ingress ]
    # - role: kodi
    #   tags: [ kodi ]
@@ -89,15 +89,14 @@
        type: "{{ item.type | default('CNAME', true) }}"
        ttl: 3600
        state: "{{ item.state | default('present', true) }}"
        zone: "{{ item.zone | default('desu.ltd', true) }}"
        value: [ "{{ item.value }}" ]
      with_items:
        # Public
        - record: firefly.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        - record: firefly-importer.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        - record: git.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        - record: grafana.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        - record: matrix.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        - record: movie.desu.ltd
@@ -108,15 +107,42 @@
          value: vm-general-1.ashburn.mgmt.desu.ltd
        - record: netbox.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        - record: prometheus.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        # Games
        - record: 5dd.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        # Public media stuff
        # music and jellyfin are proxied through ashburn
        - record: music.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        - record: jellyfin.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
        - record: lidarr.media.desu.ltd
          value: srv-fw-13-1.home.mgmt.desu.ltd
        - record: prowlarr.media.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
          value: srv-fw-13-1.home.mgmt.desu.ltd
        - record: slskd.media.desu.ltd
          value: srv-fw-13-1.home.mgmt.desu.ltd
        - record: sonarr.media.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
          value: srv-fw-13-1.home.mgmt.desu.ltd
        - record: radarr.media.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
          value: srv-fw-13-1.home.mgmt.desu.ltd
        - record: bazarr.media.desu.ltd
          value: srv-fw-13-1.home.mgmt.desu.ltd
        - record: transmission.media.desu.ltd
          value: vm-general-1.ashburn.mgmt.desu.ltd
          value: srv-fw-13-1.home.mgmt.desu.ltd
        # HA
        - record: homeassistant.desu.ltd
          value: srv-fw-13-1.home.mgmt.desu.ltd
        # Secondary projects
        - record: guncadindex.com
          value: 5.161.185.67
          type: A
          zone: guncadindex.com
        - record: www.guncadindex.com
          value: guncadindex.com
          zone: guncadindex.com
      loop_control:
        label: "{{ item.record }}"
      delegate_to: localhost
@@ -2,10 +2,45 @@
# vim:ft=ansible:
# Database servers
---
- hosts: vm-general-1.ashburn.mgmt.desu.ltd,vm-general-2.ashburn.mgmt.desu.ltd
  tasks:
    - name: assure postgresql repo key
      ansible.builtin.apt_key:
        url: https://www.postgresql.org/media/keys/ACCC4CF8.asc
        state: present
      tags: [ db, psql, repo ]
    - name: assure postgresql repo
      ansible.builtin.apt_repository:
        # Ex. "focal-pgdg main"
        repo: 'deb http://apt.postgresql.org/pub/repos/apt {{ ansible_distribution_release }}-pgdg main'
        state: present
      tags: [ db, psql, repo ]
- hosts: vm-general-1.ashburn.mgmt.desu.ltd
  tasks:
    - name: assure prometheus psql exporter
      ansible.builtin.docker_container:
        name: prometheus-psql-exporter
        image: quay.io/prometheuscommunity/postgres-exporter
        restart_policy: unless-stopped
        env:
          DATA_SOURCE_URI: "10.0.0.2:5432/postgres"
          DATA_SOURCE_USER: "nagios"
          DATA_SOURCE_PASS: "{{ secret_postgresql_monitoring_password }}"
        ports:
          - 9102:9187/tcp
      tags: [ db, psql, prometheus, monitoring, docker ]
  roles:
    - role: geerlingguy.postgresql
      vars:
        postgresql_version: "14"
        postgresql_data_dir: "/var/lib/postgresql/{{ postgresql_version }}/main"
        postgresql_bin_path: "/var/lib/postgresql/{{ postgresql_version }}/bin"
        postgresql_config_path: "/etc/postgresql/{{ postgresql_version }}/main"
        postgresql_packages:
          - "postgresql-{{ postgresql_version }}"
          - "postgresql-client-{{ postgresql_version }}"
          - "postgresql-server-dev-{{ postgresql_version }}"
          - libpq-dev
        postgresql_global_config_options:
          - option: listen_addresses
            value: 10.0.0.2,127.0.0.1
@@ -25,49 +60,78 @@
          # Used for internal access from Docker
          - { type: host, database: all, user: all, address: '172.16.0.0/12', auth_method: md5 }
        postgresql_users:
          - name: ara-desultd
            password: "{{ secret_ara_db_pass }}"
          - name: firefly-desultd
            password: "{{ secret_firefly_db_pass }}"
          - name: gitea-desultd
            password: "{{ secret_gitea_db_pass }}"
          - name: gitlab-desultd
            password: "{{ secret_gitlab_db_pass }}"
          - name: nagios
            password: "{{ secret_postgresql_monitoring_password }}"
          - name: netbox-desultd
            password: "{{ secret_netbox_db_pass }}"
          - name: nextcloud-desultd
            password: "{{ secret_nextcloud_db_pass }}"
          - name: peertube-cowfee
            password: "{{ secret_peertube_db_pass }}"
          - name: pleroma-cowfee
            password: "{{ secret_pleroma_9iron_db_pass }}"
          - name: synapse-desultd
            password: "{{ secret_synapse_db_pass }}"
          - name: vaultwarden-desultd
            password: "{{ secret_vaultwarden_db_pass }}"
        postgresql_databases:
          - name: ara-desultd
            owner: ara-desultd
          - name: firefly-desultd
            owner: firefly-desultd
          - name: gitea-desultd
            owner: gitea-desultd
          - name: gitlab-desultd
            owner: gitlab-desultd
          - name: netbox-desultd
            owner: netbox-desultd
          - name: nextcloud-desultd
            owner: nextcloud-desultd
          - name: pleroma_cowfee
            owner: pleroma-cowfee
          - name: peertube
            owner: peertube-cowfee
          - name: synapse-desultd
            lc_collate: C
            lc_ctype: C
            owner: synapse-desultd
          - name: vaultwarden-desultd
            owner: vaultwarden-desultd
      tags: [ db, psql ]
- hosts: vm-general-2.ashburn.mgmt.desu.ltd
  tasks:
    - name: assure prometheus psql exporter
      ansible.builtin.docker_container:
        name: prometheus-psql-exporter
        image: quay.io/prometheuscommunity/postgres-exporter
        restart_policy: unless-stopped
        env:
          DATA_SOURCE_URI: "10.0.0.2:5432/postgres"
          DATA_SOURCE_USER: "nagios"
          DATA_SOURCE_PASS: "{{ secret_postgresql_monitoring_password }}"
        ports:
          - 9102:9187/tcp
      tags: [ db, psql, prometheus, monitoring, docker ]
  roles:
    - role: geerlingguy.postgresql
      vars:
        postgresql_version: "14"
        postgresql_data_dir: "/var/lib/postgresql/{{ postgresql_version }}/main"
        postgresql_bin_path: "/var/lib/postgresql/{{ postgresql_version }}/bin"
        postgresql_config_path: "/etc/postgresql/{{ postgresql_version }}/main"
        postgresql_packages:
          - "postgresql-{{ postgresql_version }}"
          - "postgresql-client-{{ postgresql_version }}"
          - "postgresql-server-dev-{{ postgresql_version }}"
          - libpq-dev
        postgresql_global_config_options:
          - option: listen_addresses
            value: 10.0.0.2,127.0.0.1
          - option: max_connections
            value: 240
          - option: shared_buffers
            value: 128MB
          - option: log_directory
            value: 'log'
        postgresql_hba_entries:
          - { type: local, database: all, user: postgres, auth_method: trust }
          - { type: local, database: all, user: all, auth_method: md5 }
          - { type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5 }
          - { type: host, database: all, user: all, address: '::1/128', auth_method: md5 }
          # Used for internal access from other nodes
          - { type: host, database: all, user: all, address: '10.0.0.0/8', auth_method: md5 }
          # Used for internal access from Docker
          - { type: host, database: all, user: all, address: '172.16.0.0/12', auth_method: md5 }
        postgresql_users:
          - name: nagios
            password: "{{ secret_postgresql_monitoring_password }}"
          - name: guncad-index-prod
            password: "{{ secret_gci_db_pass }}"
        postgresql_databases:
          - name: guncad-index-prod
            owner: guncad-index-prod
      tags: [ db, psql ]
@ -2,8 +2,131 @@
|
||||
# vim:ft=ansible:
|
||||
# Webservers
|
||||
---
|
||||
- hosts: vm-general-2.ashburn.mgmt.desu.ltd
|
||||
module_defaults:
|
||||
docker_container:
|
||||
restart_policy: unless-stopped
|
||||
pull: yes
|
||||
pre_tasks:
|
||||
- name: ensure docker network
|
||||
docker_network: name=web
|
||||
tags: [ docker ]
|
||||
tasks:
|
||||
- name: docker deploy guncad-index
|
||||
docker_container:
|
||||
name: guncad-index
|
||||
state: started
|
||||
image: registry.gitlab.com/guncad-index/index:latest
|
||||
env:
|
||||
# Global settings
|
||||
TZ: "America/Chicago"
|
||||
# Django/Gunicorn settings
|
||||
GUNCAD_HTTPS: "True"
|
||||
GUNCAD_ALLOWED_HOSTS: "guncadindex.com"
|
||||
GUNCAD_CSRF_ORIGINS: "https://guncadindex.com"
|
||||
GUNCAD_SECRET_KEY: "{{ secret_gci_secret_key }}"
|
||||
GUNCAD_SITE_ID: com-guncadindex
|
||||
GUNCAD_GUNICORN_WORKERS: "16"
|
||||
# GCI settings
|
||||
GUNCAD_SITE_NAME: GunCAD Index
|
||||
GUNCAD_SITE_TAGLINE: A search engine for guns
|
||||
GUNCAD_ADMIN_CONTACT: |
|
||||
Join the Matrix space <a href="https://matrix.to/#/#guncad-index:matrix.org">#guncad-index:matrix.org</a><br />
|
||||
Hit me up on twitter <a href="https://x.com/theshittinator" target="_blank">@theshittinator</a><br /><br />
|
||||
You can also <a href="https://ko-fi.com/theshittinator" target="_blank">support development on ko-fi</a>
|
||||
# DB connection info
|
||||
GUNCAD_DB_USER: guncad-index-prod
|
||||
GUNCAD_DB_PASS: "{{ secret_gci_db_pass }}"
|
||||
GUNCAD_DB_NAME: guncad-index-prod
|
||||
GUNCAD_DB_HOST: 10.0.0.2
|
||||
networks:
|
||||
- name: web
|
||||
aliases: [ "guncad-index" ]
|
||||
volumes:
|
||||
- /data/guncad-index/data:/data
|
||||
- /data/guncad-index/lbry:/home/django/.local/share/lbry
|
||||
tags: [ docker, guncad-index, guncad, index, gci ]
|
||||
roles:
|
||||
- role: backup
|
||||
vars:
|
||||
backup_s3backup_list_extra:
|
||||
- /data
|
||||
- role: ingress
|
||||
vars:
|
||||
ingress_head: |
|
||||
server_tokens off;
|
||||
open_file_cache max=10000 inactive=6h;
|
||||
open_file_cache_valid 5m;
|
||||
open_file_cache_min_uses 1;
|
||||
open_file_cache_errors on;
|
||||
geo $whitelist {
|
||||
{{ common_home_address }}/{{ common_home_address_mask }} 1;
|
||||
}
|
||||
map $whitelist $limit {
|
||||
0 $binary_remote_addr;
|
||||
1 "";
|
||||
}
|
||||
limit_req_zone $limit zone=site:10m rate=20r/s;
|
||||
limit_req_zone $limit zone=api:10m rate=20r/s;
|
||||
proxy_cache_path /var/cache/nginx/proxy_cache levels=1:2 keys_zone=gci_cache:100m inactive=60m;
|
||||
proxy_cache_key "$scheme$request_method$host$request_uri";
|
||||
ingress_container_volumes_extra:
|
||||
- /data/guncad-index/data/static:/var/www/gci/static:ro
|
||||
- /data/nginx-certbot/proxy_cache:/var/cache/nginx/proxy_cache
|
||||
ingress_servers:
|
||||
- name: guncadindex.com
|
||||
proxies:
|
||||
- location: "/"
|
||||
extra: |
|
||||
set $bypass_cache 0;
|
||||
if ($arg_sort = "random") {
|
||||
set $bypass_cache 1;
|
||||
}
|
||||
if ($uri ~* "^/(api|admin|tools)") {
|
||||
set $bypass_cache 1;
|
||||
}
|
||||
proxy_cache gci_cache;
|
||||
proxy_cache_bypass $bypass_cache $cookie_sessionid;
|
||||
proxy_no_cache $bypass_cache $cookie_sessionid;
|
||||
proxy_cache_valid 200 30m;
|
||||
proxy_cache_valid 404 1m;
|
||||
proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
|
||||
limit_req_status 429;
|
||||
limit_req zone=site burst=25 delay=10;
|
||||
add_header X-Cache $upstream_cache_status;
|
||||
error_page 502 /static/maintenance.html;
|
||||
pass: http://guncad-index:8080
|
||||
- location: "/api"
|
||||
extra: |
|
||||
limit_req_status 429;
|
||||
limit_req zone=api burst=50 delay=10;
|
||||
pass: http://guncad-index:8080
|
||||
- location: "/admin"
|
||||
extra: |
|
||||
limit_req_status 429;
|
||||
limit_req zone=site burst=25 delay=10;
|
||||
pass: http://guncad-index:8080
|
||||
locations:
|
||||
- location: "/static"
|
||||
contents: |
|
||||
root /var/www/gci;
|
||||
expires 1y;
|
||||
add_header X-Content-Type-Options nosniff;
|
||||
add_header Cache-Control "public, max-age=31536000, immutable";
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
- location: "/static/maintenance.html"
|
||||
contents: |
|
||||
root /var/www/gci;
|
||||
- name: www.guncadindex.com
|
||||
locations:
|
||||
- location: "/"
|
||||
contents: |
|
||||
return 301 $scheme://guncadindex.com$request_uri;
|
||||
tags: [ web, docker, ingress ]
|
||||
- hosts: vm-general-1.ashburn.mgmt.desu.ltd
|
||||
gather_facts: no
|
||||
#gather_facts: no
|
||||
module_defaults:
|
||||
docker_container:
|
||||
restart_policy: unless-stopped
|
||||
@ -16,25 +139,27 @@
|
||||
- name: include tasks for applications
|
||||
include_tasks: tasks/{{ item }}
|
||||
with_items:
|
||||
# Applications
|
||||
- app/gitlab-runner.yml
|
||||
- app/redis.yml
|
||||
# Frontend web services
|
||||
- web/9iron.yml
|
||||
- web/desultd.yml
|
||||
- web/element-web.yml
|
||||
- web/firefly-iii.yml
|
||||
- web/gitea.yml
|
||||
- web/grafana.yml
|
||||
- web/netbox.yml
|
||||
- web/nextcloud.yml
|
||||
- web/prowlarr.yml
|
||||
- web/radarr.yml
|
||||
- web/sonarr.yml
|
||||
- web/srv.yml
|
||||
- web/synapse.yml
|
||||
- web/transmission.yml
|
||||
# Backend web services
|
||||
- web/srv.yml
|
||||
# Games
|
||||
- game/factorio.yml
|
||||
- game/minecraft-vanilla.yml
|
||||
- game/minecraft-direwolf20.yml
|
||||
- game/minecraft-createfarming.yml
|
||||
- game/minecraft-magicpack.yml
|
||||
- game/minecraft-weedie.yml
|
||||
- game/zomboid.yml
|
||||
- game/satisfactory.yml
|
||||
tags: [ always ]
|
||||
roles:
|
||||
- role: backup
|
||||
@ -43,184 +168,45 @@
|
||||
- /app/gitea/gitea
|
||||
- /data
|
||||
backup_s3backup_exclude_list_extra:
|
||||
- /var/lib/gitea/log
|
||||
- /data/gitea/data/gitea/log
|
||||
- /data/minecraft/oldpack/backups
|
||||
- /data/minecraft/stoneblock/backups
|
||||
- /data/minecraft/create-extra/backups
|
||||
- /data/minecraft/magicpack/backups
|
||||
- /data/minecraft/direwolf20/backups
|
||||
- /data/minecraft/prominence/FeedTheBeast/world/.git
|
||||
- /data/sb-mirror
|
||||
- /data/minecraft/weedie/backups
|
||||
- /data/shared/media
|
||||
- /data/shared/downloads
|
||||
- /data/terraria/generic/backups
|
||||
- /data/zomboid/ZomboidDedicatedServer/steamapps/workshop
|
||||
tags: [ backup ]
|
||||
# - role: docker-tmodloader14
|
||||
# tags: [ terraria, tmodloader ]
|
||||
# - role: docker-tmodloader14
|
||||
# vars:
|
||||
# tmodloader_external_port: "7778"
|
||||
# tmodloader_name: "test"
|
||||
# tags: [ terraria-test, tmodloader-test ]
|
||||
- role: git
|
||||
vars:
|
||||
git_repos:
|
||||
- repo: https://git.desu.ltd/salt/gitea-custom
|
||||
dest: /data/gitea/data/gitea/custom
|
||||
tags: [ web, git ]
|
||||
- role: nagios
|
||||
- role: prometheus
|
||||
tags: [ prometheus, monitoring, no-test ]
|
||||
- role: gameserver-terraria
|
||||
vars:
|
||||
nagios_matrix_server: "https://matrix.desu.ltd"
|
||||
nagios_matrix_room: "!NWNCKlNmOTcarMcMIh:desu.ltd"
|
||||
nagios_matrix_token: "{{ secret_nagios_matrix_token }}"
|
||||
nagios_data_dir: /data/nagios
|
||||
nagios_admin_pass: "{{ secret_nagios_admin_pass }}"
|
||||
nagios_contacts:
|
||||
- name: matrix
|
||||
host_notification_commands: notify-host-by-matrix
|
||||
service_notification_commands: notify-service-by-matrix
|
||||
host_notification_period: ansible-not-late-at-night
|
||||
service_notification_period: ansible-not-late-at-night
|
||||
extra:
|
||||
- key: contactgroups
|
||||
value: ansible
|
||||
- name: salt
|
||||
host_notification_commands: notify-host-by-email
|
||||
service_notification_commands: notify-service-by-email
|
||||
extra:
|
||||
- key: email
|
||||
value: alerts@babor.tech
|
||||
nagios_commands:
|
||||
# This command is included in the container image
|
||||
- name: check_nrpe
|
||||
command: "$USER1$/check_nrpe -H $HOSTADDRESS$ -c $ARG1$"
|
||||
- name: check_by_ssh
|
||||
command: "$USER1$/check_by_ssh -H $HOSTADDRESS$ -F /opt/nagios/etc/ssh_config -t 30 -q -i /opt/nagios/etc/id_ed25519 -l nagios-checker -C \"$ARG1$\""
|
||||
- name: notify-host-by-matrix
|
||||
command: "/usr/bin/printf \"%b\" \"$NOTIFICATIONTYPE$\\n$HOSTNAME$ is $HOSTSTATE$\\nAddress: $HOSTADDRESS$\\nInfo: $HOSTOUTPUT$\\nDate/Time: $LONGDATETIME$\" | /opt/Custom-Nagios-Plugins/notify-by-matrix"
|
||||
- name: notify-service-by-matrix
|
||||
command: "/usr/bin/printf \"%b\" \"$NOTIFICATIONTYPE$\\nService $HOSTALIAS$ - $SERVICEDESC$ is $SERVICESTATE$\\nInfo: $SERVICEOUTPUT$\\nDate/Time: $LONGDATETIME$\" | /opt/Custom-Nagios-Plugins/notify-by-matrix"
nagios_services:
# Agentless checks
- name: HTTP
command: check_http
hostgroup: tag-nagios-checkhttp
- name: HTTPS
command: check_http!--ssl
hostgroup: tag-nagios-checkhttp
- name: SSH
command: check_ssh
# check_by_ssh checks
- name: CPU Utilization
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_cpu_stats -w 75 -c 90
- name: DNS Resolution
command: check_by_ssh!/usr/lib/nagios/plugins/check_etc_resolv
- name: Executables in tmp
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_executables_in_tmp
- name: Last Ansible Play
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_file_age /var/lib/ansible-last-run -w 432000 -c 604800
- name: Memory Usage
command: check_by_ssh!/usr/lib/nagios/plugins/check_memory -w 10% -c 5%
hostgroup: "ansible,!tag-prov-zfs"
- name: Ping Self over DNS
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_ping_by_hostname
- name: Reboot Required
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_reboot_required
- name: Unit atd.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit atd.service
- name: Unit backup.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit backup.service
hostgroup: "ansible,!role-hypervisor"
- name: Unit backup.timer
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit backup.timer
hostgroup: "ansible,!role-hypervisor"
- name: Unit cron.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit cron.service
- name: Unit dbus.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit dbus.service
- name: Unit ssh.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit ssh.service
- name: Unit systemd-resolved.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit systemd-resolved.service
hostgroup: "ansible,!role-hypervisor"
- name: Users
command: check_by_ssh!/usr/lib/nagios/plugins/check_users -w 3 -c 5
# Privileged checks
# Required because check_disk may attempt to get the free space of
# restricted mountpoints
- name: Disk Usage
command: check_by_ssh!/usr/bin/sudo /usr/lib/nagios/plugins/check_disk -M -u GB -X nfs -X tracefs -X cgroup -X tmpfs -X overlay -X shm -w 15% -c 10% -W 15% -K 10% -A -I '^/run/' -I '^udev$' -I '^/var/lib/kubelet/' -I '^/tmp/.mount_' -I '^/dev/loop'
# Device type checks
# R720
- name: CPU0 Temperature
command: check_by_ssh!/usr/bin/sudo /usr/local/bin/monitoring-scripts/check_temp -n -w 65 -c 75 --sensor coretemp-isa-0000
hostgroup: device-type-r720
- name: CPU1 Temperature
command: check_by_ssh!/usr/bin/sudo /usr/local/bin/monitoring-scripts/check_temp -n -w 65 -c 75 --sensor coretemp-isa-0001
hostgroup: device-type-r720
# Pi 4 4G
- name: CPU Temperature
command: check_by_ssh!/usr/bin/sudo /usr/local/bin/monitoring-scripts/check_temp -n -w 65 -c 75 --sensor cpu_thermal-virtual-0
hostgroup: device-type-pi4b-2g,device-type-pi4b-4g,device-type-pi4b-4g-storage
# Device role checks
# hypervisor (which is assumed to be Proxmox)
- name: PVE Unit pve-firewall.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit pve-firewall.service
hostgroup: role-hypervisor
- name: PVE Unit spiceproxy.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit spiceproxy.service
hostgroup: role-hypervisor
- name: PVE Unit pve-ha-crm.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit pve-ha-crm.service
hostgroup: role-hypervisor
- name: PVE Unit pvedaemon.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit pvedaemon.service
hostgroup: role-hypervisor
- name: PVE Unit pvefw-logger.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit pvefw-logger.service
hostgroup: role-hypervisor
- name: PVE Unit pveproxy.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit pveproxy.service
hostgroup: role-hypervisor
- name: PVE Unit pve-cluster.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit pve-cluster.service
hostgroup: role-hypervisor
- name: PVE Unit pvestatd.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit pvestatd.service
hostgroup: role-hypervisor
# Tag-specific checks
# docker
- name: Unit docker.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit docker.service
hostgroup: "ansible,!tag-no-docker"
- name: Docker Status
command: check_by_ssh!/usr/bin/sudo /usr/local/bin/monitoring-scripts/check_docker --no-ok --status running
hostgroup: tag-nagios-checkdocker
# nagios-checkpgsql
- name: PSQL
command: "check_by_ssh!/usr/lib/nagios/plugins/check_pgsql -H localhost -l nagios -p {{ secret_postgresql_monitoring_password }} -w 2 -c 5"
hostgroup: tag-nagios-checkpgsql
- name: PSQL Connections
command: "check_by_ssh!/usr/lib/nagios/plugins/check_pgsql -H localhost -l nagios -p {{ secret_postgresql_monitoring_password }} -w 2 -c 5 -q 'select (select count(*)::float used from pg_stat_activity) / (select setting::int max_conn from pg_settings where name=\\$\\$max_connections\\$\\$)' -W 0.7-0.8 -C 0.8-1.0"
hostgroup: tag-nagios-checkpgsql
# https://rhaas.blogspot.com/2020/02/useless-vacuuming.html
- name: PSQL Old Xacts
command: "check_by_ssh!/usr/lib/nagios/plugins/check_pgsql -H localhost -l nagios -p {{ secret_postgresql_monitoring_password }} -w 2 -c 5 -q 'select count(*)::float from pg_prepared_xacts where age(transaction) > 5000000' -W 500-1000 -C 1000-1000000"
hostgroup: tag-nagios-checkpgsql
- name: Unit postgresql.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit postgresql.service
hostgroup: tag-nagios-checkpgsql
# nagios-checkswap
- name: Swap Usage
command: check_by_ssh!/usr/lib/nagios/plugins/check_swap -w 20% -c 10%
hostgroup: tag-nagios-checkswap
# zerotier
- name: Unit zerotier-one.service
command: check_by_ssh!/usr/local/bin/monitoring-scripts/check_systemd_unit zerotier-one.service
hostgroup: tag-zt-personal
tags: [ nagios, no-auto ]
terraria_server_name: "lea-wants-to-play"
terraria_motd: "DID SOMEBODY SAY MEATLOAF??"
terraria_world_name: "SuperBepisLand"
terraria_world_seed: "Make it 'all eight'. As many eights as you can fit in the text box."
terraria_mods: "{{ tml_basics + tml_basic_qol + tml_libs + tml_calamity + tml_yoyo_revamp + tml_calamity_classes + tml_summoners_association }}"
tags: [ terraria, tmodloader, lea ]
# - role: gameserver-terraria
# vars:
# terraria_server_remove: yes
# terraria_server_name: "generic"
# terraria_world_name: "Seaborgium"
# terraria_world_seed: "benis"
# terraria_mods: "{{ tml_basic_qol + tml_advanced_qol + tml_libs + tml_basics + tml_calamity + tml_calamity_classes + tml_calamity_clamity + tml_fargos + tml_touhou + tml_yoyo_revamp + tml_spirit + tml_secrets + tml_yoyo_revamp }}"
# tags: [ terraria, tmodloader, generic ]
- role: ingress
vars:
ingress_head: |
# Used by Grafana, required for its API or some shit
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
ingress_servers:
# desu.ltd
- name: desu.ltd
@@ -234,15 +220,18 @@
contents: |
default_type application/json;
return 200 '{"m.homeserver":{"base_url":"https://matrix.desu.ltd"}}';
- name: firefly.desu.ltd
proxy_pass: http://firefly:8080
- name: firefly-importer.desu.ltd
directives:
- "allow {{ common_home_address }}/{{ common_home_address_mask }}"
- "deny all"
proxy_pass: http://firefly-importer:8080
- name: git.desu.ltd
proxy_pass: http://gitea:3000
- name: grafana.desu.ltd
proxy_pass: http://grafana:3000
locations:
- location: "/api/live/"
contents: |
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_pass http://grafana:3000;
- name: matrix.desu.ltd
proxies:
- location: "~* ^(\/_matrix|\/_synapse|\/client|\/health)"
@@ -251,12 +240,12 @@
pass: http://element:80
directives:
- "client_max_body_size 0"
- name: nagios.desu.ltd
proxy_pass: http://nagios:80
- name: nc.desu.ltd
directives:
- "add_header Strict-Transport-Security \"max-age=31536000\""
- "client_max_body_size 0"
- "keepalive_requests 99999"
- "keepalive_timeout 600"
proxy_pass: http://nextcloud:80
locations:
- location: "^~ /.well-known"
@@ -267,27 +256,21 @@
try_files $uri $uri/ =404;
- name: netbox.desu.ltd
proxy_pass: http://netbox:8080
# desu.ltd media bullshit
- name: prowlarr.media.desu.ltd
- name: prometheus.desu.ltd
directives:
- "allow {{ common_home_address }}/{{ common_home_address_mask }}"
- "allow 10.0.0.0/8"
- "allow 172.16.0.0/12"
- "allow 192.168.0.0/16"
# TODO: Replace this with a dynamically-generated list of public IPs from inv
- "allow 45.79.58.44/32" # bastion1.dallas.mgmt.desu.ltd
- "deny all"
proxy_pass: http://prowlarr:9696
- name: sonarr.media.desu.ltd
directives:
- "allow {{ common_home_address }}/{{ common_home_address_mask }}"
- "deny all"
proxy_pass: http://sonarr:8989
- name: radarr.media.desu.ltd
directives:
- "allow {{ common_home_address }}/{{ common_home_address_mask }}"
- "deny all"
proxy_pass: http://radarr:7878
- name: transmission.media.desu.ltd
directives:
- "allow {{ common_home_address }}/{{ common_home_address_mask }}"
- "deny all"
proxy_pass: http://transmission:9091
proxy_pass: http://prometheus:9090
# media.desu.ltd proxies
- name: music.desu.ltd
proxy_pass: http://zt0.srv-fw-13-1.home.mgmt.desu.ltd
- name: jellyfin.desu.ltd
proxy_pass: http://zt0.srv-fw-13-1.home.mgmt.desu.ltd
# 9iron
- name: www.9iron.club
directives:

@@ -1,5 +0,0 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
---
# Supplementary tags
- import_playbook: tags_ansible.yml
@@ -8,3 +8,5 @@
- import_playbook: prod_web.yml
# Home automation stuff
- import_playbook: home_automation.yml
# Backup management stuff
- import_playbook: tags_restic-prune.yml

@@ -1,8 +0,0 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
---
- hosts: tags_ansible
gather_facts: no
roles:
- role: ansible
tags: [ ansible ]

@@ -3,34 +3,11 @@
---
- hosts: tags_autoreboot
gather_facts: no
module_defaults:
nagios:
author: Ansible
action: downtime
cmdfile: /data/nagios/var/rw/nagios.cmd
comment: "Ansible tags_autoreboot task"
host: "{{ inventory_hostname }}"
minutes: 10
serial: 1
tasks:
- name: check for reboot-required
ansible.builtin.stat: path=/var/run/reboot-required
register: s
- name: reboot
block:
- name: attempt to schedule downtime
block:
- name: register nagios host downtime
nagios:
service: host
delegate_to: vm-general-1.ashburn.mgmt.desu.ltd
- name: register nagios service downtime
nagios:
service: all
delegate_to: vm-general-1.ashburn.mgmt.desu.ltd
rescue:
- name: notify of failure to reboot
ansible.builtin.debug: msg="Miscellaneous failure when scheduling downtime"
- name: reboot
ansible.builtin.reboot: reboot_timeout=600
when: s.stat.exists
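
The two downtime tasks above lean on the play's module_defaults: each task-level nagios: stanza only sets service:, while author, action, cmdfile, comment, host, and minutes are inherited. As a minimal sketch, the host-downtime task written out in full (every value below comes from the module_defaults block above, nothing new):

    - name: register nagios host downtime
      nagios:
        action: downtime            # inherited from module_defaults
        author: Ansible
        cmdfile: /data/nagios/var/rw/nagios.cmd
        comment: "Ansible tags_autoreboot task"
        host: "{{ inventory_hostname }}"
        minutes: 10
        service: host               # the only per-task argument
      delegate_to: vm-general-1.ashburn.mgmt.desu.ltd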

@@ -2,43 +2,56 @@
# vim:ft=ansible:
---
- hosts: tags_nagios
gather_facts: no
roles:
- role: git
vars:
git_repos:
- repo: https://git.desu.ltd/salt/monitoring-scripts
dest: /usr/local/bin/monitoring-scripts
tags: [ nagios, git ]
gather_facts: yes
tasks:
- name: assure nagios plugin packages
ansible.builtin.apt: name=monitoring-plugins,nagios-plugins-contrib
tags: [ nagios ]
- name: assure nagios user
ansible.builtin.user: name=nagios-checker state=present system=yes
tags: [ nagios ]
- name: assure nagios user ssh key
authorized_key:
user: nagios-checker
state: present
key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKNavw28C0mKIQVRLQDW2aoovliU1XCGaenDhIMwumK/ Nagios monitoring"
tags: [ nagios ]
- name: assure nagios user sudo rule file
ansible.builtin.file: path=/etc/sudoers.d/50-nagios-checker mode=0750 owner=root group=root state=touch modification_time=preserve access_time=preserve
tags: [ nagios, sudo ]
- name: assure nagios user sudo rules
ansible.builtin.lineinfile:
path: /etc/sudoers.d/50-nagios-checker
line: "nagios-checker ALL = (root) NOPASSWD: {{ item }}"
with_items:
- /usr/lib/nagios/plugins/check_disk
- /usr/local/bin/monitoring-scripts/check_docker
- /usr/local/bin/monitoring-scripts/check_temp
tags: [ nagios, sudo ]
- hosts: all
gather_facts: no
tasks:
- name: disable nagios user when not tagged
ansible.builtin.user: name=nagios-checker state=absent remove=yes
when: "'tags_nagios' not in group_names"
tags: [ nagios ]
- name: assure prometheus containers for docker hosts
block:
- name: assure prometheus node exporter
# https://github.com/prometheus/node_exporter
ansible.builtin.docker_container:
name: prometheus-node-exporter
image: quay.io/prometheus/node-exporter:latest
restart_policy: unless-stopped
command:
- '--path.rootfs=/host'
- '--collector.interrupts'
- '--collector.processes'
network_mode: host
pid_mode: host
volumes:
- /:/host:ro,rslave
tags: [ prometheus ]
- name: assure prometheus cadvisor exporter
ansible.builtin.docker_container:
name: prometheus-cadvisor-exporter
image: gcr.io/cadvisor/cadvisor:latest
restart_policy: unless-stopped
ports:
- 9101:8080/tcp
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker:/var/lib/docker:ro
- /dev/disk:/dev/disk:ro
devices:
- /dev/kmsg
when: ansible_pkg_mgr != "atomic_container"
- name: assure prometheus containers for coreos
block:
- name: assure prometheus node exporter
# https://github.com/prometheus/node_exporter
containers.podman.podman_container:
name: prometheus-node-exporter
image: quay.io/prometheus/node-exporter:latest
restart_policy: unless-stopped
command:
- '--path.rootfs=/host'
- '--collector.interrupts'
- '--collector.processes'
network_mode: host
pid_mode: host
volumes:
- /:/host:ro,rslave
tags: [ prometheus ]
when: ansible_pkg_mgr == "atomic_container"

playbooks/tags_restic-prune.yml (Executable file, 10 lines)
@@ -0,0 +1,10 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
---
- hosts: tags_restic-prune
roles:
- role: backup
vars:
backup_restic: no
backup_restic_prune: yes
tags: [ backup, prune, restic, restic-prune ]
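
This new playbook only acts on hosts in the tags_restic-prune group. A hypothetical YAML inventory stanza for it would look like the sketch below; the group name comes from the play itself, but the member host is only an illustration, since the repo's real inventory is not part of this diff:

    tags_restic-prune:
      hosts:
        vm-general-1.ashburn.mgmt.desu.ltd:   # illustrative member, not from this diff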

@@ -7,7 +7,7 @@
docker_container:
name: ddns-route53
state: started
image: crazymax/ddns-route53:latest
image: ghcr.io/crazy-max/ddns-route53:latest
restart_policy: unless-stopped
pull: yes
env:

@@ -2,7 +2,7 @@
- name: docker deploy gitlab runner
docker_container:
name: gitlab-runner
image: gitlab/gitlab-runner:latest
image: registry.gitlab.com/gitlab-org/gitlab-runner:latest
restart_policy: unless-stopped
volumes:
- /var/run/docker.sock:/var/run/docker.sock

@@ -2,7 +2,7 @@
- name: docker deploy homeassistant
docker_container:
name: homeassistant
image: "ghcr.io/home-assistant/raspberrypi4-homeassistant:stable"
image: ghcr.io/home-assistant/home-assistant:latest
privileged: yes
network_mode: host
volumes:

playbooks/tasks/app/prometheus-netgear-exporter.yml (Normal file, 30 lines)
@@ -0,0 +1,30 @@
# vim:ft=ansible:
#
# Bless this man. Bless him dearly:
# https://github.com/DRuggeri/netgear_exporter
#
- name: docker deploy netgear prometheus exporter
vars:
netgear_admin_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
31346635363565363532653831613034376535653530376237343261623736326230393333326337
3062643963353334323439306361356437653834613832310a666366393662303166313733393831
32373465356638393138633963666337643333303435653537666361363437633533333263303938
6536353530323036350a656330326662373836393736383961393537666537353138346439626566
64336631656538343335343535343338613465393635333937656237333531303230
docker_container:
name: prometheus-netgear-exporter
image: ghcr.io/druggeri/netgear_exporter
env:
NETGEAR_EXPORTER_PASSWORD: "{{ netgear_admin_password }}"
networks:
- name: web
aliases: [ "redis" ]
ports:
- "9192:9192/tcp"
command:
- "--url=http://192.168.1.1:5000" # Set the URL to the SOAP port of the router, NOT the admin interface
- "--insecure" # Required when accessing over IP
- "--timeout=15" # The router is slow as balls
- "--filter.collectors=Client,Traffic" # Filter out SystemInfo to lower collection time
tags: [ docker, prometheus, netgear, prometheus-netgear ]
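
The exporter above publishes metrics on host port 9192 (the "9192:9192/tcp" mapping). A matching Prometheus scrape job would be a sketch along these lines; the job name and target address are assumptions, only the port comes from the task:

    # prometheus.yml fragment (hypothetical; not part of this repo's diff)
    scrape_configs:
      - job_name: netgear                    # illustrative name
        static_configs:
          - targets:
              - "dockerhost.example:9192"    # assumed host running the container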

@@ -2,7 +2,7 @@
- name: docker deploy redis
docker_container:
name: redis
image: redis:6-alpine
image: docker.io/redis:6-alpine
networks:
- name: web
aliases: [ "redis" ]

@@ -3,7 +3,7 @@
docker_container:
name: factorio
state: absent
image: factoriotools/factorio:stable
image: docker.io/factoriotools/factorio:stable
restart_policy: unless-stopped
interactive: yes
pull: yes

@@ -2,20 +2,38 @@
- name: docker deploy minecraft - create farming and delights
docker_container:
name: minecraft-createfarming
state: started
image: itzg/minecraft-server:latest
restart_policy: unless-stopped
pull: yes
state: absent
image: ghcr.io/itzg/minecraft-server:latest
env:
# Common envvars
EULA: "true"
OPS: "VintageSalt"
SNOOPER_ENABLED: "false"
SPAWN_PROTECTION: "0"
USE_AIKAR_FLAGS: "true"
RCON_CMDS_STARTUP: |-
scoreboard objectives add Deaths deathCount
#scoreboard objectives add Health health {"text":"❤","color":"red"}
RCON_CMDS_ON_CONNECT: |-
scoreboard objectives setdisplay list Deaths
#scoreboard objectives setdisplay belowName Health
# Pack-specific stuff
MODRINTH_PROJECT: "https://modrinth.com/modpack/create-farmersdelight/version/1.0.0"
MOTD: "Create Farming and Delights! Spinny trains!"
TYPE: "MODRINTH"
VERSION: "1.20.1"
MAX_MEMORY: "6G"
#VIEW_DISTANCE: "10"
ports:
- "25565:25565/tcp"
- "25565:25565/udp"
- "24454:24454/udp"
# Prometheus exporter for Forge
# https://www.curseforge.com/minecraft/mc-mods/prometheus-exporter
#- "19565:19565/tcp"
# Prometheus exporter for Fabric
# https://modrinth.com/mod/fabricexporter
- "19565:25585/tcp"
volumes:
- /data/minecraft/createfarming:/data
tags: [ docker, minecraft ]
tags: [ docker, minecraft, create, createfarming ]

@@ -1,34 +0,0 @@
# vim:ft=ansible:
- name: docker deploy minecraft - direwolf20
docker_container:
name: minecraft-direwolf20
state: started
image: itzg/minecraft-server:latest
restart_policy: unless-stopped
pull: yes
env:
EULA: "true"
GENERIC_PACK: "/modpacks/1.20.1-direwolf20/Da Bois.zip"
TYPE: "NEOFORGE"
VERSION: "1.20.1"
FORGE_VERSION: "47.1.105"
MEMORY: "8G"
MOTD: "Tannerite Dog Edition\\n#abolishtheatf"
OPS: "VintageSalt"
RCON_CMDS_STARTUP: |-
scoreboard objectives add Deaths deathCount
scoreboard objectives add Health health {"text":"❤","color":"red"}
RCON_CMDS_ON_CONNECT: |-
scoreboard objectives setdisplay list Deaths
scoreboard objectives setdisplay belowName Health
SNOOPER_ENABLED: "false"
SPAWN_PROTECTION: "0"
USE_AIKAR_FLAGS: "true"
VIEW_DISTANCE: "10"
ports:
- "25567:25565/tcp"
- "25567:25565/udp"
volumes:
- /data/srv/packs:/modpacks
- /data/minecraft/direwolf20:/data
tags: [ docker, minecraft, direwolf20 ]

playbooks/tasks/game/minecraft-magicpack.yml (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# vim:ft=ansible:
- name: docker deploy minecraft - magicpack
docker_container:
name: minecraft-magicpack
state: absent
image: ghcr.io/itzg/minecraft-server:java8
env:
# Common envvars
EULA: "true"
OPS: "VintageSalt"
SNOOPER_ENABLED: "false"
SPAWN_PROTECTION: "0"
USE_AIKAR_FLAGS: "true"
#
# This enables the use of Ely.by as an auth and skin server
# Comment this and the above line out if you'd like to use Mojang's
# https://docs.ely.by/en/authlib-injector.html
#
# All players should register on Ely.by in order for this to work.
# They should also use Fjord Launcher by Unmojang:
# https://github.com/unmojang/FjordLauncher
#
JVM_OPTS: "-javaagent:/authlib-injector.jar=ely.by"
RCON_CMDS_STARTUP: |-
scoreboard objectives add Deaths deathCount
#scoreboard objectives add Health health {"text":"❤","color":"red"}
RCON_CMDS_ON_CONNECT: |-
scoreboard objectives setdisplay list Deaths
#scoreboard objectives setdisplay belowName Health
# Pack-specific stuff
MODRINTH_PROJECT: "https://srv.9iron.club/files/packs/1.7.10-magicpack/server.mrpack"
MOTD: "It's ya boy, uh, skrunkly modpack"
TYPE: "MODRINTH"
VERSION: "1.7.10"
MAX_MEMORY: "6G"
#VIEW_DISTANCE: "10"
ports:
- "25565:25565/tcp"
- "25565:25565/udp"
- "24454:24454/udp"
# Prometheus exporter for Forge
# https://www.curseforge.com/minecraft/mc-mods/prometheus-exporter
- "19565:19565/tcp"
# Prometheus exporter for Fabric
# https://modrinth.com/mod/fabricexporter
#- "19565:25585/tcp"
volumes:
- /data/minecraft/magicpack:/data
- /data/minecraft/authlib-injector-1.2.5.jar:/authlib-injector.jar
tags: [ docker, minecraft, magicpack ]

@@ -1,33 +0,0 @@
# vim:ft=ansible:
- name: docker deploy minecraft - vanilla
docker_container:
name: minecraft-vanilla
state: absent
image: itzg/minecraft-server:latest
restart_policy: unless-stopped
pull: yes
env:
DIFFICULTY: "normal"
ENABLE_COMMAND_BLOCK: "true"
EULA: "true"
MAX_PLAYERS: "8"
MODRINTH_PROJECT: "https://modrinth.com/modpack/adrenaserver"
MOTD: "Tannerite Dog Edition\\n#abolishtheatf"
OPS: "VintageSalt"
RCON_CMDS_STARTUP: |-
scoreboard objectives add Deaths deathCount
scoreboard objectives add Health health {"text":"❤","color":"red"}
RCON_CMDS_ON_CONNECT: |-
scoreboard objectives setdisplay list Deaths
scoreboard objectives setdisplay belowName Health
SNOOPER_ENABLED: "false"
SPAWN_PROTECTION: "0"
TYPE: "MODRINTH"
USE_AIKAR_FLAGS: "true"
VIEW_DISTANCE: "12"
ports:
- "26565:25565/tcp"
- "26565:25565/udp"
volumes:
- /data/minecraft/vanilla:/data
tags: [ docker, minecraft ]

playbooks/tasks/game/minecraft-weedie.yml (Normal file, 44 lines)
@@ -0,0 +1,44 @@
# vim:ft=ansible:
- name: docker deploy minecraft - weediewack next gen pack
docker_container:
name: minecraft-weedie
state: absent
image: ghcr.io/itzg/minecraft-server:latest
env:
# Common envvars
EULA: "true"
OPS: "VintageSalt"
SNOOPER_ENABLED: "false"
SPAWN_PROTECTION: "0"
USE_AIKAR_FLAGS: "true"
ALLOW_FLIGHT: "true"
RCON_CMDS_STARTUP: |-
scoreboard objectives add Deaths deathCount
scoreboard objectives add Health health {"text":"❤","color":"red"}
RCON_CMDS_ON_CONNECT: |-
scoreboard objectives setdisplay list Deaths
scoreboard objectives setdisplay belowName Health
# Pack-specific stuff
TYPE: "Forge"
MOTD: "We're doing it a-fucking-gain!"
VERSION: "1.20.1"
FORGE_VERSION: "47.3.11"
MAX_MEMORY: "8G"
#GENERIC_PACKS: "Server Files 1.3.7"
#GENERIC_PACKS_PREFIX: "https://mediafilez.forgecdn.net/files/5832/451/"
#GENERIC_PACKS_SUFFIX: ".zip"
#SKIP_GENERIC_PACK_UPDATE_CHECK: "true"
#VIEW_DISTANCE: "10"
ports:
- "25565:25565/tcp"
- "25565:25565/udp"
- "24454:24454/udp"
# Prometheus exporter for Forge
# https://www.curseforge.com/minecraft/mc-mods/prometheus-exporter
- "19566:19565/tcp"
# Prometheus exporter for Fabric
# https://modrinth.com/mod/fabricexporter
#- "19565:25585/tcp"
volumes:
- /data/minecraft/weedie:/data
tags: [ docker, minecraft, weedie ]

playbooks/tasks/game/satisfactory.yml (Normal file, 47 lines)
@@ -0,0 +1,47 @@
# vim:ft=ansible:
- name: ensure docker network
docker_network: name=satisfactory
tags: [ satisfactory, docker, network ]
- name: docker deploy satisfactory
docker_container:
name: satisfactory
state: absent
image: ghcr.io/wolveix/satisfactory-server:latest
restart_policy: unless-stopped
pull: yes
networks:
- name: satisfactory
aliases: [ "gameserver" ]
env:
MAXPLAYERS: "8"
# We have this turned on for modding's sake
#SKIPUPDATE: "true"
ports:
- '7777:7777/udp'
- '7777:7777/tcp'
volumes:
- /data/satisfactory/config:/config
tags: [ docker, satisfactory ]
- name: docker deploy satisfactory sftp
docker_container:
name: satisfactory-sftp
state: absent
image: ghcr.io/atmoz/sftp/alpine:latest
restart_policy: unless-stopped
pull: yes
ulimits:
- 'nofile:262144:262144'
ports:
- '7776:22/tcp'
volumes:
- /data/satisfactory/config:/home/servermgr/game
command: 'servermgr:{{ server_password }}:1000'
vars:
server_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
33336138656461646462323661363336623235333861663730373535656331623230313334353239
6535623833343237626161383833663435643262376133320a616634613764396661316332373339
33633662366666623931643635313162366339306539666632643437396637616632633432326631
3038333932623638390a386362653463306338326436396230633562313466336464663764643461
3134
tags: [ docker, satisfactory, sidecar, sftp ]

@@ -2,7 +2,7 @@
- name: docker deploy zomboid
community.docker.docker_container:
name: zomboid
state: absent
state: started
# Wanted to use latest but:
# https://github.com/Renegade-Master/zomboid-dedicated-server/issues/74
# https://github.com/Renegade-Master/zomboid-dedicated-server/issues/68
@@ -16,14 +16,14 @@
ADMIN_USERNAME: "Salt"
ADMIN_PASSWORD: "SuperMegaDicks"
MAX_PLAYERS: "8"
MAP_NAMES: "vehicle_interior;MotoriousExpandedSpawnZones,VehicleSpawnZonesExpandedRedRace;Basements;Louisville"
MAP_NAMES: "vehicle_interior;MotoriousExpandedSpawnZones;VehicleSpawnZonesExpandedRedRace;AZSpawn;Louisville"
# Generating this list by hand is asinine
# Go here: https://getcollectionids.moonguy.me/
# Use this: https://steamcommunity.com/sharedfiles/filedetails/?id=3145884377
# Or this: 3145884377
# Add mods to that collection if you want to add them here, then regen these two fields.
MOD_NAMES: "P4HasBeenRead;AutoSewing;AutoMechanics;BulbMechanics;ShowBulbCondition;modoptions;BoredomTweaks;MoreCLR_desc4mood;MiniHealthPanel;CombatText;manageContainers;EQUIPMENT_UI;ModManager;MoreDescriptionForTraits4166;SkillRecoveryJournal;RV_Interior_MP;RV_Interior_Vanilla;FRUsedCars;FRUsedCarsNRN;Lingering Voices;MapSymbolSizeSlider;VISIBLE_BACKPACK_BACKGROUND;BetterSortCC;MapLegendUI;BB_CommonSense;DRAW_ON_MAP;coavinsfirearmbase;coavinsfirearmsupport1;coavinsfirearmsupport2;coavinsfirearmsupport3;coavinsfirearmsupport4;coavinsfirearmsupport5;Shrek1and2intheirENTIRETYasvhs;NoVanillaVehicles;AnotherPlayersOnMinimap;AnimSync;DescriptiveSkillTooltips;noirrsling;Susceptible;ToadTraits;TheStar;BION_PlainMoodles;FH;ProximityInventory;SlowConsumption;MaintenanceImprovesRepair;fhqExpVehSpawn;fhqExpVehSpawnGageFarmDisable;fhqExpVehSpawnM911FarmDisable;fhqExpVehSpawnP19AFarmDisable;fhqExpVehSpawnNoVanilla;fhqExpVehSpawnRedRace;RUNE-EXP;NestedContainer01;AddRandomSprinters;TrueActionsDancing;VFExpansion1;Squishmallows;1989Porsche911Turbo;suprabase;IceCreamTruckFreezer;GarbageTruck;T3;MarTraitsBlind;BraStorage;KuromiBackpack;TalsCannedRat;happygilmoretape;SimpleReadWhileWalking41;FasterHoodOpening;SchizophreniaTrait;TwinkiesVan;Basements;LouisVille SP;hf_point_blank;UIAPI;WaterDispenser;BasementsPatch;No Mo Culling;FancyHandwork;BrutalHandwork;TheOnlyCure;WanderingZombies"
MOD_WORKSHOP_IDS: "2544353492;2584991527;2588598892;2778537451;2964435557;2169435993;2725360009;2763647806;2866258937;2286124931;2650547917;2950902979;2694448564;2685168362;2503622437;2822286426;1510950729;2874678809;2734705913;2808679062;2313387159;2710167561;2875848298;2804531012;3101379739;3138722707;2535461640;3117340325;2959512313;3134776712;2949818236;2786499395;2795677303;1299328280;2619072426;3008416736;2447729538;2847184718;2864231031;2920089312;2793164190;2758443202;2946221823;2797104510;2648779556;2667899942;3109119611;1687801932;2567438952;2689292423;2783373547;2783580134;2748047915;3121062639;3045079599;3022845661;3056136040;3163764362;2845952197;2584112711;2711720885;2838950860;2849247394;2678653895;2990322197;2760035814;2687798127;2949998111;3115293671;3236152598;2904920097;2934621024;2983905789"
MOD_NAMES: "P4HasBeenRead;AutoSewing;AutoMechanics;BulbMechanics;ShowBulbCondition;modoptions;BoredomTweaks;MoreCLR_desc4mood;MiniHealthPanel;CombatText;manageContainers;EQUIPMENT_UI;ModManager;MoreDescriptionForTraits4166;SkillRecoveryJournal;RV_Interior_MP;RV_Interior_Vanilla;FRUsedCars;FRUsedCarsNRN;Lingering Voices;MapSymbolSizeSlider;VISIBLE_BACKPACK_BACKGROUND;BetterSortCC;MapLegendUI;BB_CommonSense;DRAW_ON_MAP;coavinsfirearmbase;coavinsfirearmsupport1;coavinsfirearmsupport2;coavinsfirearmsupport3;coavinsfirearmsupport4;coavinsfirearmsupport5;Shrek1and2intheirENTIRETYasvhs's;NoVanillaVehicles;AnotherPlayersOnMinimap;AnimSync;DescriptiveSkillTooltips;darkPatches;noirrsling;Susceptible;ToadTraits;TheStar;BION_PlainMoodles;FH;ProximityInventory;SlowConsumption;MaintenanceImprovesRepair;fhqExpVehSpawn;fhqExpVehSpawnGageFarmDisable;fhqExpVehSpawnM911FarmDisable;fhqExpVehSpawnP19AFarmDisable;fhqExpVehSpawnNoVanilla;fhqExpVehSpawnRedRace;RUNE-EXP;NestedContainer01;AddRandomSprinters;TrueActionsDancing;VFExpansion1;Squishmallows;DeLoreanDMC-12;1989Porsche911Turbo;suprabase;IceCreamTruckFreezer;GarbageTruck;T3;MarTraitsBlind;BraStorage;KuromiBackpack;TalsCannedRat;happygilmoretape;SimpleReadWhileWalking41;FasterHoodOpening;SchizophreniaTrait;TwinkiesVan;LouisVille SP;hf_point_blank;UIAPI;WaterDispenser;TheOnlyCure;FancyHandwork;BrutalHandwork;WanderingZombies;Authentic Z - Current;ReloadAllMagazines;jiggasGreenfireMod;amclub;SpnClothHideFix;SpnOpenCloth;SpnHairAPI;PwSleepingbags;Video_Game_Consoles;metal_mod_pariah;truemusic;tm_grunge;TPAM;EasyLaundry;DropRollMod;9301;No Mo Culling;SpnCloth;SpnClothHideFix;SpnHair;lore_friendly_music;AmmoLootDropVFE;tsarslib;ItemTweakerAPIExtraClothingAddon;ItemTweakerAPI;TsarcraftCache2;TrueMusicMoodImprovement;StickyWeight"
MOD_WORKSHOP_IDS: "2544353492;2584991527;2588598892;2778537451;2964435557;2169435993;2725360009;2763647806;2866258937;2286124931;2650547917;2950902979;2694448564;2685168362;2503622437;2822286426;1510950729;2874678809;2734705913;2808679062;2313387159;2710167561;2875848298;2804531012;3101379739;3138722707;2535461640;3117340325;2959512313;3134776712;2949818236;2786499395;2795677303;1299328280;2619072426;3008416736;2447729538;2847184718;2864231031;2920089312;2793164190;2758443202;2946221823;2797104510;2648779556;2667899942;3109119611;1687801932;2567438952;2689292423;2783373547;2783580134;2748047915;3121062639;3045079599;3022845661;3056136040;3163764362;2845952197;2584112711;2711720885;2838950860;2849247394;2678653895;2990322197;2760035814;2687798127;2949998111;3115293671;3236152598;2904920097;2934621024;2983905789;2335368829;2907834593;2920899878;1703604612;2778576730;2812326159;3041733782;2714848168;2831786301;2853710135;2613146550;2810869183;2717792692;2925034918;2908614026;2866536557;2684285534;2463184726;2839277937;3041910754;2392709985;2810800927;566115016;2688809268;3048902085;2997503254"
RCON_PASSWORD: "SuperMegaDicks"
SERVER_NAME: "The Salty Spitoon"
SERVER_PASSWORD: "dicks"
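
As the comments above note, both mod fields are regenerated from a Steam workshop collection. For the ID half, the collection's children can also be pulled from the Steam Web API's public GetCollectionDetails endpoint; the sketch below is an illustration and not part of the repo (the endpoint and response shape are standard Steam Web API, the task and variable names are made up):

    - name: fetch workshop IDs for collection 3145884377 (illustrative sketch)
      ansible.builtin.uri:
        url: https://api.steampowered.com/ISteamRemoteStorage/GetCollectionDetails/v1/
        method: POST
        body_format: form-urlencoded
        body:
          collectioncount: "1"
          "publishedfileids[0]": "3145884377"
      register: zomboid_collection
    - name: print a MOD_WORKSHOP_IDS-style string
      ansible.builtin.debug:
        msg: "{{ zomboid_collection.json.response.collectiondetails[0].children | map(attribute='publishedfileid') | join(';') }}"

Note this only covers MOD_WORKSHOP_IDS; MOD_NAMES uses each mod's internal ID, which this endpoint does not return, hence the getcollectionids tool linked above.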

playbooks/tasks/web/5dd.yml (Normal file, 39 lines)
@@ -0,0 +1,39 @@
# vim:ft=ansible:
#
# This is a really stupid game, source here:
# https://github.com/Oliveriver/5d-diplomacy-with-multiverse-time-travel
#
- name: docker deploy 5d-diplomacy-with-multiverse-timetravel
docker_container:
name: 5d-diplomacy-with-multiverse-timetravel
state: started
#image: deluan/5d-diplomacy-with-multiverse-timetravel:latest
image: rehashedsalt/5dd:latest
env:
ConnectionStrings__Database: "Server=5dd-mssql;Database=diplomacy;User=SA;Password={{ five_dd_db_pass }};Encrypt=True;TrustServerCertificate=True"
networks:
- name: web
aliases: [ "5d-diplomacy-with-multiverse-timetravel" ]
# For unproxied use
ports:
- 5173:8080
labels:
traefik.enable: "true"
traefik.http.routers.5d-diplomacy-with-multiverse-timetravel.rule: Host(`5dd.desu.ltd`)
traefik.http.routers.5d-diplomacy-with-multiverse-timetravel.entrypoints: web
tags: [ docker, 5d-diplomacy-with-multiverse-timetravel ]
- name: docker deploy 5dd mssql db
docker_container:
name: 5dd-mssql
image: mcr.microsoft.com/mssql/server:2022-latest
user: root
env:
ACCEPT_EULA: "y"
MSSQL_SA_PASSWORD: "{{ five_dd_db_pass }}"
volumes:
- /data/5dd/mssql/data:/var/opt/mssql/data
- /data/5dd/mssql/log:/var/opt/mssql/log
- /data/5dd/mssql/secrets:/var/opt/mssql/secrets
networks:
- name: web
aliases: [ "5dd-mssql" ]

@@ -2,7 +2,7 @@
- name: docker deploy 9iron
docker_container:
name: 9iron
image: rehashedsalt/9iron:latest
image: docker.io/rehashedsalt/9iron:latest
networks:
- name: web
aliases: [ "9iron" ]

playbooks/tasks/web/bazarr.yml (Normal file, 17 lines)
@@ -0,0 +1,17 @@
# vim:ft=ansible:
- name: docker deploy bazarr
docker_container:
name: bazarr
image: ghcr.io/linuxserver/bazarr:latest
networks:
- name: web
aliases: [ "bazarr" ]
volumes:
- /data/bazarr/config:/config
- /data/shared/downloads:/data
- /data/shared/media/shows:/tv
labels:
traefik.enable: "true"
traefik.http.routers.bazarr.rule: Host(`bazarr.media.desu.ltd`)
traefik.http.routers.bazarr.entrypoints: web
tags: [ docker, bazarr ]

@@ -2,7 +2,7 @@
- name: docker deploy desultd
docker_container:
name: desultd
image: rehashedsalt/desultd:latest
image: docker.io/rehashedsalt/desultd:latest
networks:
- name: web
aliases: [ "desultd" ]

@@ -2,7 +2,7 @@
- name: docker deploy element-web
docker_container:
name: element-web
image: vectorim/element-web:latest
image: ghcr.io/element-hq/element-web:develop
env:
TZ: "America/Chicago"
networks:

@@ -1,53 +0,0 @@
# vim:ft=ansible:
#
# NOTE: This app is currently not fully-functional. It needs a cronjob
# implemented for things like recurring transactions and budgets.
#
# https://docs.firefly-iii.org/firefly-iii/advanced-installation/cron/
#
- name: docker deploy firefly
docker_container:
name: firefly
image: fireflyiii/core:latest
env:
APP_KEY: "{{ secret_firefly_app_key }}"
APP_URL: "https://firefly.desu.ltd"
# We set TRUSTED_PROXIES to * here, which allows any app in the docker
# network to proxy this. I'm considering this a fine security concession
# since we can't guarantee the IP of the ingress container
TRUSTED_PROXIES: "*"
DB_HOST: "10.0.0.2"
DB_PORT: "5432"
DB_CONNECTION: pgsql
DB_DATABASE: "firefly-desultd"
DB_USERNAME: "firefly-desultd"
DB_PASSWORD: "{{ secret_firefly_db_pass }}"
networks:
- name: web
aliases: [ "firefly" ]
volumes:
- /data/firefly/export:/var/www/html/storage/export
- /data/firefly/logs:/var/www/html/storage/logs
- /data/firefly/upload:/var/www/html/storage/upload
tags: [ docker, firefly ]
- name: docker deploy firefly importer
docker_container:
name: firefly-importer
image: fireflyiii/data-importer:latest
# We need to use this workaround with custom DNS servers due to some host
# entries on the container host
dns_servers:
- "8.8.8.8"
- "8.8.4.4"
env:
# This TRUSTED_PROXIES line is still undocumented
# https://github.com/firefly-iii/firefly-iii/issues/3256
# God fucking dammit
TRUSTED_PROXIES: "*"
FIREFLY_III_ACCESS_TOKEN: "{{ secret_firefly_access_token }}"
FIREFLY_III_URL: "http://firefly:8080"
VANITY_URL: "https://firefly.desu.ltd"
networks:
- name: web
aliases: [ "firefly-importer" ]
tags: [ docker, firefly ]

@@ -2,7 +2,7 @@
- name: docker deploy gitea
docker_container:
name: gitea
image: gitea/gitea:1
image: docker.io/gitea/gitea:1
env:
USER_UID: "1002"
USER_GID: "1002"

playbooks/tasks/web/grafana.yml (Normal file, 42 lines)
@@ -0,0 +1,42 @@
# vim:ft=ansible:
- name: ensure grafana dirs
ansible.builtin.file:
state: directory
owner: 472
group: 472
mode: "0750"
path: "{{ item }}"
with_items:
- /data/grafana/storage
- /data/grafana/logs
tags: [ docker, grafana, monitoring ]
- name: docker deploy grafana
docker_container:
name: grafana
image: docker.io/grafana/grafana-oss:main
env:
TZ: "America/Chicago"
# This enables logging to STDOUT for log aggregators to more easily hook it
GF_LOG_MODE: "console file"
GF_SERVER_DOMAIN: "grafana.desu.ltd"
GF_SERVER_PROTOCOL: "http"
GF_SERVER_ROOT_URL: "https://grafana.desu.ltd"
networks:
- name: web
aliases: [ "grafana" ]
volumes:
- /data/grafana/storage:/var/lib/grafana
- /data/grafana/logs:/var/log/grafana
tags: [ docker, grafana, monitoring ]
- name: docker deploy grafana matrix bridge
docker_container:
name: grafana-matrix-bridge
image: registry.gitlab.com/hctrdev/grafana-matrix-forwarder:latest
env:
GMF_MATRIX_USER: "@grafana:desu.ltd"
GMF_MATRIX_PASSWORD: "{{ secret_grafana_matrix_token }}"
GMF_MATRIX_HOMESERVER: matrix.desu.ltd
networks:
- name: web
aliases: [ "grafana-matrix-bridge" ]
tags: [ docker, grafana, monitoring, bridge, matrix ]

playbooks/tasks/web/jellyfin.yml (Normal file, 44 lines)
@@ -0,0 +1,44 @@
# vim:ft=ansible:
#
# This is a really stupid game, source here:
# https://github.com/Oliveriver/5d-diplomacy-with-multiverse-time-travel
#
- name: set up jellyfin dirs
ansible.builtin.file:
state: directory
owner: 911
group: 911
mode: "0750"
path: "{{ item }}"
with_items:
- /data/jellyfin/config
- /data/jellyfin/cache
tags: [ docker, jellyfin ]
- name: docker deploy jellyfin
docker_container:
name: jellyfin
state: started
image: ghcr.io/jellyfin/jellyfin:latest
user: 911:911
groups:
- 109 # render on Ubuntu systems
env:
JELLYFIN_PublishedServerUrl: "http://jellyfin.desu.ltd"
networks:
- name: web
aliases: [ "jellyfin" ]
# For unproxied use
#ports:
# - 8096/tcp
volumes:
- /data/jellyfin/config:/config
- /data/jellyfin/cache:/cache
- /data/shared/media:/media
devices:
- /dev/dri/renderD128:/dev/dri/renderD128
labels:
traefik.enable: "true"
traefik.http.routers.jellyfin.rule: Host(`jellyfin.desu.ltd`)
traefik.http.routers.jellyfin.entrypoints: web
traefik.http.services.jellyfin.loadbalancer.server.port: "8096"
tags: [ docker, jellyfin ]

@@ -2,14 +2,55 @@
- name: docker deploy lidarr
docker_container:
name: lidarr
image: linuxserver/lidarr:latest
state: started
#image: linuxserver/lidarr:latest
image: ghcr.io/hotio/lidarr:pr-plugins
networks:
- name: web
aliases: [ "lidarr" ]
env:
PUID: "911"
PGID: "911"
TZ: "America/Chicago"
VPN_ENABLED: "false"
volumes:
# https://github.com/RandomNinjaAtk/arr-scripts?tab=readme-ov-file
- /data/lidarr/bin:/usr/local/bin
- /data/lidarr/config:/config
- /data/shared/downloads:/data
- /data/shared/media/music:/music
labels:
traefik.enable: "true"
traefik.http.routers.lidarr.rule: Host(`lidarr.media.desu.ltd`)
traefik.http.routers.lidarr.entrypoints: web
tags: [ docker, lidarr ]
- name: assure slskd cleanup cronjob
ansible.builtin.cron:
user: root
name: slskd-cleanup
state: present
hour: 4
job: "find /data/shared/downloads/soulseek -mtime +7 -print -delete"
tags: [ slskd, cron, cleanup ]
- name: docker deploy slskd
docker_container:
name: lidarr-slskd
state: started
image: ghcr.io/slskd/slskd:latest
user: "911:911"
networks:
- name: web
aliases: [ "slskd" ]
env:
SLSKD_REMOTE_CONFIGURATION: "true"
ports:
- "50300:50300"
volumes:
- /data/slskd:/app
- /data/shared/downloads/soulseek:/app/downloads
labels:
traefik.enable: "true"
traefik.http.routers.lidarr-slskd.rule: Host(`slskd.media.desu.ltd`)
traefik.http.routers.lidarr-slskd.entrypoints: web
traefik.http.services.lidarr-slskd.loadbalancer.server.port: "5030"
tags: [ docker, slskd ]

playbooks/tasks/web/navidrome.yml (Normal file, 39 lines)
@@ -0,0 +1,39 @@
# vim:ft=ansible:
- name: docker deploy navidrome
docker_container:
name: navidrome
state: started
image: ghcr.io/navidrome/navidrome:latest
user: 911:911
env:
ND_BASEURL: "https://music.desu.ltd"
ND_PROMETHEUS_ENABLED: "true"
ND_LOGLEVEL: "info"
ND_LASTFM_ENABLED: "true"
ND_LASTFM_APIKEY: !vault |
$ANSIBLE_VAULT;1.1;AES256
63333239613931623033656233353537653830623065386632393232316537356261393938323533
6632633034643637653136633235393335303535653136340a363331653839383930396633363133
62313964396161326231376534333064343736633466363962313662353665313230396237666363
6565613939666663300a313462366137363661373839326636613064643032356437376536333366
30366238646363316639373730343336373234313338663261616331666162653362626364323463
3131666231383138623965656163373364326432353137663665
ND_LASTFM_SECRET: !vault |
$ANSIBLE_VAULT;1.1;AES256
39316232373136663435323662333137636635326535643735383734666562303339663134336137
3132613237613436336663303330623334663262313337350a393963653765343262333533373763
37623230393638616535623861333135353038646532343038313865626435623830343361633938
3232646462346163380a616462366435343934326232366233636564626262653965333564363731
66656532663965616561313032646231663366663636663838633535393566363631346535383866
6335623230303333346266306637353061356665383264333266
networks:
- name: web
aliases: [ "navidrome" ]
volumes:
- /data/navidrome/data:/data
- /data/shared/media/music:/music:ro
labels:
traefik.enable: "true"
traefik.http.routers.navidrome.rule: Host(`music.desu.ltd`)
traefik.http.routers.navidrome.entrypoints: web
tags: [ docker, navidrome ]

@@ -2,7 +2,7 @@
- name: deploy netbox
module_defaults:
docker_container:
image: netboxcommunity/netbox:v3.1.5
image: ghcr.io/netbox-community/netbox:v3.1.5
state: started
restart_policy: unless-stopped
pull: yes

@@ -2,17 +2,7 @@
- name: docker deploy nextcloud
docker_container:
name: nextcloud
image: nextcloud:27
# The entrypoint workaround is for this issue:
#
# https://github.com/nextcloud/docker/issues/1414
#
# This installs imagemagick to allow for SVG support and to clear the last
# setup warning in the application.
# It can be safely removed upon closure of this issue. I'm just doing it to
# make the big bad triangle go away.
entrypoint: /bin/sh
command: -c "apt-get update; apt-get install -y libmagickcore-6.q16-6-extra; /entrypoint.sh apache2-foreground"
image: docker.io/nextcloud:30
env:
PHP_UPLOAD_LIMIT: 1024M
networks:
@@ -23,11 +13,22 @@
- /data/nextcloud/config:/var/www/html/config
- /data/nextcloud/themes:/var/www/html/themes
- /data/nextcloud/data:/var/www/html/data
- /data/shared:/shared
tags: [ docker, nextcloud ]
# Vanilla Nextcloud cron
- name: assure nextcloud cron cronjob
ansible.builtin.cron: user=root name=nextcloud minute=*/5 job="docker exec --user www-data nextcloud php -f /var/www/html/cron.php"
tags: [ docker, nextcloud, cron ]
# Plugin crons
- name: assure nextcloud preview generator cronjob
ansible.builtin.cron: user=root name=nextcloud-preview-generator hour=1 minute=10 job="docker exec --user www-data nextcloud php occ preview:pre-generate"
tags: [ docker, nextcloud, cron ]
# Maintenance tasks
- name: assure nextcloud update cronjob
ansible.builtin.cron: user=root name=nextcloud-update minute=*/30 job="docker exec --user www-data nextcloud php occ app:update --all"
tags: [ docker, nextcloud, cron ]
- name: assure nextcloud db indices cronjob
ansible.builtin.cron: user=root name=nextcloud-update-db-inidices hour=1 job="docker exec --user www-data nextcloud php occ db:add-missing-indices"
tags: [ docker, nextcloud, cron ]
- name: assure nextcloud expensive migration cronjob
ansible.builtin.cron: user=root name=nextcloud-update-expensive-migration hour=1 minute=30 job="docker exec --user www-data nextcloud php occ db:add-missing-indices"
tags: [ docker, nextcloud, cron ]

@@ -2,10 +2,14 @@
- name: docker deploy prowlarr
docker_container:
name: prowlarr
image: linuxserver/prowlarr:develop
image: ghcr.io/linuxserver/prowlarr:develop
networks:
- name: web
aliases: [ "prowlarr" ]
volumes:
- /data/prowlarr/config:/config
labels:
traefik.enable: "true"
traefik.http.routers.prowlarr.rule: Host(`prowlarr.media.desu.ltd`)
traefik.http.routers.prowlarr.entrypoints: web
tags: [ docker, prowlarr ]

@@ -2,7 +2,7 @@
- name: docker deploy radarr
docker_container:
name: radarr
image: linuxserver/radarr:latest
image: ghcr.io/linuxserver/radarr:latest
networks:
- name: web
aliases: [ "radarr" ]
@@ -10,4 +10,8 @@
- /data/radarr/config:/config
- /data/shared/downloads:/data
- /data/shared/media/movies:/tv
labels:
traefik.enable: "true"
traefik.http.routers.radarr.rule: Host(`radarr.media.desu.ltd`)
traefik.http.routers.radarr.entrypoints: web
tags: [ docker, radarr ]

@@ -2,7 +2,7 @@
- name: docker deploy sonarr
docker_container:
name: sonarr
image: linuxserver/sonarr:latest
image: ghcr.io/linuxserver/sonarr:latest
networks:
- name: web
aliases: [ "sonarr" ]
@@ -10,4 +10,8 @@
- /data/sonarr/config:/config
- /data/shared/downloads:/data
- /data/shared/media/shows:/tv
labels:
traefik.enable: "true"
traefik.http.routers.sonarr.rule: Host(`sonarr.media.desu.ltd`)
traefik.http.routers.sonarr.entrypoints: web
tags: [ docker, sonarr ]

@@ -4,7 +4,7 @@
# NOTE: We depend on the default configuration of Apache here, specifically
# the default to have server-generated indexes. Makes srv easier to navigate
name: srv
image: httpd:latest
image: docker.io/httpd:latest
networks:
- name: web
aliases: [ "srv" ]

@@ -2,7 +2,7 @@
- name: docker deploy transmission
docker_container:
name: transmission
image: haugene/transmission-openvpn:latest
image: docker.io/haugene/transmission-openvpn:latest
env:
USER: transmission
PASS: "{{ secret_transmission_user_pass }}"
@@ -11,6 +11,8 @@
OPENVPN_USERNAME: "{{ secret_pia_user }}"
OPENVPN_PASSWORD: "{{ secret_pia_pass }}"
LOCAL_NETWORK: 192.168.0.0/16
devices:
- /dev/net/tun
capabilities:
- NET_ADMIN
ports:
@@ -23,4 +25,9 @@
- /data/transmission/config:/config
- /data/shared/downloads:/data
- /data/transmission/watch:/watch
labels:
traefik.enable: "true"
traefik.http.routers.transmission.rule: Host(`transmission.media.desu.ltd`)
traefik.http.routers.transmission.entrypoints: web
traefik.http.services.transmission.loadbalancer.server.port: "9091"
tags: [ docker, transmission ]

@@ -14,7 +14,7 @@ roles:
version: 2.0.0
# Upstream: https://github.com/geerlingguy/ansible-role-postgresql
- src: geerlingguy.postgresql
version: 3.5.0
version: 3.5.2
# Upstream: https://github.com/willshersystems/ansible-sshd
- src: willshersystems.sshd
version: v0.23.0

@@ -6,6 +6,7 @@
append: "{{ adminuser_groups_append }}"
groups: "{{ adminuser_groups + adminuser_groups_extra }}"
shell: "{{ adminuser_shell }}"
tags: [ adminuser ]
- name: assure admin user ssh key
ansible.builtin.user:
name: "{{ adminuser_name }}"
@@ -13,15 +14,20 @@
ssh_key_type: "{{ adminuser_ssh_key_type }}"
ssh_key_file: ".ssh/id_{{ adminuser_ssh_key_type }}"
when: adminuser_ssh_key
tags: [ adminuser ]
- name: assure admin user ssh authorized keys
authorized_key: user={{ adminuser_name }} key={{ item }}
loop: "{{ adminuser_ssh_authorized_keys }}"
tags: [ adminuser ]
- name: remove admin user ssh keys
authorized_key: state=absent user={{ adminuser_name }} key={{ item }}
loop: "{{ adminuser_ssh_unauthorized_keys }}"
tags: [ adminuser ]
- name: assure admin user pass
ansible.builtin.user: name={{ adminuser_name }} password={{ adminuser_password }}
when: adminuser_password is defined
tags: [ adminuser ]
- name: assure admin user sudo rule
ansible.builtin.lineinfile: path=/etc/sudoers line={{ adminuser_sudo_rule }}
when: adminuser_sudo
tags: [ adminuser ]

@@ -1 +1 @@
Subproject commit 1a332f6788d4ae24b52948850965358790861432
Subproject commit 56549b8ac718997c6b5c314636955e46ee5e8cc1

@@ -1,4 +0,0 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
- name: install ansible
pip: name=ansible<5,ansible-lint state=latest

@@ -1,12 +1,18 @@
# Which backup script to use. Configuration is somewhat unique to each script
backup_script: s3backup
restore_script: s3restore
# When to kick off backups using the systemd timer
backup_time: "*-*-* 02:00:00"
# What format should the datestamps in the filenames of any backups be in?
# Defaults to YYYY-MM-DD-hhmm
# So January 5th, 2021 at 3:41PM would be 2021-01-05-1541
backup_dateformat: "%Y-%m-%d-%H%M"
# What variation should the systemd timer have?
# Default value of "5400" is 1h30min in seconds
backup_time_randomization: "5400"

# Should this machine backup?
# Disabling this variable templates out the scripts, but not the units
backup_restic: yes

# Should this machine prune?
# Be very careful with this -- it's an expensive operation
backup_restic_prune: no
# How frequently should we prune?
backup_restic_prune_time: "*-*-01 12:00:00"

# S3 configuration for scripts that use it
# Which bucket to upload the backup to
@@ -14,16 +20,17 @@ backup_s3_bucket: replaceme
# Credentials for the bucket
backup_s3_aws_access_key_id: REPLACEME
backup_s3_aws_secret_access_key: REPLACEME
# Changeme if you use a non-AWS S3-compatible object store (like Backblaze)
#backup_s3_aws_endpoint_url:

# List of files/directories to back up
# Note that tar is NOT instructed to recurse through symlinks
# If you want it to do that, end the path with a slash!
backup_s3backup_list: []
backup_s3backup_list:
- "/etc"
- "/home/{{ adminuser_name }}"
backup_s3backup_list_extra: []
# List of files/directories to --exclude
backup_s3backup_exclude_list: []
backup_s3backup_exclude_list:
- "/home/{{ adminuser_name }}/Vaults/*"
backup_s3backup_exclude_list_extra: []
# Arguments to pass to tar
# Note that passing f here is probably a bad idea
backup_s3backup_tar_args: cz
backup_s3backup_tar_args_extra: ""
|
||||
|
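As a sketch of how these defaults are meant to be extended rather than replaced, per-host overrides would typically go in host_vars; the host name and the site.yml wiring here are assumptions, not part of the role:

    # Extend the backup list and enable pruning for one host
    cat > host_vars/examplehost.yml <<'EOF'
    backup_s3backup_list_extra:
      - "/data"
    backup_restic_prune: yes
    EOF
    ansible-playbook site.yml --limit examplehost
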
@ -4,3 +4,6 @@
- name: restart backup timer
  ansible.builtin.systemd: name=backup.timer state=restarted daemon_reload=yes
  become: yes
- name: restart prune timer
  ansible.builtin.systemd: name=backup-prune.timer state=restarted daemon_reload=yes
  become: yes
@ -1,12 +1,33 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
---
# Install restic if we can
- name: install restic
  block:
    - name: install restic through apt
      ansible.builtin.apt: name=restic state=present
      when: ansible_pkg_mgr == "apt"
    - name: install restic through rpm-ostree
      community.general.rpm_ostree_pkg: name=restic state=present
      when: ansible_os_family == "RedHat" and ansible_pkg_mgr == "atomic_container"
  tags: [ packages ]
# The script
- name: template out backup-related files
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "/opt/{{ item.dest | default(item.src, true) }}"
    mode: 0700
    owner: root
    group: root
  with_items:
    - src: restic-password
    - src: restic-wrapper.sh
      dest: restic-wrapper
# Backup service/timer definitions
- name: set up backups
  block:
    - name: template out backup script
      ansible.builtin.template: src={{ backup_script }}.sh dest=/opt/backup.sh mode=0700 owner=root group=root
    - name: template out analyze script
      ansible.builtin.template: src={{ backup_script }}-analyze.sh dest=/opt/analyze.sh mode=0700 owner=root group=root
    - name: template out restore script
      ansible.builtin.template: src={{ restore_script }}.sh dest=/opt/restore.sh mode=0700 owner=root group=root
      ansible.builtin.template: src=backup.sh dest=/opt/backup.sh mode=0700 owner=root group=root
    - name: configure systemd service
      ansible.builtin.template: src=backup.service dest=/etc/systemd/system/backup.service mode=0644
    - name: configure systemd timer
@ -14,3 +35,17 @@
      notify: restart backup timer
    - name: enable timer
      ansible.builtin.systemd: name=backup.timer state=started enabled=yes daemon_reload=yes
  when: backup_restic
# Prune script
- name: set up restic prune
  block:
    - name: template out prune script
      ansible.builtin.template: src=backup-prune.sh dest=/opt/backup-prune.sh mode=0700 owner=root group=root
    - name: configure prune systemd service
      ansible.builtin.template: src=backup-prune.service dest=/etc/systemd/system/backup-prune.service mode=0644
    - name: configure prune systemd timer
      ansible.builtin.template: src=backup-prune.timer dest=/etc/systemd/system/backup-prune.timer mode=0644
      notify: restart prune timer
    - name: enable prune timer
      ansible.builtin.systemd: name=backup-prune.timer state=started enabled=yes daemon_reload=yes
  when: backup_restic_prune
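A hedged usage sketch: assuming the role is wired into a playbook named site.yml (an assumption), the package installs above can run in isolation via their tag, and the resulting units checked afterwards:

    # Only run the tasks tagged 'packages' from this role
    ansible-playbook site.yml --tags packages
    # Then confirm the units landed and are scheduled
    systemctl status backup.timer backup-prune.timer
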
18 roles/backup/templates/backup-prune.service Normal file
@ -0,0 +1,18 @@
# vim:ft=systemd
[Unit]
Description=Backup prune service
After=network-online.target
Wants=network-online.target
StartLimitInterval=3600
StartLimitBurst=2

[Service]
Type=oneshot
#MemoryMax=512M
Environment="GOGC=20"
ExecStart=/opt/backup-prune.sh
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
11 roles/backup/templates/backup-prune.sh Normal file
@ -0,0 +1,11 @@
#! /bin/sh
#
# backup-prune.sh
# An Ansible-managed script to prune restic backups every now and again
#

set -e

/opt/restic-wrapper \
    --verbose \
    prune
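Prune rewrites repository data and can take a while; restic supports a dry run, so a cautious manual invocation might look like this (a sketch, assuming the wrapper has already been templated out):

    # Show what prune would repack or delete without modifying the repo
    /opt/restic-wrapper prune --dry-run --verbose
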
10 roles/backup/templates/backup-prune.timer Normal file
@ -0,0 +1,10 @@
# vim:ft=systemd
[Unit]
Description=Backup prune timer

[Timer]
Persistent=true
OnCalendar={{ backup_restic_prune_time }}

[Install]
WantedBy=timers.target
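Both timers are easy to sanity-check on a converged host, and systemd will parse a calendar expression for you:

    # Show next scheduled runs for the backup and prune timers
    systemctl list-timers 'backup*'
    # Dry-run the prune timer's default OnCalendar expression
    systemd-analyze calendar '*-*-01 12:00:00'
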
@ -3,11 +3,17 @@
Description=Nightly backup service
After=network-online.target
Wants=network-online.target
StartLimitInterval=600
StartLimitBurst=5

[Service]
Type=oneshot
MemoryMax=256M
#MemoryMax=512M
Environment="GOGC=20"
ExecStart=/opt/backup.sh
Restart=on-failure
RestartSec=5
RestartSteps=10

[Install]
WantedBy=multi-user.target
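One compatibility note on the unit above: RestartSteps= is only understood by systemd 254 and newer; older versions ignore it with a warning. A quick sketch for checking a host before leaning on it:

    # RestartSteps= requires systemd >= 254
    systemctl --version | head -n 1
    # Lint the rendered unit for unknown or invalid directives
    systemd-analyze verify /etc/systemd/system/backup.service
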
118 roles/backup/templates/backup.sh Normal file
@ -0,0 +1,118 @@
#! /bin/bash
#
# backup.sh
# Ansible-managed backup script that uses restic to automate machine backups to
# an S3 bucket. Intelligently handles a few extra apps, too.
#
# NOTICE: DO NOT MODIFY THIS FILE
# Any changes made will be clobbered by Ansible
# Please make any configuration changes in the main repo
#

set -e

# Directories to backup
# Ansible will determine the entries here

# We use a bash array because it affords us some level of sanitization, enough
# to let us back up items whose paths contain spaces
declare -a DIRS
{% for item in backup_s3backup_list + backup_s3backup_list_extra %}
DIRS+=("{{ item }}")
{% endfor %}
# End directory manual configuration

# Helper functions
backup() {
    # Takes one or more files or directories and backs them up
    [ -z "$*" ] && return 1

    for dir in "$@"; do
        echo "- $dir"
    done
    # First, we remove stale locks. This command will only remove locks that have not been
    # updated in the last half hour. By default, restic updates them during an ongoing
    # operation every 5 minutes, so this should be perfectly fine to do.
    # What I'm not sure of (but should be fine because we auto-restart if need be) is if two
    # processes doing this concurrently will cause issues. I'd hope not but you never know.
    # restic-unlock(1)
    /opt/restic-wrapper \
        --verbose \
        unlock
    # Back up everything in the $DIRS array (which was passed as args)
    # This results in some level of pollution with regard to what paths are backed up
    # (especially on ostree systems where we do the etc diff) but that's syntactic and
    # we can script around it.
    /opt/restic-wrapper \
        --verbose \
{% for item in backup_s3backup_exclude_list + backup_s3backup_exclude_list_extra %}
        --exclude="{{ item }}" \
{% endfor %}
        --exclude="/data/**/backup" \
        --exclude="/data/**/backups" \
        --exclude="*.bak" \
        --exclude="*.tmp" \
        --exclude="*.swp" \
        --retry-lock=3h \
        backup \
        "$@"
    # In addition, we should also prune our backups
    # https://restic.readthedocs.io/en/stable/060_forget.html
    # --keep-daily n      Keeps daily backups for the last n days
    # --keep-weekly n     Keeps weekly backups for the last n weeks
    # --keep-monthly n    Keeps monthly backups for the last n months
    # --keep-tag foo      Keeps all snapshots tagged with "foo"
    # --host "$HOSTNAME"  Only act on *our* snapshots. We assume other machines are taking
    #                     care of their own houses.
    /opt/restic-wrapper \
        --verbose \
        --retry-lock=3h \
        forget \
        --keep-daily 7 \
        --keep-weekly 4 \
        --keep-monthly 6 \
        --keep-tag noremove \
        --host "$HOSTNAME"
}

# Dump Postgres DBs, if possible
if command -v psql > /dev/null 2>&1; then
    # Put down a place for us to store backups, if we don't have it already
    backupdir="/opt/postgres-backups"
    mkdir -p "$backupdir"
    # Populate a list of databases
    declare -a DATABASES
    while read -r line; do
        DATABASES+=("$line")
    done < <(sudo -u postgres psql -t -A -c "SELECT datname FROM pg_database where datname not in ('template0', 'template1', 'postgres');" 2>/dev/null)

    # pg_dump all DBs, compress them, and stage them on disk for the restic run
    echo "Commencing backup on the following databases:"
    for dir in "${DATABASES[@]}"; do
        echo "- $dir"
    done
    echo "Will upload resultant backups to {{ backup_s3_bucket }}"
    for db in "${DATABASES[@]}"; do
        echo "Backing up $db"
        path="$backupdir/$db.pgsql.gz"
        sudo -u postgres pg_dump "$db" \
            | gzip -v9 \
            > "$path"
        DIRS+=("$path")
    done
fi

# Back up all items in the list (including any staged DB dumps) with restic
if [ -n "${DIRS[*]}" ]; then
    echo "Commencing backup on the following items:"
    for dir in "${DIRS[@]}"; do
        echo "- $dir"
    done
    echo "Will ignore the following items:"
{% for item in backup_s3backup_exclude_list + backup_s3backup_exclude_list_extra %}
    echo "- {{ item }}"
{% endfor %}
    echo "Will upload resultant backups to {{ backup_s3_bucket }}"
    backup "${DIRS[@]}"
fi
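After the role converges, the first backup is worth running by hand. A sketch, assuming the restic repository has not been initialized yet (nothing in this diff does that automatically):

    # One-time: initialize the repository in the configured bucket
    /opt/restic-wrapper init
    # Run a backup outside the timer and watch it work
    /opt/backup.sh
    # Confirm the snapshot landed
    /opt/restic-wrapper snapshots --host "$(hostname)"
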
@ -5,6 +5,7 @@ Description=Nightly backup timer
[Timer]
Persistent=true
OnCalendar={{ backup_time }}
RandomizedDelaySec={{ backup_time_randomization }}

[Install]
WantedBy=timers.target
1 roles/backup/templates/restic-password Normal file
@ -0,0 +1 @@
{{ backup_restic_password }}
11 roles/backup/templates/restic-wrapper.sh Normal file
@ -0,0 +1,11 @@
#! /bin/sh
export AWS_ACCESS_KEY_ID="{{ backup_s3_aws_access_key_id }}"
export AWS_SECRET_ACCESS_KEY="{{ backup_s3_aws_secret_access_key }}"
export RESTIC_CACHE_DIR="/var/cache/restic"
mkdir -p "$RESTIC_CACHE_DIR"
chown root: "$RESTIC_CACHE_DIR"
chmod 0700 "$RESTIC_CACHE_DIR"
exec nice -n 10 restic \
    -r "s3:{{ backup_s3_aws_endpoint_url }}/{{ backup_s3_bucket }}/restic" \
    -p /opt/restic-password \
    "$@"
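Since the wrapper bakes in the repository, credentials, and password file, restores reduce to ordinary restic subcommands passed through it; the target path here is illustrative:

    # List snapshots, then restore the most recent one for this host
    /opt/restic-wrapper snapshots
    /opt/restic-wrapper restore latest --host "$(hostname)" --target /tmp/restore
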
@ -1,17 +0,0 @@
#! /bin/bash
#
# s3backup-analyze.sh
# A companion script to s3backup to analyze disk usage for backups

# NOTICE: DO NOT MODIFY THIS FILE
# Any changes made will be clobbered by Ansible
# Please make any configuration changes in the main repo

exec ncdu \
{% for item in backup_s3backup_list + backup_s3backup_list_extra %}
    "{{ item }}" \
{% endfor %}
{% for item in backup_s3backup_exclude_list + backup_s3backup_exclude_list_extra %}
    --exclude "{{ item }}" \
{% endfor %}
    -r
@ -1,110 +0,0 @@
#! /bin/bash
#
# s3backup.sh
# General-purpose, Ansible-managed backup script to push directories, DBs, and
# more up to an S3 bucket
#
# NOTICE: THIS FILE CONTAINS SECRETS
# This file may contain the following secrets depending on configuration:
# * An AWS access key
# * An AWS session token
# These are NOT things you want arbitrary readers to access! Ansible will
# attempt to ensure this file has 0700 permissions, but that won't stop you
# from changing that yourself
# DO NOT ALLOW THIS FILE TO BE READ BY NON-ROOT USERS

# NOTICE: DO NOT MODIFY THIS FILE
# Any changes made will be clobbered by Ansible
# Please make any configuration changes in the main repo

set -e

# AWS S3 configuration
# NOTE: THIS IS SECRET INFORMATION
export AWS_ACCESS_KEY_ID="{{ backup_s3_aws_access_key_id }}"
export AWS_SECRET_ACCESS_KEY="{{ backup_s3_aws_secret_access_key }}"

# Directories to backup
# Ansible will determine the entries here

# We use a bash array because it affords us some level of sanitization, enough
# to let us back up items whose paths contain spaces
declare -a DIRS
{% for item in backup_s3backup_list + backup_s3backup_list_extra %}
DIRS+=("{{ item }}")
{% endfor %}
# End directory manual configuration

# If we have ostree, add diff'd configs to the list, too
if command -v ostree > /dev/null 2>&1; then
    for file in $(
        ostree admin config-diff 2>/dev/null | \
        grep -e '^[AM]' | \
        awk '{print $2}'
    ); do
        DIRS+=("/etc/$file")
    done
fi

# Helper functions
backup() {
    # Takes a file or directory to backup and backs it up
    [ -z "$1" ] && return 1

    dir="$1"
    echo "- $dir"

    nice -n 10 tar {{ backup_s3backup_tar_args }}{{ backup_s3backup_tar_args_extra }} \
{% for item in backup_s3backup_exclude_list + backup_s3backup_exclude_list_extra %}
        --exclude "{{ item }}" \
{% endfor %}
        "$dir" \
        | aws s3 cp --expected-size 274877906944 - \
        "s3://{{ backup_s3_bucket }}/{{ inventory_hostname }}/$dir/$(date "+{{ backup_dateformat }}").tar.gz"
}

# Tar up all items in the backup list, recursively, and pipe them straight
# up to S3
if [ -n "${DIRS[*]}" ]; then
    echo "Commencing backup on the following items:"
    for dir in "${DIRS[@]}"; do
        echo "- $dir"
    done
    echo "Will ignore the following items:"
{% for item in backup_s3backup_exclude_list + backup_s3backup_exclude_list_extra %}
    echo "- {{ item }}"
{% endfor %}
    echo "Will upload resultant backups to {{ backup_s3_bucket }}"
    for dir in "${DIRS[@]}"; do
        if [ "$dir" == "/data" ]; then
            for datadir in "$dir"/*; do
                [ -e "$datadir" ] && backup "$datadir"
            done
        else
            backup "$dir"
        fi
    done
fi

# Dump Postgres DBs, if possible
if command -v psql > /dev/null 2>&1; then
    # Populate a list of databases
    declare -a DATABASES
    while read -r line; do
        DATABASES+=("$line")
    done < <(sudo -u postgres psql -t -A -c "SELECT datname FROM pg_database where datname not in ('template0', 'template1', 'postgres');" 2>/dev/null)

    # pg_dump all DBs, compress them, and pipe straight up to S3
    echo "Commencing backup on the following databases:"
    for dir in "${DATABASES[@]}"; do
        echo "- $dir"
    done
    echo "Will upload resultant backups to {{ backup_s3_bucket }}"
    for db in "${DATABASES[@]}"; do
        echo "Backing up $db"
        sudo -u postgres pg_dump "$db" \
            | gzip -v9 \
            | aws s3 cp - \
            "s3://{{ backup_s3_bucket }}/{{ inventory_hostname }}/pgdump/$db/$(date "+{{ backup_dateformat }}").pgsql.gz"
    done
fi
@ -1,47 +0,0 @@
#! /bin/bash
#
# s3pgdump.sh
# General-purpose, Ansible-managed backup script to dump PostgreSQL DBs to
# an S3 bucket
#

# NOTICE: THIS FILE CONTAINS SECRETS
# This file may contain the following secrets depending on configuration:
# * An AWS access key
# * An AWS session token
# These are NOT things you want arbitrary readers to access! Ansible will
# attempt to ensure this file has 0700 permissions, but that won't stop you
# from changing that yourself
# DO NOT ALLOW THIS FILE TO BE READ BY NON-ROOT USERS

# NOTICE: DO NOT MODIFY THIS FILE
# Any changes made will be clobbered by Ansible
# Please make any configuration changes in the main repo

set -e

# AWS S3 configuration
# NOTE: THIS IS SECRET INFORMATION
export AWS_ACCESS_KEY_ID="{{ backup_s3_aws_access_key_id }}"
export AWS_SECRET_ACCESS_KEY="{{ backup_s3_aws_secret_access_key }}"

# Populate a list of databases
declare -a DATABASES
while read -r line; do
    DATABASES+=("$line")
done < <(sudo -u postgres psql -t -A -c "SELECT datname FROM pg_database where datname not in ('template0', 'template1', 'postgres');" 2>/dev/null)

# pg_dump all DBs, compress them, and pipe straight up to S3
echo "Commencing backup on the following databases:"
for dir in "${DATABASES[@]}"; do
    echo "- $dir"
done
echo "Will upload resultant backups to {{ backup_s3_bucket }}"
for db in "${DATABASES[@]}"; do
    echo "Backing up $db"
    sudo -u postgres pg_dump "$db" \
        | gzip -v9 \
        | aws s3 cp - \
        "s3://{{ backup_s3_bucket }}/{{ inventory_hostname }}/$db-$(date "+{{ backup_dateformat }}").pgsql.gz"
done
@ -1,64 +0,0 @@
#! /bin/bash
#
# s3restore.sh
# Companion script to s3backup.sh, this script obtains a listing of recent
# backups and offers the user a choice to restore from.
#
# This script offers no automation; it is intended for use by hand.
#
# NOTICE: THIS FILE CONTAINS SECRETS
# This file may contain the following secrets depending on configuration:
# * An AWS access key
# * An AWS session token
# These are NOT things you want arbitrary readers to access! Ansible will
# attempt to ensure this file has 0700 permissions, but that won't stop you
# from changing that yourself
# DO NOT ALLOW THIS FILE TO BE READ BY NON-ROOT USERS

# NOTICE: DO NOT MODIFY THIS FILE
# Any changes made will be clobbered by Ansible
# Please make any configuration changes in the main repo

set -e
url="s3://{{ backup_s3_bucket }}/{{ inventory_hostname }}/"

# AWS S3 configuration
# NOTE: THIS IS SECRET INFORMATION
export AWS_ACCESS_KEY_ID="{{ backup_s3_aws_access_key_id }}"
export AWS_SECRET_ACCESS_KEY="{{ backup_s3_aws_secret_access_key }}"

# Obtain a list of possible restorable backups for this host
declare -a BACKUPS
printf "Querying S3 for restorable backups (\e[35m$url\e[0m)...\n"
while read -r line; do
    filename="$(echo "$line" | awk '{print $NF}')"
    BACKUPS+=("$filename")
done < <(aws s3 ls "$url")

# Present the user with some options
printf "Possible restorable backups:\n"
printf "\e[37m\t%s\t%s\n\e[0m" "Index" "Filename"
for index in "${!BACKUPS[@]}"; do
    printf "\t\e[32m%s\e[0m\t\e[34m%s\e[0m\n" "$index" "${BACKUPS[$index]}"
done

# Ensure we can write to pwd
if ! [ -w "$PWD" ]; then
    printf "To restore a backup, please navigate to a writeable directory\n"
    exit 1
fi

# Query for a backup to pull down
printf "Please select a backup by \e[32mindex\e[0m to pull down\n"
printf "It will be copied into the current directory as a tarball\n"
read -p "?" restoreindex

# Sanity check user input
if [ -z "${BACKUPS[$restoreindex]}" ]; then
    printf "Invalid selection, aborting: $restoreindex\n"
    exit 2
fi

# Copy the thing
printf "Pulling backup...\n"
aws s3 cp "$url${BACKUPS[$restoreindex]}" ./
@ -11,7 +11,6 @@
    - apt-file
    - aptitude
    - at
    - awscli
    - htop
    - jq
    - ncdu
@ -19,8 +18,6 @@
    - nfs-common
    - openssh-server
    - pwgen
    - python-is-python3 # God damn you Nextcloud role
    - python2 # Needed for some legacy crap
    - python3-apt
    - python3-boto
    - python3-boto3
@ -44,10 +41,7 @@
- name: configure rpm-ostree packages
  community.general.rpm_ostree_pkg:
    name:
      - awscli
      - htop
      - ibm-plex-fonts-all
      - ncdu
      - screen
      - vim
  when: ansible_os_family == "RedHat" and ansible_pkg_mgr == "atomic_container"
@ -13,6 +13,14 @@ alias ls="ls $lsarguments"
alias ll="ls -Al --file-type $lsarguments"
unset lsarguments

# Extra shell aliases for things
resticwrapper="/opt/restic-wrapper"
if [ -e "$resticwrapper" ]; then
    alias r="$resticwrapper"
    alias r-snapshots="$resticwrapper snapshots -g host -c"
    alias r-prune="$resticwrapper prune"
fi

# Set some bash-specific stuff
[ "${BASH-}" ] && [ "$BASH" != "/bin/sh" ] || return
# Like a fancy prompt
@ -148,22 +148,60 @@ desktop_apt_packages_remove_extra: []
desktop_apt_debs: []
desktop_apt_debs_extra: []

desktop_flatpak_remotes:
  - name: flathub
    url: "https://dl.flathub.org/repo/flathub.flatpakrepo"
  - name: flathub-beta
    url: "https://flathub.org/beta-repo/flathub-beta.flatpakrepo"
desktop_flatpak_remotes_extra: []
desktop_ostree_layered_packages:
  - akmod-v4l2loopback # Used by OBS for proper virtual webcam
  - cava # Sadly does not enable functionality in waybar :<
  - cryfs # Used for vaults
  - foot # Wayblue ships Kitty but I don't like the dev direction
  - htop # For some reason not the default
  - ibm-plex-fonts-all
  - iotop # Requires uncontainerized access to the host
  - libvirt
  - ncdu
  - NetworkManager-tui
  - obs-studio # Has to be installed native for virtual webcam
  - restic # Also called in via the backup role, but doing this here saves a deployment
  - vim # It's just way too much hassle that this isn't installed by default
  - virt-manager # VMs, baby
  - ydotool # Must be layered in and configured since it's a hw emulator thing
  - zerotier-one # Ideally layered in since it's a network daemon
desktop_ostree_layered_packages_extra: []
desktop_ostree_removed_packages:
  - firefox
  - firefox-langpacks
desktop_ostree_removed_packages_extra: []

desktop_flatpak_packages:
  - remote: flathub
    packages:
      - com.discordapp.Discord
      - com.obsproject.Studio
      - com.bambulab.BambuStudio
      - com.github.Matoking.protontricks
      - com.github.tchx84.Flatseal
      - com.nextcloud.desktopclient.nextcloud
      - com.spotify.Client
      - com.valvesoftware.Steam
      - com.visualstudio.code
      - com.vscodium.codium
      - dev.vencord.Vesktop
      - im.riot.Riot
      - io.freetubeapp.FreeTube
      - io.github.Cockatrice.cockatrice
      - io.github.hydrusnetwork.hydrus
      - io.mpv.Mpv
      - md.obsidian.Obsidian
      - net.lutris.Lutris
      - net.minetest.Minetest
      - org.DolphinEmu.dolphin-emu
      - org.freecad.FreeCAD
      - org.gimp.GIMP
      - org.gnucash.GnuCash
      - org.keepassxc.KeePassXC
      - org.libreoffice.LibreOffice
      - org.mozilla.firefox
  - remote: flathub-beta
    packages:
      - net.lutris.Lutris
      - org.mozilla.Thunderbird
      - org.openscad.OpenSCAD
      - org.qbittorrent.qBittorrent
# - remote: unmojang
#   packages:
#     - org.unmojang.FjordLauncher
desktop_flatpak_packages_extra: []
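For reference, the manual equivalent of what the role does with these remote and package lists is roughly (IDs taken from the lists above):

    # Add the flathub remote, then install one of the listed apps
    flatpak remote-add --if-not-exists flathub https://dl.flathub.org/repo/flathub.flatpakrepo
    flatpak install -y flathub org.mozilla.firefox
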
5 roles/desktop/meta/main.yml Normal file
@ -0,0 +1,5 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
---
dependencies:
  - role: flatpak
@ -27,14 +27,16 @@
  ansible.builtin.apt: deb="{{ item }}"
  loop: "{{ desktop_apt_debs + desktop_apt_debs_extra }}"
  when: ansible_pkg_mgr == "apt"
- name: configure ostree
  block:
    - name: configure layered packages for ostree
      community.general.rpm_ostree_pkg: name="{{ desktop_ostree_layered_packages + desktop_ostree_layered_packages_extra }}"
    - name: configure removed base packages for ostree
      community.general.rpm_ostree_pkg: name="{{ desktop_ostree_removed_packages + desktop_ostree_removed_packages_extra }}" state=absent
  when: ansible_os_family == "RedHat" and ansible_pkg_mgr == "atomic_container"
- name: configure pip3 packages
  ansible.builtin.pip: executable=/usr/bin/pip3 state=latest name="{{ desktop_pip3_packages + desktop_pip3_packages_extra }}"
  when: ansible_os_family != "Gentoo"
- name: configure flatpak
  block:
    - name: configure flatpak remotes
      flatpak_remote: name="{{ item.name }}" state=present flatpakrepo_url="{{ item.url }}"
      with_items: "{{ desktop_flatpak_remotes + desktop_flatpak_remotes_extra }}"
      when: ansible_pkg_mgr == "apt"
    - name: configure installed flatpaks
      flatpak: name="{{ item.packages }}" state=present remote="{{ item.remote | default('flathub', true) }}"
      with_items: "{{ desktop_flatpak_packages + desktop_flatpak_packages_extra }}"
@ -1,41 +0,0 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
tmodloader_name: generic

# Container settings
tmodloader_uid: 1521
tmodloader_gid: 1521
tmodloader_state: started
tmodloader_image: rehashedsalt/tmodloader-docker:bleeding
tmodloader_restart_policy: unless-stopped
tmodloader_timezone: "America/Chicago"
# Container network settings
tmodloader_external_port: "7777"
tmodloader_data_prefix: "/data/terraria/{{ tmodloader_name }}"

# Server configuration
# We have two variables here; things you might not want to change and things
# that you probably will
tmodloader_config:
  autocreate: "3"
  difficulty: "1"
  secure: "0"
tmodloader_config_extra:
  maxplayers: "8"
  motd: "Deployed via Ansible edition"
  password: "dicks"
# Server configuration specific to this Ansible role
# DO NOT CHANGE
tmodloader_config_internal:
  port: "7777"
  world: "/terraria/ModLoader/Worlds/World.wld"
  worldpath: "/terraria/ModLoader/Worlds"
# A list of mods to acquire
# The default server of mirror.sgkoi.dev is the official tModLoader mod browser
# mirror
tmodloader_mod_server: "https://mirror.sgkoi.dev"
# tmodloader_mods:
#   - "CalamityMod"
#   - "RecipeBrowser"
#   - "BossChecklist"
tmodloader_mods: []
@ -1,7 +0,0 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
- name: restart tmodloader {{ tmodloader_name }}
  docker_container:
    name: "tmodloader-{{ tmodloader_name }}"
    state: started
    restart: yes
@ -1,76 +0,0 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
---
- name: assure tmodloader {{ tmodloader_name }} directory structure
  ansible.builtin.file:
    state: directory
    owner: "{{ tmodloader_uid }}"
    group: "{{ tmodloader_gid }}"
    mode: "0750"
    path: "{{ item }}"
    # We recurse here since these directories and all of their contents
    # should be read-write by the container without exception.
    recurse: yes
  with_items:
    - "{{ tmodloader_data_prefix }}/backups"
    - "{{ tmodloader_data_prefix }}/data"
    - "{{ tmodloader_data_prefix }}/data/ModLoader"
    - "{{ tmodloader_data_prefix }}/data/ModLoader/Mods"
    - "{{ tmodloader_data_prefix }}/data/ModLoader/Worlds"
    - "{{ tmodloader_data_prefix }}/logs"
- name: assure mods
  ansible.builtin.shell:
    cmd: "curl -L \"{{ tmodloader_mod_server }}\" -o \"{{ item }}.tmod\" && chown \"{{ tmodloader_uid }}:{{ tmodloader_gid }}\" \"{{ item }}.tmod\""
    chdir: "{{ tmodloader_data_prefix }}/data/ModLoader/Mods"
    creates: "{{ tmodloader_data_prefix }}/data/ModLoader/Mods/{{ item }}.tmod"
  with_list: "{{ tmodloader_mods }}"
  when: tmodloader_mods
  notify: "restart tmodloader {{ tmodloader_name }}"
- name: enable mods
  ansible.builtin.template:
    src: enabled.json
    dest: "{{ tmodloader_data_prefix }}/data/ModLoader/Mods/enabled.json"
    owner: "{{ tmodloader_uid }}"
    group: "{{ tmodloader_gid }}"
    mode: "0750"
  when: tmodloader_mods
  notify: "restart tmodloader {{ tmodloader_name }}"
- name: assure tmodloader {{ tmodloader_name }} files
  ansible.builtin.file:
    state: touch
    owner: "{{ tmodloader_uid }}"
    group: "{{ tmodloader_gid }}"
    mode: "0750"
    path: "{{ item }}"
  with_items:
    - "{{ tmodloader_data_prefix }}/config.txt"
- name: assure {{ tmodloader_name }} configs
  ansible.builtin.lineinfile:
    state: present
    regexp: "^{{ item.key }}"
    line: "{{ item.key }}={{ item.value }}"
    path: "{{ tmodloader_data_prefix }}/config.txt"
  with_dict: "{{ tmodloader_config | combine(tmodloader_config_extra) | combine(tmodloader_config_internal) }}"
  notify: "restart tmodloader {{ tmodloader_name }}"
- name: assure {{ tmodloader_name }} backup cronjob
  ansible.builtin.cron:
    user: root
    name: "terraria-{{ tmodloader_name }}"
    minute: "*/30"
    job: "tar czvf \"{{ tmodloader_data_prefix }}/backups/world-$(date +%Y-%m-%d-%H%M).tgz\" \"{{ tmodloader_data_prefix }}/data/ModLoader/Worlds\" \"{{ tmodloader_data_prefix }}/data/tModLoader/Worlds\""
- name: assure tmodloader {{ tmodloader_name }} container
  docker_container:
    name: "tmodloader-{{ tmodloader_name }}"
    state: started
    image: "{{ tmodloader_image }}"
    restart_policy: "{{ tmodloader_restart_policy }}"
    pull: yes
    user: "{{ tmodloader_uid }}:{{ tmodloader_gid }}"
    env:
      TZ: "{{ tmodloader_timezone }}"
    ports:
      - "{{ tmodloader_external_port }}:7777"
    volumes:
      - "{{ tmodloader_data_prefix }}/data:/terraria"
      - "{{ tmodloader_data_prefix }}/config.txt:/terraria/config.txt"
      - "{{ tmodloader_data_prefix }}/logs:/terraria-server/tModLoader-Logs"
@ -1,6 +0,0 @@
[
{% for item in tmodloader_mods[1:] %}
    "{{ item }}",
{% endfor %}
    "{{ tmodloader_mods[0] }}"
]
7 roles/flatpak/defaults/main.yml Normal file
@ -0,0 +1,7 @@
#!/usr/bin/env ansible-playbook
---
flatpak_remotes:
  - name: flathub
    state: present
    url: "https://dl.flathub.org/repo/flathub.flatpakrepo"
flatpak_remotes_extra: []
17 roles/flatpak/tasks/main.yml Normal file
@ -0,0 +1,17 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
---
- name: install flatpak on apt distros
  when: ansible_pkg_mgr == "apt"
  block:
    - name: install flatpak packages
      ansible.builtin.apt:
        state: present
        pkg:
          - flatpak
    - name: configure flatpak remotes
      with_items: "{{ flatpak_remotes + flatpak_remotes_extra }}"
      community.general.flatpak_remote:
        name: "{{ item.name }}"
        state: "{{ item.state }}"
        flatpakrepo_url: "{{ item.url }}"
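A converged host can be checked with flatpak itself:

    # List configured remotes and their URLs
    flatpak remotes --show-details
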
28 roles/gameserver-terraria/defaults/main.yml Normal file
@ -0,0 +1,28 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
# What is the name of the server? This should be unique per instance
terraria_server_name: "generic"
# Remove this Terraria server instead of provisioning it?
terraria_server_remove: no

# What mods should be enabled?
terraria_mods: []

# Basic server configuration
terraria_shutdown_message: "Server is going down NOW!"
terraria_motd: "Literally playing Minecraft"
terraria_password: "dicks"
terraria_port: "7777"

terraria_world_name: "World"
# Leaving this value blank rolls one for us
terraria_world_seed: ""
# 1 Small
# 2 Medium
# 3 Large
terraria_world_size: "3"
# 0 Normal
# 1 Expert
# 2 Master
# 3 Journey
terraria_world_difficulty: "1"
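Because every path and resource below is keyed off terraria_server_name, a second server is just another application of the role with different vars; a sketch, assuming the role is wired into site.yml:

    ansible-playbook site.yml \
      -e terraria_server_name=expert \
      -e terraria_port=7778 \
      -e terraria_world_difficulty=1
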
58 roles/gameserver-terraria/tasks/main.yml Normal file
@ -0,0 +1,58 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
#
# Docs available here:
# https://github.com/JACOBSMILE/tmodloader1.4
#
# If you need to run a command in this container:
# docker exec tmodloader inject "say Hello World!"
#
---
- name: set backups tmodloader - {{ terraria_server_name }}
  vars:
    backup_dirs:
      - "/data/tmodloader/{{ terraria_server_name }}/data/tModLoader/Worlds"
    backup_dest: "/data/tmodloader/{{ terraria_server_name }}/backups"
  ansible.builtin.cron:
    user: root
    name: "terraria-{{ terraria_server_name }}-backup"
    state: "{{ 'absent' if terraria_server_remove else 'present' }}"
    minute: "*/15"
    job: "tar czvf \"{{ backup_dest }}/world-$(date +\\%Y-\\%m-\\%d-\\%H\\%M).tgz\" {{ backup_dirs | join(' ') }} && find {{ backup_dest }}/ -type f -iname \\*.tgz -mtime +1 -print -delete"
  tags: [ docker, tmodloader, cron, backup, tar ]
- name: assure backups dir tmodloader - {{ terraria_server_name }}
  ansible.builtin.file:
    path: "/data/tmodloader/{{ terraria_server_name }}/backups"
    state: directory
    owner: root
    group: root
    mode: "0700"
  tags: [ docker, tmodloader, file, directory, backup ]
- name: docker deploy tmodloader - {{ terraria_server_name }}
  community.general.docker_container:
    name: tmodloader-{{ terraria_server_name }}
    state: "{{ 'absent' if terraria_server_remove else 'started' }}"
    image: docker.io/jacobsmile/tmodloader1.4:latest
    env:
      TMOD_AUTODOWNLOAD: "{{ terraria_mods | sort() | join(',') }}"
      TMOD_ENABLEDMODS: "{{ terraria_mods | sort() | join(',') }}"
      TMOD_SHUTDOWN_MESSAGE: "{{ terraria_shutdown_message }}"
      TMOD_MOTD: "{{ terraria_motd }}"
      TMOD_PASS: "{{ terraria_password }}"
      TMOD_WORLDNAME: "{{ terraria_world_name }}"
      TMOD_WORLDSEED: "{{ terraria_world_seed }}"
      TMOD_WORLDSIZE: "{{ terraria_world_size }}"
      TMOD_DIFFICULTY: "{{ terraria_world_difficulty }}"
      TMOD_PORT: "7777"
      # In theory, this allows you to change how much data the server sends
      # This is in Hz. Crank it lower to throttle it at the cost of NPC jitteriness
      #TMOD_NPCSTREAM: "60"
    ports:
      - "{{ terraria_port }}:7777/tcp"
      - "{{ terraria_port }}:7777/udp"
    volumes:
      - "/data/tmodloader/{{ terraria_server_name }}/data:/data"
      - "/data/tmodloader/{{ terraria_server_name }}/logs:/terraria-server/tModLoader-Logs"
      - "/data/tmodloader/{{ terraria_server_name }}/dotnet:/terraria-server/dotnet"
  tags: [ docker, tmodloader ]
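Day to day the container is driven like any other Docker service; the image's inject helper (per the docs linked above) forwards console commands, and the container name follows the default terraria_server_name:

    # Tail the server console for the default instance
    docker logs -f tmodloader-generic
    # Send an in-game command through the image's inject helper
    docker exec tmodloader-generic inject "say Hello World!"
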
37 roles/ingress-traefik/defaults/main.yml Normal file
@ -0,0 +1,37 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:

# Core container configuration
ingress_container_image: docker.io/traefik:latest
ingress_container_name: ingress

# Core service configuration
ingress_container_tls: no
ingress_container_dashboard: no

# Secondary container configuration
ingress_container_ports:
  - 80:80
  - 443:443
ingress_container_ports_dashboard:
  - 8080:8080
ingress_container_timezone: America/Chicago
ingress_container_docker_socket_location: "/var/run/docker.sock"

# Command args
ingress_command_args:
  - "--api.dashboard=true"
  - "--providers.docker"
  - "--providers.docker.exposedbydefault=false"
  - "--entrypoints.web.address=:80"
ingress_command_args_tls:
  - "--entrypoints.web.address=:443"
  - "--certificatesresolvers.letsencrypt.acme.httpchallenge.entrypoint=web"
  - "--certificatesresolvers.letsencrypt.acme.email=rehashedsalt@cock.li"
  - "--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json"
ingress_command_args_extra: []

# Network configuration
ingress_container_networks:
  - name: web
    aliases: [ "ingress" ]
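With exposedbydefault=false, nothing routes until a container on the shared web network opts in through labels. A minimal sketch using Traefik's demo image and a placeholder hostname:

    docker run -d --name whoami --network web \
      --label traefik.enable=true \
      --label 'traefik.http.routers.whoami.rule=Host(`whoami.example.com`)' \
      traefik/whoami
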
16 roles/ingress-traefik/tasks/main.yml Normal file
@ -0,0 +1,16 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
- name: assure traefik container
  docker_container:
    name: "{{ ingress_container_name }}"
    image: "{{ ingress_container_image }}"
    restart_policy: unless-stopped
    command: "{{ ingress_command_args + ingress_command_args_tls + ingress_command_args_extra if ingress_container_tls else ingress_command_args + ingress_command_args_extra }}"
    env:
      TZ: "{{ ingress_container_timezone }}"
    networks: "{{ ingress_container_networks }}"
    ports: "{{ ingress_container_ports + ingress_container_ports_dashboard if ingress_container_dashboard else ingress_container_ports }}"
    volumes:
      - "{{ ingress_container_docker_socket_location }}:/var/run/docker.sock"
      - "/data/traefik/letsencrypt:/letsencrypt"
  tags: [ docker, ingress, traefik ]
@ -2,7 +2,7 @@
# vim:ft=ansible:

# Core container configuration
ingress_container_image: jonasal/nginx-certbot:latest
ingress_container_image: docker.io/jonasal/nginx-certbot:latest
ingress_container_name: ingress

# Secondary container configuration
@ -21,6 +21,12 @@ ingress_container_networks:
# Certbot configuration
ingress_container_certbot_email: rehashedsalt@cock.li

# Volumes
ingress_container_volumes:
  - "{{ ingress_container_persist_dir }}/letsencrypt:/etc/letsencrypt"
  - "{{ ingress_container_persist_dir }}/user_conf.d:{{ ingress_container_config_mount }}:ro"
ingress_container_volumes_extra: []

# General Nginx configuration
ingress_listen_args: "443 http2 ssl"
ingress_resolver: 8.8.8.8
@ -3,3 +3,8 @@
- name: restart ingress container
  docker_container: name="{{ ingress_container_name }}" state=started restart=yes
  become: yes
- name: reload ingress container
  community.docker.docker_container_exec:
    container: "{{ ingress_container_name }}"
    command: nginx -s reload
  become: yes
@ -5,9 +5,6 @@
  with_items:
    - letsencrypt
    - user_conf.d
- name: template out ingress configuration file
  ansible.builtin.template: src=vhosts.conf.j2 dest="{{ ingress_container_persist_dir }}/user_conf.d/vhosts.conf" mode="0640"
  notify: restart ingress container
- name: assure ingress container
  docker_container:
    name: ingress
@ -17,6 +14,14 @@
      CERTBOT_EMAIL: "{{ ingress_container_certbot_email }}"
    networks: "{{ ingress_container_networks }}"
    ports: "{{ ingress_container_ports }}"
    volumes:
      - "{{ ingress_container_persist_dir }}/letsencrypt:/etc/letsencrypt"
      - "{{ ingress_container_persist_dir }}/user_conf.d:{{ ingress_container_config_mount }}:ro"
    volumes: "{{ ingress_container_volumes + ingress_container_volumes_extra }}"
- name: template out configuration
  block:
    - name: template out ingress configuration file
      ansible.builtin.template: src=vhosts.conf.j2 dest="{{ ingress_container_persist_dir }}/user_conf.d/vhosts.conf" mode="0640"
      notify: reload ingress container
    - name: test templated configuration file
      community.docker.docker_container_exec:
        container: ingress
        command: nginx -t
      changed_when: false
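The test-then-reload flow above can be reproduced by hand when debugging a broken vhost (container name per the role defaults):

    # Validate the rendered config inside the container, then hot-reload it
    docker exec ingress nginx -t && docker exec ingress nginx -s reload
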
@ -1,3 +1,7 @@
{% if ingress_head is defined %}
{{ ingress_head }}
{% endif %}

{% for server in ingress_servers %}
server {
{% if loop.index == 1 %}
@ -49,9 +53,14 @@ server {
        proxy_buffers 4 256k;
        proxy_busy_buffers_size 256k;
        proxy_set_header Host $host;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass {{ server.proxy_pass }};
        proxy_request_buffering off;
        {% if server.proxy_extra is defined %}{{ server.proxy_extra }}{% endif %}
    }
{% elif server.proxies is defined %}
    # Proxy locations
@ -61,9 +70,14 @@ server {
        proxy_buffers 4 256k;
        proxy_busy_buffers_size 256k;
        proxy_set_header Host $host;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass {{ proxy.pass }};
        proxy_request_buffering off;
        {% if proxy.extra is defined %}{{ proxy.extra }}{% endif %}
    }
{% endfor %}
{% endif %}
18 roles/kodi/defaults/main.yml Normal file
@ -0,0 +1,18 @@
#!/usr/bin/env ansible-playbook
---
kodi_flatpak_name: "tv.kodi.Kodi"

kodi_autologin_user: "kodi"
kodi_autologin_user_groups:
  - audio # Gotta be able to play audio
  - tty # Required to start Cage
  - video # Not sure if required, but could be useful for hw accel
kodi_autologin_service: "kodi.service"

kodi_apt_packages:
  - alsa-utils # For testing audio
  - cage # A kiosk wayland compositor
  - pipewire # Audio routing
  - pipewire-pulse
  - wireplumber
  - xwayland # Required for Kodi since it's not Wayland-native
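The kodi.service unit itself is not part of this diff, so purely as an illustrative sketch of how these pieces compose, run as the autologin user:

    # Cage hosts the Kodi flatpak as a single-window Wayland kiosk
    cage -- flatpak run tv.kodi.Kodi
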
8 roles/kodi/handlers/main.yml Normal file
@ -0,0 +1,8 @@
#!/usr/bin/env ansible-playbook
# vim:ft=ansible:
---
- name: restart kodi
  ansible.builtin.systemd:
    name: "{{ kodi_autologin_service }}"
    state: restarted
    daemon_reload: yes
Some files were not shown because too many files have changed in this diff.