initial commit, here be dragons

This commit is contained in:
Chris 2021-04-04 18:45:11 +02:00
commit 58137668b7
Signed by: Lazalatin
GPG Key ID: 96E3D0920AEEF022
39 changed files with 1237 additions and 0 deletions

145
.editorconfig Normal file
View File

@ -0,0 +1,145 @@
# EditorConfig: https://EditorConfig.org
# top-most EditorConfig file
root = true
# general preferences
[*]
end_of_line = lf
insert_final_newline = true
# IDEA specific
curly_bracket_next_line = false
wildcard_import_limit = 0
# Generic XML
# https://google.github.io/styleguide/xmlstyle.html
[*.xml]
indent_size = 2
indent_style = space
# Maven
# https://maven.apache.org/
[pom.xml]
indent_size = 4
indent_style = space
# Groovy (gradle)
# ?
[*.{groovy, gradle}]
indent_size = 4
indent_style = space
# Bazel: https://bazel.build/
# https://github.com/bazelbuild/buildtools/blob/master/BUILD.bazel
[*.{bazel, bzl}]
indent_size = 4
indent_style = space
# CSS
# https://google.github.io/styleguide/htmlcssguide.xml#General_Formatting_Rules
# http://cssguidelin.es/#syntax-and-formatting
[*.css]
indent_size = 2
indent_style = space
trim_trailing_whitespace = true
# GNU make
# https://www.gnu.org/software/make/manual/html_node/Recipe-Syntax.html
[Makefile]
indent_style = tab
# Go
# https://golang.org/cmd/gofmt/
[{go.mod, *.go}]
indent_style = tab
# GraphQL
# https://graphql.org/learn/
# https://prettier.io
[*.graphql]
indent_size = 2
indent_style = space
# HTML
# https://google.github.io/styleguide/htmlcssguide.xml#General_Formatting_Rules
[*.{htm, html}]
indent_size = 2
indent_style = space
trim_trailing_whitespace = true
# Java
# https://google.github.io/styleguide/javaguide.html#s4.2-block-indentation
[*.java]
indent_size = 2
indent_style = space
# JavaScript, JSON, JSX, JavaScript Modules, TypeScript
# https://github.com/feross/standard
# https://prettier.io
[*.{cjs, js, json, jsx, mjs, ts, tsx}]
indent_size = 2
indent_style = space
# Kotlin (and gradle.kts)
# https://android.github.io/kotlin-guides/style.html#indentation
[*.{kt, kts}]
indent_size = 4
indent_style = space
# LESS
# https://github.com/less/less-docs#less-standards
[*.less]
indent_size = 2
indent_style = space
# PHP
# http://www.php-fig.org/psr/psr-2/
[*.php]
indent_size = 4
indent_style = space
# Python
# https://www.python.org/dev/peps/pep-0008/#code-lay-out
[*.py]
indent_size = 4
indent_style = space
# Ruby
# http://www.caliban.org/ruby/rubyguide.shtml#indentation
[*.rb]
indent_size = 2
indent_style = space
# Rust
# https://github.com/rust-lang/rust/blob/master/src/doc/style/style/whitespace.md
[*.rs]
indent_size = 4
indent_style = space
insert_final_newline = false
trim_trailing_whitespace = true
# SASS
# https://sass-guidelin.es/#syntax--formatting
[*.{sass, scss}]
indent_size = 2
indent_style = space
# Shell
# https://google.github.io/styleguide/shell.xml#Indentation
[*.{bash, sh, zsh}]
indent_size = 2
indent_style = space
# TOML
# https://github.com/toml-lang/toml/tree/master/examples
[*.toml]
indent_size = 2
indent_style = space
# YAML
# http://yaml.org/spec/1.2/2009-07-21/spec.html#id2576668
[*.{yaml, yml}]
indent_size = 2
indent_style = space

16
.gitignore vendored Normal file
View File

@ -0,0 +1,16 @@
### IDEA ###
.idea
### Ansible ###
*.retry
*/.vault_key
.fact_caching
### Vagrant ###
.vagrant/
*.box
### Misc ###
*.kate-swp
*.iml

48
README.md Normal file
View File

@ -0,0 +1,48 @@
# Ansible configuration for $HOST
This repository configures a server based on Ubuntu 20.04
It contains:
- Ansible files for deployment
- Vagrantfile for local testing
## Local VM for testing using Vagrant
You can spin up a local staging VM and provision it using [Vagrant](https://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/).
Test the playbook before tagging/deploying it. See the `/test` directory
| Command | Description |
| --- | --- |
| `vagrant up` | Spin up a staging VM and provision it. |
| `vagrant provision` | Provision (redo) a running VM with the Ansible playbook. |
| `vagrant destroy -f` | Destroy the VM completely |
By default, it uses two cores with 2GB RAM, which can be overwritten with the environment variables `VB_CPUS` and `VB_RAM`.
## Provisioning the target system with Ansible
You need the secret for the vault to decrypt the secrets. Editing the secrets can be done via `ansible-vault group_vars/all/vault.yml`.
To provision the actual server completely:
> ansible-playbook site.yml
Each role has an ansible-tag with the same name. You can run individual roles using the tags, e.g.:
> ansible-playbook site.yml --tags "traefik"
## Playbook Contents
This project should contain at least these roles:
- borg
- responsible for backups of vital persisted data of a hosts services
- bootstrap
- manages the users of the server together with their respective ssh keys and permissions
- Some bootstrap stuff like logrotate, etc.
- docker
- responsible for provisioning a docker environment
- traefik
- responsible for providing traefik and its configuration
- unattended-upgrades
- watchtower
- configures a container which is responsible for automatically updating other containers
- wireguard

13
ansible.cfg Normal file
View File

@ -0,0 +1,13 @@
[defaults]
inventory = hosts.yml
retry_files_enabled = False
vault_password_file = vault-password.sh
gathering = smart
fact_caching = jsonfile
fact_caching_connection = .fact_caching
fact_caching_timeout = 3600
# Print more human-readable command outputs
stdout_callback = debug
[privilege_escalation]
become = True

0
group_vars/all/vault.yml Normal file
View File

1
hosts.yml Normal file
View File

@ -0,0 +1 @@
nachtigall

View File

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL3QNn/uO/cRQcSbWHndnAhNhFOyamQvSxxmeDE9uCeH chris@Krabat

View File

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEE9UlErchhdMODrVFLZWwk+Qm7O8hmWrR92K3omMYg7 LinTron2

View File

@ -0,0 +1,15 @@
---
- name: Install logrotate
ansible.builtin.apt:
name: "logrotate"
- name: Configure logrotate to rotate monthly
ansible.builtin.lineinfile: { path: /etc/logrotate.conf, regexp: "^weekly", line: "monthly" }
- name: Configure logrotate to keep 12 months
ansible.builtin.lineinfile: { path: /etc/logrotate.conf, regexp: "^rotate 4", line: "rotate 12" }
- name: Configure logrotate to compress
ansible.builtin.lineinfile: { path: /etc/logrotate.conf, regexp: "^#compress", line: "compress" }

View File

@ -0,0 +1,66 @@
---
- name: Create users
ansible.builtin.user:
name: "{{ item.name }}"
shell: "{{ item.shell }}"
groups: "sudo"
state: present
loop:
- { name: "chris", shell: "/bin/bash" }
- name: Add authorized keys
ansible.posix.authorized_key:
user: "{{ item.name }}"
key: "{{ lookup('file', '{{ item.keyfile }}') }}"
state: present
loop:
- { name: "chris", keyfile: "Krabat_ed25519.pub" }
- { name: "chris", keyfile: "LinTron2_ed25519.pub" }
- name: Set swappiness via sysctl
ansible.posix.sysctl:
name: vm.swappiness
value: '20'
sysctl_file: /etc/sysctl.d/99-swappiness.conf
state: present
reload: yes
- name: Set timezone
community.general.timezone: { name: "Europe/Berlin" }
- name: Setup static network
ansible.builtin.template:
src: 00-static-config.yaml.j2
dest: /etc/netplan/00-static-config.yaml
- name: Apply netplan configuration
ansible.builtin.command:
cmd: netplan apply
changed_when: false
- name: Setup sudoers to sudo without password
ansible.builtin.lineinfile:
dest: /etc/sudoers
state: present
regexp: ^%sudo\s
line: "%sudo ALL=(ALL) NOPASSWD: ALL"
- name: Install common software
ansible.builtin.apt:
name: "{{ packages }}"
update_cache: yes
vars:
packages:
- htop
- rsync
- nano
- tmux
- byobu
- iotop
- iftop
- colordiff
- ncdu
- name: Include logrotate
ansible.builtin.include_tasks: logrotate.yml

View File

@ -0,0 +1,15 @@
network:
version: 2
ethernets:
{{ ansible_default_ipv4.interface }}:
addresses:
- 10.42.0.20/22
- fe80::2/64
dhcp4: no
dhcp6: no
gateway4: 192.168.178.1
gateway6: fe80::1
nameservers:
addresses:
- 2620:fe::fe # quad9
- 9.9.9.9 # quad9

View File

@ -0,0 +1,11 @@
[Unit]
Description=borgmatic backup
[Service]
Type=oneshot
ExecStart=/usr/local/bin/borgmatic
# Move it into the background:
Nice=15
IOSchedulingClass=best-effort
IOSchedulingPriority=6

View File

@ -0,0 +1,6 @@
[Unit]
Description=Run borgmatic backup
[Timer]
OnCalendar=*-*-* 6:30:00
OnCalendar=*-*-* 18:30:00

View File

@ -0,0 +1,17 @@
---
- name: Create new borg ID ssh keyfiles
community.crypto.openssh_keypair:
path: "/root/.ssh/borg-id"
type: ed25519
comment: "{{ inventory_hostname }} borg backup key"
become: true
register: public_key
- name: Show key info message
ansible.builtin.debug:
msg:
- "Please authorize the following public key to your borg backup server:"
- "---"
- "{{ public_key }}"
- "---"
- "##### ATTENTION: Until this key is registered no backups will work! #####"

80
roles/borg/tasks/main.yml Normal file
View File

@ -0,0 +1,80 @@
---
- name: Set facts
ansible.builtin.set_fact:
# renovate: datasource=github-releases depName=borgbackup/borg
borg_version: "1.1.16"
# renovate: datasource=pypi depName=borgmatic
borgmatic_version: "1.5.12"
- name: Install borg
ansible.builtin.get_url:
url: "https://github.com/borgbackup/borg/releases/download/{{ borg_version }}/borg-linux64"
dest: "/usr/local/bin/borg"
mode: "ugo=rx"
- name: Provide borgmatic dependencies to system
ansible.builtin.apt:
name: "{{ packages }}"
update_cache: yes
state: present
vars:
packages:
- python3
- python3-pip
- name: Provide borgmatic using pip3
ansible.builtin.pip:
name: "borgmatic=={{ borgmatic_version }}"
executable: "pip3"
- name: Make sure borgmatic config directory exists
ansible.builtin.file:
path: /etc/borgmatic
state: directory
mode: "0755"
- name: Copy borgmatic configuration
ansible.builtin.template:
src: borgmatic-config.yml.j2
dest: /etc/borgmatic/config.yaml
owner: root
group: root
mode: '0600'
validate: validate-borgmatic-config -c %s
- name: Copy borgmatic systemd service
ansible.builtin.copy:
src: borgmatic.service
dest: /etc/systemd/system/
owner: root
group: root
mode: '0644'
# Causes weird "Attempted to remove disk file system, and we can't allow that." issue.
# This might be broken due to https://bugs.launchpad.net/ubuntu-manpage-repository/+bug/1817627
#validate: systemd-analyze verify %s
- name: Copy borgmatic systemd timer
ansible.builtin.copy:
src: borgmatic.timer
dest: /etc/systemd/system/
owner: root
group: root
mode: '0644'
# See previous task
#validate: systemd-analyze verify %s
- name: Activate borgmatic timer
ansible.builtin.systemd:
name: borgmatic.timer
state: started
enabled: yes
daemon_reload: yes
- name: Check if borg-id ssh key is already deployed
ansible.builtin.stat: { path: /root/.ssh/borg-id }
register: borg_id
- name: Create borg ID ssh key
ansible.builtin.include_tasks: create_borg-id.yml
when: not borg_id.stat.exists

View File

@ -0,0 +1,242 @@
# Where to look for files to backup, and where to store those backups. See
# https://borgbackup.readthedocs.io/en/stable/quickstart.html and
# https://borgbackup.readthedocs.io/en/stable/usage.html#borg-create for details.
location:
# List of source directories to backup (required). Globs and tildes are expanded.
source_directories:
- /
# Paths to local or remote repositories (required). Tildes are expanded. Multiple
# repositories are backed up to in sequence. See ssh_command for SSH options like
# identity file or port.
repositories:
- ssh://borg@chaospott.de:1234/backup/borg/host
# Stay in same file system (do not cross mount points). Defaults to false.
one_file_system: true
# Only store/extract numeric user and group identifiers. Defaults to false.
#numeric_owner: true
# Use Borg's --read-special flag to allow backup of block and other special
# devices. Use with caution, as it will lead to problems if used when
# backing up special devices such as /dev/zero. Defaults to false.
#read_special: false
# Record bsdflags (e.g. NODUMP, IMMUTABLE) in archive. Defaults to true.
bsd_flags: false
# Mode in which to operate the files cache. See
# https://borgbackup.readthedocs.io/en/stable/usage/create.html#description for
# details. Defaults to "ctime,size,inode".
#files_cache: ctime,size,inode
# Alternate Borg local executable. Defaults to "borg".
#local_path: borg1
# Alternate Borg remote executable. Defaults to "borg".
#remote_path: borg1
# Any paths matching these patterns are included/excluded from backups. Globs are
# expanded. (Tildes are not.) Note that Borg considers this option experimental.
# See the output of "borg help patterns" for more details. Quote any value if it
# contains leading punctuation, so it parses correctly.
#patterns:
# - R /
# - '- /home/*/.cache'
# - + /home/susan
# - '- /home/*'
# Read include/exclude patterns from one or more separate named files, one pattern
# per line. Note that Borg considers this option experimental. See the output of
# "borg help patterns" for more details.
#patterns_from:
# - /etc/borgmatic/patterns
# Any paths matching these patterns are excluded from backups. Globs and tildes
# are expanded. See the output of "borg help patterns" for more details.
exclude_patterns:
- /bin
- /dev
- /lib
- /lib32
- /lib64
- /lost+found
- /media
- /proc
- /run
- /sbin
- /sys
- /tmp
- /swap
- /swap.img
# we store our container state in mounts and never the containers themselves, ignore crud
- /var/lib/docker/overlay2
- /var/lib/docker/volumes
# Read exclude patterns from one or more separate named files, one pattern per
# line. See the output of "borg help patterns" for more details.
#exclude_from:
# - /etc/borgmatic/excludes
# Exclude directories that contain a CACHEDIR.TAG file. See
# http://www.brynosaurus.com/cachedir/spec.html for details. Defaults to false.
exclude_caches: true
# Exclude directories that contain a file with the given filename. Defaults to not
# set.
exclude_if_present: .NOBACKUP
# Repository storage options. See
# https://borgbackup.readthedocs.io/en/stable/usage.html#borg-create and
# https://borgbackup.readthedocs.io/en/stable/usage/general.html#environment-variables for
# details.
storage:
# The standard output of this command is used to unlock the encryption key. Only
# use on repositories that were initialized with passcommand/repokey encryption.
# Note that if both encryption_passcommand and encryption_passphrase are set,
# then encryption_passphrase takes precedence. Defaults to not set.
#encryption_passcommand: secret-tool lookup borg-repository repo-name
# Passphrase to unlock the encryption key with. Only use on repositories that were
# initialized with passphrase/repokey encryption. Quote the value if it contains
# punctuation, so it parses correctly. And backslash any quote or backslash
# literals as well. Defaults to not set.
#encryption_passphrase: "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
encryption_passphrase: "{{ vault.borg.passphrase }}"
# Number of seconds between each checkpoint during a long-running backup. See
# https://borgbackup.readthedocs.io/en/stable/faq.html#if-a-backup-stops-mid-way-does-the-already-backed-up-data-stay-there
# for details. Defaults to checkpoints every 1800 seconds (30 minutes).
#checkpoint_interval: 1800
# Specify the parameters passed to the chunker (CHUNK_MIN_EXP, CHUNK_MAX_EXP,
# HASH_MASK_BITS, HASH_WINDOW_SIZE). See https://borgbackup.readthedocs.io/en/stable/internals.html
# for details. Defaults to "19,23,21,4095".
#chunker_params: 19,23,21,4095
# Type of compression to use when creating archives. See
# https://borgbackup.readthedocs.org/en/stable/usage.html#borg-create for details.
# Defaults to "lz4".
compression: zstd,8
# Remote network upload rate limit in kiBytes/second. Defaults to unlimited.
#remote_rate_limit: 100
# Command to use instead of "ssh". This can be used to specify ssh options.
# Defaults to not set.
#ssh_command: ssh -i /path/to/private/key
ssh_command: ssh -i /root/.ssh/borg-id
# Base path used for various Borg directories. Defaults to $HOME, ~$USER, or ~.
# See https://borgbackup.readthedocs.io/en/stable/usage/general.html#environment-variables for details.
#borg_base_directory: /path/to/base
# Path for Borg configuration files. Defaults to $borg_base_directory/.config/borg
#borg_config_directory: /path/to/base/config
# Path for Borg cache files. Defaults to $borg_base_directory/.cache/borg
#borg_cache_directory: /path/to/base/cache
# Path for Borg security and encryption nonce files. Defaults to $borg_base_directory/.config/borg/security
#borg_security_directory: /path/to/base/config/security
# Path for Borg encryption key files. Defaults to $borg_base_directory/.config/borg/keys
#borg_keys_directory: /path/to/base/config/keys
# Umask to be used for borg create. Defaults to 0077.
#umask: 0077
# Maximum seconds to wait for acquiring a repository/cache lock. Defaults to 1.
#lock_wait: 5
# Name of the archive. Borg placeholders can be used. See the output of
# "borg help placeholders" for details. Defaults to
# "{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}". If you specify this option, you must
# also specify a prefix in the retention section to avoid accidental pruning of
# archives with a different archive name format. And you should also specify a
# prefix in the consistency section as well.
#archive_name_format: '{hostname}-documents-{now}'
# Retention policy for how many backups to keep in each category. See
# https://borgbackup.readthedocs.org/en/stable/usage.html#borg-prune for details.
# At least one of the "keep" options is required for pruning to work. See
# https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/
# if you'd like to skip pruning entirely.
retention:
# Keep all archives within this time interval.
keep_within: 48H
# Number of secondly archives to keep.
#keep_secondly: 60
# Number of minutely archives to keep.
#keep_minutely: 60
# Number of hourly archives to keep.
#keep_hourly: 24
# Number of daily archives to keep.
keep_daily: 7
# Number of weekly archives to keep.
keep_weekly: 4
# Number of monthly archives to keep.
keep_monthly: 12
# Number of yearly archives to keep.
keep_yearly: 2
# When pruning, only consider archive names starting with this prefix.
# Borg placeholders can be used. See the output of "borg help placeholders" for
# details. Defaults to "{hostname}-".
#prefix: sourcehostname
# Consistency checks to run after backups. See
# https://borgbackup.readthedocs.org/en/stable/usage.html#borg-check and
# https://borgbackup.readthedocs.org/en/stable/usage.html#borg-extract for details.
#consistency:
# List of one or more consistency checks to run: "repository", "archives", and/or
# "extract". Defaults to "repository" and "archives". Set to "disabled" to disable
# all consistency checks. "repository" checks the consistency of the repository,
# "archive" checks all of the archives, and "extract" does an extraction dry-run
# of the most recent archive.
#checks:
# - repository
# - archives
# Paths to a subset of the repositories in the location section on which to run
# consistency checks. Handy in case some of your repositories are very large, and
# so running consistency checks on them would take too long. Defaults to running
# consistency checks on all repositories configured in the location section.
#check_repositories:
# - user@backupserver:sourcehostname.borg
# Restrict the number of checked archives to the last n. Applies only to the "archives" check. Defaults to checking all archives.
#check_last: 3
# When performing the "archives" check, only consider archive names starting with
# this prefix. Borg placeholders can be used. See the output of
# "borg help placeholders" for details. Defaults to "{hostname}-".
#prefix: sourcehostname
# Shell commands or scripts to execute before and after a backup or if an error has occurred.
# IMPORTANT: All provided commands and scripts are executed with user permissions of borgmatic.
# Do not forget to set secure permissions on this file as well as on any script listed (chmod 0700) to
# prevent potential shell injection or privilege escalation.
# hooks:
# List of one or more shell commands or scripts to execute before creating a backup.
# before_backup:
# - echo "Starting a backup job $(date)." | sendmatrix
# List of one or more shell commands or scripts to execute after creating a backup.
# after_backup:
# List of one or more shell commands or scripts to execute in case an exception has occurred.
# on_error:
# - echo "Error while creating a backup $(date). Repository {repository} failed with {error}, output was {output}" | mysendscript.sh
healthchecks: https://hc-ping.com/4c12883a-0770-4a4a-a90e-2b551074fc33
# Umask used when executing hooks. Defaults to the umask that borgmatic is run with.
#umask: 0077

View File

@ -0,0 +1,9 @@
[Unit]
Description=Docker Housekeeping
[Service]
Type=oneshot
Nice=19
IOSchedulingClass=2
IOSchedulingPriority=7
ExecStart=/usr/local/bin/docker-prune.sh

View File

@ -0,0 +1,10 @@
#!/bin/sh
# prune *all* images not currently used
docker image prune -af
# prune unused volumes (we keep all state host-mounts)
docker volume prune -f
# prune images, containers, networks etc. but keep potentially used one (no -a)
docker system prune -f
# update left-over images
docker images | grep -v "^REPO" | grep -v "^<none>" | sed 's/ \+/:/g' | cut -d: -f1,2 | xargs -L1 docker pull

View File

@ -0,0 +1,2 @@
[Timer]
OnCalendar=Mon 12:04

View File

@ -0,0 +1,55 @@
---
- name: Add docker repository key
ansible.builtin.apt_key:
id: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88"
url: https://download.docker.com/linux/debian/gpg
state: present
- name: Configure apt docker repository
ansible.builtin.apt_repository:
repo: deb https://download.docker.com/linux/ubuntu focal stable
state: present
- name: Install docker tools and dependencies
ansible.builtin.apt:
name: "{{ packages }}"
state: present
vars:
packages:
- docker-ce
- docker-compose
- name: Create service directory for docker services
ansible.builtin.file: { path: "/opt/service", state: directory, mode: '0755' }
- name: Copy docker-prune script
copy: { src: docker-prune.sh, dest: /usr/local/bin, owner: root, group: root, mode: '0744' }
- name: Copy docker-prune systemd service
ansible.builtin.copy:
src: docker-prune.service
dest: /etc/systemd/system/
owner: root
group: root
mode: '0644'
# Causes weird "Attempted to remove disk file system, and we can't allow that." issue.
# This might be broken due to https://bugs.launchpad.net/ubuntu-manpage-repository/+bug/1817627
#validate: systemd-analyze verify %s
- name: Copy docker-prune systemd timer
ansible.builtin.copy:
src: docker-prune.timer
dest: /etc/systemd/system/
owner: root
group: root
mode: '0644'
# See previous task
#validate: systemd-analyze verify %s
- name: Activate docker-prune timer
ansible.builtin.systemd:
name: docker-prune.timer
state: started
enabled: yes
daemon_reload: yes

View File

@ -0,0 +1,8 @@
/srv/traefik/logs/access.log
/srv/traefik/logs/traefik.log
{
rotate 12
monthly
compress
missingok
}

View File

@ -0,0 +1,128 @@
---
# Traefik static config options
# Only loaded on startup!
global:
sendAnonymousUsage: false
#serversTransport:
# insecureSkipVerify: true
entryPoints:
web:
address: ":80"
http:
redirections:
entryPoint:
to: websecure
scheme: https
websecure:
address: ":443"
providers:
file:
filename: "/etc/traefik/traefik.yml"
docker:
watch: true
endpoint: "unix:///var/run/docker.sock"
exposedByDefault: false
network: traefik
api:
dashboard: true
metrics:
prometheus: { }
ping: { }
log:
level: WARN
accessLog:
filePath: "/data/logs/access.log"
bufferingSize: 128
certificatesResolvers:
letsencrypt:
acme:
email: "changeme@chaospott.de"
caServer: "https://acme-v02.api.letsencrypt.org/directory"
storage: "/data/acme.json"
keyType: "EC384"
#httpChallenge:
# entryPoint: web
dnsChallenge:
provider: inwx # more available at: https://doc.traefik.io/traefik/https/acme/#providers
# Checked by traefik before issuing LE, need to be public DNS server!
# Quad9
resolvers: [ "9.9.9.9", "2620:fe::fe" ]
letsencrypt-staging: # this is for testing new services
acme:
email: "changeme@chaospott.de"
caServer: "https://acme-staging-v02.api.letsencrypt.org/directory"
storage: "/data/acme-staging.json"
keyType: "EC384"
#httpChallenge:
# entryPoint: web
dnsChallenge:
provider: inwx
# Checked by traefik before issuing LE, need to be public DNS server!
# Quad9
resolvers: [ "9.9.9.9", "2620:fe::fe" ]
###
# Traefik dynamic configuration options
# File is live-reloaded.
# Not all dynamic options can be set via labels. This is why some general, dynamic
# traefik options are configured here instead on labels.
# See also: https://github.com/traefik/traefik/issues/5507
tls:
options:
default:
sniStrict: true
# # Forced TLS v1.3 still causes issues like renovate failing to check our repos
# minVersion: "VersionTLS13"
# # TLS v1.2 Alternative config for more compatibility
minVersion: "VersionTLS12"
cipherSuites:
- "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
- "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
- "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
- "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
- "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"
- "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
# global HTTP config
http:
routers:
api:
rule: "Host(`traefik.chaospott.de`)"
service: api@internal
middlewares: [ "dashboard-auth" ]
tls:
certResolver: "letsencrypt"
# generate cert for main domain and wildcard (requires DNS-01)
domains:
- main: "chaospott.de"
- main: "*.chaospott.de"
middlewares:
redirect-to-https:
redirectScheme:
scheme: "https"
hsts-header:
headers:
# HSTSPreload is an initiative that forces browsers to only access a website
# via HTTPS. This implies some requirements. https://hstspreload.org/
customResponseHeaders:
frameDeny: true # forbid embedding into frames
sslRedirect: true
stsSeconds: 3600 # Must be at least 31536000 (1-year) for HSTSPreload
stsPreload: true # HSTSPreload requirement
stsIncludeSubdomains: true # HSTSPreload requirement
browserXssFilter: true
dashboard-auth:
basicauth:
users: "admin:htpasswd-generated-password"

View File

@ -0,0 +1,14 @@
---
- name: "Stop {{ docker_compose.project_name }}"
community.general.docker_compose:
project_name: "{{ docker_compose.project_name }}"
project_src: "{{ docker_compose.path }}"
files: "{{ docker_compose.file }}"
state: absent
- name: "Start {{ docker_compose.project_name }}"
community.general.docker_compose:
project_name: "{{ docker_compose.project_name }}"
project_src: "{{ docker_compose.path }}"
files: "{{ docker_compose.file }}"

View File

@ -0,0 +1,26 @@
---
- name: Create service directory if needed
ansible.builtin.file: { path: "/opt/service/{{ docker_compose.project_name }}", state: directory, mode: '0700' }
- name: create traefik network
community.general.docker_network:
name: "traefik"
- name: Copy config file
ansible.builtin.copy: { src: "traefik.yml", dest: "{{ docker_compose.path }}/traefik.yml" }
notify: [ "Stop {{ docker_compose.project_name }}", "Start {{ docker_compose.project_name }}" ]
- name: Copy logrotate traefik config
ansible.builtin.copy: { src: "traefik", dest: "/etc/logrotate.d/" }
- name: Copy docker compose files
ansible.builtin.template:
src: "{{ docker_compose.file }}.j2"
dest: "{{ docker_compose.path }}/{{ docker_compose.file }}"
mode: "u=rw,go-rwx"
validate: docker-compose -f %s config
notify: [ "Stop {{ docker_compose.project_name }}", "Start {{ docker_compose.project_name }}" ]
- name: Flush handlers
ansible.builtin.meta: flush_handlers

View File

@ -0,0 +1,24 @@
---
version: '3'
services:
traefik:
image: traefik:v2.4.8
restart: unless-stopped
ports:
- "80:80" # HTTP
- "443:443" # HTTPS
environment:
- INWX_USERNAME={{ vault.traefik.inwx.username | mandatory }}
- INWX_PASSWORD={{ vault.traefik.inwx.password | mandatory }}
- INWX_POLLING_INTERVAL=60
- INWX_PROPAGATION_TIMEOUT=3600 # 1h to make sure DNS-01 works
volumes:
- /srv/{{ docker_compose.project_name }}/:/data/
- ./traefik.yml:/etc/traefik/traefik.yml
- /var/run/docker.sock:/var/run/docker.sock:ro
networks: [ traefik ]
networks:
traefik:
external: true

View File

@ -0,0 +1,4 @@
docker_compose:
path: "/opt/service/traefik"
file: "docker-compose.yml"
project_name: "traefik"

View File

@ -0,0 +1,2 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Unattended-Upgrade "1";

View File

@ -0,0 +1,131 @@
// Automatically upgrade packages from these (origin:archive) pairs
//
// Note that in Ubuntu security updates may pull in new dependencies
// from non-security sources (e.g. chromium). By allowing the release
// pocket these get automatically pulled in.
Unattended-Upgrade::Allowed-Origins {
"${distro_id}:${distro_codename}";
"${distro_id}:${distro_codename}-security";
// Extended Security Maintenance; doesn't necessarily exist for
// every release and this system may not have it installed, but if
// available, the policy for updates is such that unattended-upgrades
// should also install from here by default.
"${distro_id}ESMApps:${distro_codename}-apps-security";
"${distro_id}ESM:${distro_codename}-infra-security";
// "${distro_id}:${distro_codename}-updates";
// "${distro_id}:${distro_codename}-proposed";
// "${distro_id}:${distro_codename}-backports";
};
// Python regular expressions, matching packages to exclude from upgrading
Unattended-Upgrade::Package-Blacklist {
// The following matches all packages starting with linux-
// "linux-";
// Use $ to explicitly define the end of a package name. Without
// the $, "libc6" would match all of them.
// "libc6$";
// "libc6-dev$";
// "libc6-i686$";
// Special characters need escaping
// "libstdc\+\+6$";
// The following matches packages like xen-system-amd64, xen-utils-4.1,
// xenstore-utils and libxenstore3.0
// "(lib)?xen(store)?";
// For more information about Python regular expressions, see
// https://docs.python.org/3/howto/regex.html
};
// This option controls whether the development release of Ubuntu will be
// upgraded automatically. Valid values are "true", "false", and "auto".
Unattended-Upgrade::DevRelease "auto";
// This option allows you to control if on a unclean dpkg exit
// unattended-upgrades will automatically run
// dpkg --force-confold --configure -a
// The default is true, to ensure updates keep getting installed
//Unattended-Upgrade::AutoFixInterruptedDpkg "true";
// Split the upgrade into the smallest possible chunks so that
// they can be interrupted with SIGTERM. This makes the upgrade
// a bit slower but it has the benefit that shutdown while a upgrade
// is running is possible (with a small delay)
//Unattended-Upgrade::MinimalSteps "true";
// Install all updates when the machine is shutting down
// instead of doing it in the background while the machine is running.
// This will (obviously) make shutdown slower.
// Unattended-upgrades increases logind's InhibitDelayMaxSec to 30s.
// This allows more time for unattended-upgrades to shut down gracefully
// or even install a few packages in InstallOnShutdown mode, but is still a
// big step back from the 30 minutes allowed for InstallOnShutdown previously.
// Users enabling InstallOnShutdown mode are advised to increase
// InhibitDelayMaxSec even further, possibly to 30 minutes.
//Unattended-Upgrade::InstallOnShutdown "false";
// Send email to this address for problems or packages upgrades
// If empty or unset then no email is sent, make sure that you
// have a working mail setup on your system. A package that provides
// 'mailx' must be installed. E.g. "user@example.com"
Unattended-Upgrade::Mail "noc@chaospott.de";
// Set this value to one of:
// "always", "only-on-error" or "on-change"
// If this is not set, then any legacy MailOnlyOnError (boolean) value
// is used to choose between "only-on-error" and "on-change"
Unattended-Upgrade::MailReport "only-on-error";
// Remove unused automatically installed kernel-related packages
// (kernel images, kernel headers and kernel version locked tools).
Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
// Do automatic removal of newly unused dependencies after the upgrade
Unattended-Upgrade::Remove-New-Unused-Dependencies "true";
// Do automatic removal of unused packages after the upgrade
// (equivalent to apt-get autoremove)
Unattended-Upgrade::Remove-Unused-Dependencies "true";
// Automatically reboot *WITHOUT CONFIRMATION* if
// the file /var/run/reboot-required is found after the upgrade
//Unattended-Upgrade::Automatic-Reboot "false";
// Automatically reboot even if there are users currently logged in
// when Unattended-Upgrade::Automatic-Reboot is set to true
//Unattended-Upgrade::Automatic-Reboot-WithUsers "true";
// If automatic reboot is enabled and needed, reboot at the specific
// time instead of immediately
// Default: "now"
//Unattended-Upgrade::Automatic-Reboot-Time "02:00";
// Use apt bandwidth limit feature, this example limits the download
// speed to 70kb/sec
//Acquire::http::Dl-Limit "70";
// Enable logging to syslog. Default is False
// Unattended-Upgrade::SyslogEnable "false";
// Specify syslog facility. Default is daemon
// Unattended-Upgrade::SyslogFacility "daemon";
// Download and install upgrades only on AC power
// (i.e. skip or gracefully stop updates on battery)
// Unattended-Upgrade::OnlyOnACPower "true";
// Download and install upgrades only on non-metered connection
// (i.e. skip or gracefully stop updates on a metered connection)
// Unattended-Upgrade::Skip-Updates-On-Metered-Connections "true";
// Verbose logging
// Unattended-Upgrade::Verbose "false";
// Print debugging information both in unattended-upgrades and
// in unattended-upgrade-shutdown
// Unattended-Upgrade::Debug "false";
// Allow package downgrade if Pin-Priority exceeds 1000
// Unattended-Upgrade::Allow-downgrade "false";

View File

@ -0,0 +1,19 @@
---
# Install unattended-upgrades and drop in the two APT config snippets that
# turn automatic (security) updates on.
- name: Install unattended-upgrades package
  ansible.builtin.apt: { name: "unattended-upgrades", state: present }
- name: Copy apt auto-upgrades configuration
  ansible.builtin.copy:
    src: 20auto-upgrades
    dest: /etc/apt/apt.conf.d/20auto-upgrades
    owner: root
    group: root
    # Quote file modes: an unquoted leading-zero literal is a YAML octal and
    # is easy to get wrong when edited (e.g. a bare 644 means 0o1204).
    mode: '0644'
- name: Copy unattended-upgrades configuration
  ansible.builtin.copy:
    src: 50unattended-upgrades
    dest: /etc/apt/apt.conf.d/50unattended-upgrades
    owner: root
    group: root
    mode: '0644'

View File

@ -0,0 +1,12 @@
---
# Handlers used to bounce a docker-compose project after its compose file
# changed: "Stop" tears the stack down, "Start" (notified afterwards)
# brings it back up.
- name: "Stop {{ docker_compose.project_name }}"
  community.general.docker_compose:
    project_name: "{{ docker_compose.project_name }}"
    project_src: "{{ docker_compose.path }}"
    files: "{{ docker_compose.file }}"
    state: absent
- name: "Start {{ docker_compose.project_name }}"
  community.general.docker_compose:
    project_name: "{{ docker_compose.project_name }}"
    project_src: "{{ docker_compose.path }}"
    files: "{{ docker_compose.file }}"
    # Explicit rather than relying on the module default, and symmetric with
    # the Stop handler above.
    state: present

View File

@ -0,0 +1,11 @@
---
# Deploy a docker-compose project: ensure its directory exists, template the
# compose file into place, then restart the stack via the notified handlers.
- name: Create service directory if needed
  # Use docker_compose.path (instead of hard-coding /opt/service/<name>) so
  # the created directory always matches the template destination below,
  # even when a role sets a different path.
  ansible.builtin.file: { path: "{{ docker_compose.path }}", state: directory, mode: '0700' }
- name: Copy docker compose files
  ansible.builtin.template:
    src: "{{ docker_compose.file }}.j2"
    dest: "{{ docker_compose.path }}/{{ docker_compose.file }}"
    mode: "u=rw,go-rwx"
    # Reject a broken rendered template before it ever lands on disk.
    validate: docker-compose -f %s config
  notify: [ "Stop {{ docker_compose.project_name }}", "Start {{ docker_compose.project_name }}" ]

View File

@ -0,0 +1,12 @@
---
# Compose stack for watchtower, which polls the registry and replaces running
# containers whose (mutable) image tags got a new image upstream.
version: '3'
# watchtower updates container images with mutable tags.
services:
  watchtower:
    image: 'containrrr/watchtower:1.1.6'
    # Use label `com.centurylinklabs.watchtower.enable="false"` to disable on container!
    # NOTE(review): six-field cron (seconds first) — presumably a daily run at
    # 04:00 UTC; confirm against watchtower's schedule syntax.
    command: '--cleanup --schedule "0 0 4 * * *"' # UTC
    restart: unless-stopped
    volumes:
      # Watchtower drives the host's Docker daemon through its socket.
      - '/var/run/docker.sock:/var/run/docker.sock'

View File

@ -0,0 +1,4 @@
# Settings consumed by the shared docker-compose deploy tasks and handlers.
docker_compose:
  path: "/opt/service/watchtower"
  file: "docker-compose.yml"
  project_name: "watchtower"

View File

@ -0,0 +1,5 @@
# Handler: bring up the wg-quick unit after its config was (re)templated.
# `restarted` (not merely `started`) ensures a changed config is actually
# loaded; `enabled` makes the tunnel come up on boot.
- name: Start a-vpn
  ansible.builtin.systemd:
    name: wg-quick@a-vpn
    state: restarted
    enabled: yes

View File

@ -0,0 +1,31 @@
---
# Turn the host into a WireGuard endpoint: enable packet forwarding, install
# the tooling, render the interface config, and (re)start the tunnel.
- name: Activate IP4 forwarding in kernel
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    sysctl_file: /etc/sysctl.d/99-ip-forwarding.conf
    state: present
    reload: yes
- name: Activate IP6 forwarding in kernel
  ansible.posix.sysctl:
    name: net.ipv6.conf.all.forwarding
    value: '1'
    sysctl_file: /etc/sysctl.d/99-ip-forwarding.conf
    state: present
    reload: yes
- name: Provision wireguard tools
  ansible.builtin.apt: { name: "wireguard-tools", state: present }
- name: Template a-vpn configuration
  ansible.builtin.template:
    src: a-vpn.conf.j2
    dest: /etc/wireguard/a-vpn.conf
    # Quoted so YAML cannot misread the octal literal; the file contains
    # private keys, so keep it readable by root only.
    mode: '0600'
    owner: root
    group: root
  notify: [ "Start a-vpn" ]
- name: Flush handlers
  # Run the notified restart now instead of at end of play, so later roles
  # can rely on the tunnel already being up.
  ansible.builtin.meta: flush_handlers

View File

@ -0,0 +1,10 @@
[Interface]
# Rendered by Ansible to /etc/wireguard/a-vpn.conf; secrets come from vault.
PrivateKey = {{ vault.wireguard.host.private_key }}
# NOTE(review): no prefix length given — confirm the intended subnet
# (peers below use 10.0.22.x/32 addresses).
Address = 10.0.22.1
ListenPort = 51841
[Peer]
# User1 - Machine1
PublicKey = {{ vault.wireguard.user1.machine1.public_key }}
AllowedIPs = 10.0.22.11/32
PresharedKey = {{ vault.wireguard.user1.machine1.preshared_key }}

10
site.yml Normal file
View File

@ -0,0 +1,10 @@
---
# Single play applying every role to all hosts; per-role tags allow running
# a subset, e.g. `ansible-playbook site.yml --tags wireguard`.
- hosts: all
  roles:
    - { role: bootstrap, tags: bootstrap }
    - { role: borg, tags: borg }
    - { role: unattended-upgrades, tags: unattended-upgrades }
    - { role: wireguard, tags: wireguard }
    - { role: docker, tags: docker }
    - { role: watchtower, tags: watchtower }
    - { role: traefik, tags: traefik }

31
test/Vagrantfile vendored Normal file
View File

@ -0,0 +1,31 @@
# Allow setting cores and RAM via env-vars, or use defaults.
# ENV values are Strings, so normalize with to_i — otherwise an override like
# VB_CPUS=4 hands the VirtualBox provider a String instead of an Integer.
VB_CPUS = ENV.fetch('VB_CPUS', 2).to_i
VB_RAM = ENV.fetch('VB_RAM', 2048).to_i
NAME = "host"
Vagrant.configure(2) do |config|
  config.vm.box = "bento/ubuntu-20.04"
  # Default: false
  # config.vm.box_check_update = false
  # config.vm.network "forwarded_port", guest: 80, host: 8080
  # config.vm.synced_folder "../data", "/vagrant_data"
  # Set a name in the log
  # NOTE(review): evaluates to "hoststaging" — confirm whether "host-staging"
  # was intended before changing the VM name.
  config.vm.define NAME + "staging" do |host_staging|
  end
  config.vm.provider "virtualbox" do |vb|
    vb.cpus = VB_CPUS
    vb.memory = VB_RAM
    # vb.gui = true
    # vb.customize ["modifyvm", :id, "--clipboard-mode", "bidirectional"]
  end
  # Provision the staging VM with the same playbook used in production.
  config.vm.provision "ansible" do |ansible|
    ansible.compatibility_mode = "2.0"
    ansible.config_file = "../ansible.cfg"
    ansible.playbook = "../site.yml"
  end
end

2
vault-password.sh Executable file
View File

@ -0,0 +1,2 @@
#!/bin/sh
# Ansible vault password script (referenced from ansible.cfg): prints the
# vault password by fetching it from the shared gopass store.
gopass show noc/servers/ansible-vault-pw