Switch to Debian 10 and Ansible

This changes the site to run on Debian 10 instead of Ubuntu 16.04. It
also fully converts the previous Salt setup to use Ansible instead.

Most of this was a relatively straightforward conversion, and it should
be very close to equivalent. One notable difference is that I removed
the setup for the "monitoring" server, since I wasn't confident that the
way of setting up self-hosted Sentry and Grafana was working any more.
I'll look to re-add that at some point, but it's not urgent.
This commit is contained in:
Deimos
2021-06-25 00:08:58 -06:00
parent d91ecf910f
commit 4cc100ab02
158 changed files with 1495 additions and 2075 deletions

36
Vagrantfile vendored
View File

@@ -4,25 +4,39 @@
VAGRANT_CONFIG_VERSION = "2"
Vagrant.configure(VAGRANT_CONFIG_VERSION) do |config|
config.vm.box = "ubuntu/xenial64"
# Using the "contrib" version for vboxsf module for synced folders
config.vm.box = "debian/contrib-buster64"
# Main application folder
config.vm.synced_folder "tildes/", "/opt/tildes/"
# Mount the salt file root and pillar root
config.vm.synced_folder "salt/salt/", "/srv/salt/"
config.vm.synced_folder "salt/pillar/", "/srv/pillar/"
config.vm.synced_folder "ansible/", "/srv/ansible"
config.vm.network "forwarded_port", guest: 443, host: 4443
config.vm.network "forwarded_port", guest: 9090, host: 9090
# Masterless salt provisioning
config.vm.provision :salt do |salt|
salt.masterless = true
salt.minion_config = "salt/minion"
salt.run_highstate = true
salt.verbose = true
salt.log_level = "info"
config.vm.provision "ansible_local" do |ansible|
ansible.install_mode = "pip"
# Since Debian Buster still uses Python 2.7 by default and the pip bootstrap
# script is no longer compatible with 2.7, we need to specify the installation
# command manually. If we upgrade to a newer version of Debian that defaults to
# Python 3.6+, this should no longer be necessary.
ansible.pip_install_cmd = "sudo apt-get install -y python3-distutils && curl -s https://bootstrap.pypa.io/get-pip.py | sudo python3"
# Vagrant doesn't currently recognize the new format for Ansible versions
# (e.g. "ansible [core 2.11.1]"), so the compatibility mode is set incorrectly.
# A new version of Vagrant should resolve this soon.
ansible.compatibility_mode = "2.0"
# put the VM into the "dev" and "app_server" Ansible groups
ansible.groups = {
"dev" => ["default"],
"app_server" => ["default"],
}
ansible.galaxy_role_file = "ansible/requirements.yml"
ansible.playbook = "ansible/playbook.yml"
end
config.vm.provider "virtualbox" do |vb|

9
ansible/common.yml Normal file
View File

@@ -0,0 +1,9 @@
---
- hosts: all
become: true
vars_files:
- vars.yml
roles:
- common

View File

@@ -0,0 +1,33 @@
---
app_username: vagrant
pip_requirements_filename: requirements-dev.txt
ini_file: development.ini
gunicorn_args: --reload
# have to disable sendfile for vagrant due to a virtualbox bug
nginx_enable_sendfile: false
nginx_worker_processes: 1
nginx_enable_hsts: false
nginx_enable_csp: false
postgresql_tildes_databases:
- tildes
- tildes_test
postgresql_tildes_user_flags: "SUPERUSER"
tildes_database_insert_dev_data: true
hsts_max_age: 60
site_hostname: localhost
ssl_cert_dir: /etc/pki/tls/certs
ssl_cert_path: "{{ ssl_cert_dir }}/localhost.crt"
ssl_private_key_path: "{{ ssl_cert_dir }}/localhost.key"
ansible_python_interpreter: /usr/bin/python3
# Workaround for some Ansible permissions issues when becoming an unprivileged user
# (this has some risks, but should be fine for our use)
ansible_shell_allow_world_readable_temp: true

View File

@@ -0,0 +1,52 @@
---
app_username: tildes
postgresql_settings:
checkpoint_completion_target: 0.7
default_statistics_target: 100
effective_cache_size: 24GB
effective_io_concurrency: 200
maintenance_work_mem: 2GB
max_parallel_workers: 8
max_parallel_workers_per_gather: 4
max_wal_size: 2GB
max_worker_processes: 8
min_wal_size: 1GB
random_page_cost: 1.1
shared_buffers: 8GB
wal_buffers: 16MB
work_mem: 10485kB
ini_file: production.ini
gunicorn_args: --workers 8
site_hostname: tildes.net
# add extra prod-only consumer services (e.g. ones that use external APIs)
consumers:
- comment_user_mentions_generator
- post_processing_script_runner
- site_icon_downloader
- topic_embedly_extractor
- topic_interesting_activity_updater
- topic_metadata_generator
- topic_youtube_scraper
ipv6_device: eno1
ipv6_address: "2607:5300:0203:2e7a::"
ipv6_gateway: "2607:5300:0203:2eff:ff:ff:ff:ff"
prometheus_ips:
- "2607:5300:201:3100::6e77"
prometheus_consumer_scrape_targets:
comment_user_mentions_generator: 25010
post_processing_script_runner: 25016
site_icon_downloader: 25011
topic_embedly_extractor: 25012
topic_interesting_activity_updater: 25013
topic_metadata_generator: 25014
topic_youtube_scraper: 25015
ansible_python_interpreter: /usr/bin/python3
# Workaround for some Ansible permissions issues when becoming an unprivileged user
# (this has some risks, but should be fine for our use)
ansible_shell_allow_world_readable_temp: true

54
ansible/playbook.yml Normal file
View File

@@ -0,0 +1,54 @@
---
- hosts: all
become: true
vars_files:
- vars.yml
roles:
- common
- hosts: app_server
become: true
vars_files:
- vars.yml
roles:
- cmark-gfm
- pts_lbsearch
- python
- gunicorn
- nginx
- nginx_site_config
- postgresql
- postgresql_plpython
- postgresql_tildes_dbs
- pgbouncer
- redis
- redis_module_cell
- postgresql_redis_bridge
- boussole
- webassets
- scripts
- prometheus_node_exporter
- prometheus_postgres_exporter
- prometheus_redis_exporter
- consumers
- cronjobs
- wiki_repo
- hosts: dev
become: true
vars_files:
- vars.yml
roles:
- self_signed_ssl_cert
- prometheus
- java
- nodejs
- development
- hosts: prod
become: true
vars_files:
- vars.yml
roles:
- nginx_prod_config
- ipv6_networking

3
ansible/requirements.yml Normal file
View File

@@ -0,0 +1,3 @@
---
collections:
- community.general

View File

@@ -0,0 +1,2 @@
---
boussole_venv_dir: /opt/venvs/boussole

View File

@@ -0,0 +1,44 @@
---
- name: Create venv and install boussole with pip
pip:
virtualenv: "{{ boussole_venv_dir }}"
virtualenv_command: python{{ python_version }} -m venv
name: boussole
- name: Create systemd service file
template:
src: boussole.service.jinja2
dest: /etc/systemd/system/boussole.service
owner: root
group: root
mode: 0644
- name: Start and enable boussole service
service:
name: boussole
state: started
enabled: true
- name: Create directory for compiled CSS
file:
path: "{{ app_dir }}/static/css"
state: directory
owner: "{{ app_username }}"
group: "{{ app_username }}"
mode: 0755
- name: Check if any compiled CSS files exist
find:
path: "{{ app_dir }}/static/css"
patterns:
- "*.css"
register: compiled_css_files
- name: Run boussole manually if no compiled CSS exists yet
command:
chdir: "{{ app_dir }}"
cmd: "{{ boussole_venv_dir }}/bin/boussole compile --backend=yaml --config=boussole.yaml"
environment:
LC_ALL: C.UTF-8
LANG: C.UTF-8
when: compiled_css_files.matched == 0

View File

@@ -1,4 +1,3 @@
{% from 'common.jinja2' import app_dir, app_username -%}
[Unit]
Description=Boussole - auto-compile SCSS files on change
@@ -7,7 +6,7 @@ User={{ app_username }}
Group={{ app_username }}
WorkingDirectory={{ app_dir }}
Environment="LC_ALL=C.UTF-8" "LANG=C.UTF-8"
ExecStart=/opt/venvs/boussole/bin/boussole watch --backend=yaml --config=boussole.yaml --poll
ExecStart={{ boussole_venv_dir }}/bin/boussole watch --backend=yaml --config=boussole.yaml --poll
Restart=always
RestartSec=5

View File

@@ -0,0 +1,40 @@
---
- name: Check if cmark-gfm is installed
stat:
path: /usr/local/lib/libcmark-gfm.so
register: cmark_gfm_library
- name: Download and install cmark-gfm
when: not cmark_gfm_library.stat.exists
block:
- name: Download cmark-gfm from GitHub
get_url:
dest: /tmp/cmark-gfm.tar.gz
url: https://github.com/github/cmark-gfm/archive/0.29.0.gfm.0.tar.gz
checksum: sha256:6a94aeaa59a583fadcbf28de81dea8641b3f56d935dda5b2447a3c8df6c95fea
- name: Create temp directory to extract cmark-gfm to
file:
path: /tmp/cmark-gfm
state: directory
- name: Extract cmark-gfm
unarchive:
remote_src: true
src: /tmp/cmark-gfm.tar.gz
dest: /tmp/cmark-gfm
extra_opts:
- --strip-components=1
- name: Install build dependencies for cmark-gfm
apt:
name:
- build-essential
- cmake
- name: Install cmark-gfm
shell:
chdir: /tmp/cmark-gfm
cmd: |
make
make install

View File

@@ -0,0 +1,13 @@
---
- name: Set time zone to UTC
community.general.timezone:
name: Etc/UTC
- name: Create group for app user
group:
name: "{{ app_username }}"
- name: Create app user
user:
name: "{{ app_username }}"
group: "{{ app_username }}"

View File

@@ -0,0 +1,6 @@
---
consumers:
- comment_user_mentions_generator
- post_processing_script_runner
- topic_interesting_activity_updater
- topic_metadata_generator

View File

@@ -0,0 +1,4 @@
---
dependencies:
- role: python
- role: redis

View File

@@ -0,0 +1,16 @@
---
- name: Set up service files for background consumers
template:
src: "consumer.service.jinja2"
dest: /etc/systemd/system/consumer-{{ item }}.service
owner: root
group: root
mode: 0644
loop: "{{ consumers }}"
- name: Start and enable all consumer services
service:
name: consumer-{{ item }}
state: started
enabled: true
loop: "{{ consumers }}"

View File

@@ -1,6 +1,5 @@
{% from 'common.jinja2' import app_dir, app_username, bin_dir -%}
[Unit]
Description=Topic Youtube Scraper (Queue Consumer)
Description={{ item.replace("_", " ").title() }} (Queue Consumer)
Requires=redis.service
After=redis.service
PartOf=redis.service
@@ -9,8 +8,8 @@ PartOf=redis.service
User={{ app_username }}
Group={{ app_username }}
WorkingDirectory={{ app_dir }}/consumers
Environment="INI_FILE={{ app_dir }}/{{ pillar['ini_file'] }}"
ExecStart={{ bin_dir }}/python topic_youtube_scraper.py
Environment="INI_FILE={{ app_dir }}/{{ ini_file }}"
ExecStart={{ bin_dir }}/python {{ item }}.py
Restart=always
RestartSec=5

View File

@@ -0,0 +1,53 @@
---
- name: Add cronjob for lifting expired temporary bans
cron:
name: lift_expired_temporary_bans
job: "{{ bin_dir }}/python -c \"from scripts.lift_expired_temporary_bans import lift_expired_temporary_bans; lift_expired_temporary_bans('{{ app_dir }}/{{ ini_file }}')\""
user: "{{ app_username }}"
hour: "*"
minute: 1
- name: Add cronjob for closing voting on old posts
cron:
name: close_voting_on_old_posts
job: "{{ bin_dir }}/python -c \"from scripts.close_voting_on_old_posts import close_voting_on_old_posts; close_voting_on_old_posts('{{ app_dir }}/{{ ini_file }}')\""
user: "{{ app_username }}"
hour: "*"
minute: 3
- name: Add cronjob for cleaning up private data
cron:
name: clean_private_data
job: "{{ bin_dir }}/python -c \"from scripts.clean_private_data import clean_all_data; clean_all_data('{{ app_dir }}/{{ ini_file }}')\""
user: "{{ app_username }}"
hour: 4
minute: 10
- name: Add cronjob for generating yesterday's group stats
cron:
name: generate_group_stats_for_yesterday
job: "{{ bin_dir }}/python -c \"from scripts.generate_group_stats_for_yesterday import generate_stats; generate_stats('{{ app_dir }}/{{ ini_file }}')\""
user: "{{ app_username }}"
hour: 0
minute: 10
- name: Add cronjob for generating site-icons CSS file
cron:
name: generate_site_icons_css
job: "{{ bin_dir }}/python -c \"from scripts.generate_site_icons_css import generate_css; generate_css()\""
user: "{{ app_username }}"
minute: "*/5"
- name: Add cronjob for posting scheduled topics
cron:
name: post_scheduled_topics
job: "{{ bin_dir }}/python -c \"from scripts.post_scheduled_topics import post_scheduled_topics; post_scheduled_topics('{{ app_dir }}/{{ ini_file }}')\""
user: "{{ app_username }}"
- name: Add cronjob for updating all groups' lists of common topic tags
cron:
name: update_groups_common_topic_tags
job: "{{ bin_dir }}/python -c \"from scripts.update_groups_common_topic_tags import update_common_topic_tags; update_common_topic_tags('{{ app_dir }}/{{ ini_file }}')\""
user: "{{ app_username }}"
hour: "*"
minute: 0

View File

@@ -0,0 +1,2 @@
c.InteractiveShellApp.extensions = ['autoreload']
c.InteractiveShellApp.exec_lines = ['%autoreload 2']

View File

@@ -0,0 +1,26 @@
---
- name: Create IPython profile
become_user: "{{ app_username }}"
command:
cmd: "{{ bin_dir }}/ipython profile create"
creates: /home/{{ app_username }}/.ipython/profile_default
- name: Create IPython config file
copy:
src: ipython_config.py
dest: /home/{{ app_username }}/.ipython/profile_default/ipython_config.py
owner: "{{ app_username }}"
group: "{{ app_username }}"
mode: 0744
- name: Automatically activate venv on login and in new shells
lineinfile:
path: /home/{{ app_username }}/.bashrc
line: source activate
owner: "{{ app_username }}"
group: "{{ app_username }}"
- name: Add invoke's tab-completion script to support completing invoke task names
lineinfile:
path: /home/{{ app_username }}/.bashrc
line: source <(invoke --print-completion-script bash)

View File

@@ -0,0 +1,3 @@
---
dependencies:
- role: python

View File

@@ -0,0 +1,54 @@
---
- name: Create gunicorn service file
template:
src: gunicorn.service.jinja2
dest: /etc/systemd/system/gunicorn.service
owner: root
group: root
mode: 0644
- name: Create gunicorn socket file
template:
src: gunicorn.socket.jinja2
dest: /etc/systemd/system/gunicorn.socket
owner: root
group: root
mode: 0644
- name: Create gunicorn tmpfiles.d configuration file
template:
src: gunicorn.conf.jinja2
dest: /usr/lib/tmpfiles.d/gunicorn.conf
owner: root
group: root
mode: 0644
- name: Start and enable gunicorn.socket service
service:
name: gunicorn.socket
state: started
enabled: true
# Set up the gunicorn_reloader service, which reloads gunicorn whenever certain files
# are changed (such as static files, to update the cache-busting strings)
- name: Create gunicorn_reloader service file
copy:
src: gunicorn_reloader.service
dest: /etc/systemd/system/gunicorn_reloader.service
owner: root
group: root
mode: 0644
- name: Create gunicorn_reloader path-monitoring file
copy:
src: gunicorn_reloader.path
dest: /etc/systemd/system/gunicorn_reloader.path
owner: root
group: root
mode: 0644
- name: Start and enable gunicorn_reloader path-monitoring service
service:
name: gunicorn_reloader.path
state: started
enabled: true

View File

@@ -1,2 +1 @@
{% from 'common.jinja2' import app_username %}
d /run/gunicorn 0755 {{ app_username }} {{ app_username }} -

View File

@@ -1,4 +1,3 @@
{% from 'common.jinja2' import app_dir, app_username, bin_dir -%}
[Unit]
Description=gunicorn daemon
Requires=gunicorn.socket
@@ -10,7 +9,7 @@ User={{ app_username }}
Group={{ app_username }}
RuntimeDirectory=gunicorn
WorkingDirectory={{ app_dir }}
ExecStart={{ bin_dir }}/gunicorn --paste {{ pillar['ini_file'] }} --config {{ app_dir }}/gunicorn_config.py --bind unix:/run/gunicorn/socket --pid /run/gunicorn/pid {{ pillar['gunicorn_args'] }}
ExecStart={{ bin_dir }}/gunicorn --paste {{ ini_file }} --config {{ app_dir }}/gunicorn_config.py --bind unix:/run/gunicorn/socket --pid /run/gunicorn/pid {{ gunicorn_args }}
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s TERM $MAINPID
PrivateTmp=true

View File

@@ -1,4 +1,3 @@
{% from 'common.jinja2' import app_username -%}
[Unit]
Description=gunicorn socket
PartOf=gunicorn.service

View File

@@ -0,0 +1,2 @@
---
ipv6_device: eth0

View File

@@ -0,0 +1,23 @@
---
- name: Enable IPv6 networking
blockinfile:
path: /etc/network/interfaces
block: |
iface {{ ipv6_device }} inet6 static
address {{ ipv6_address }}
netmask 64
post-up sleep 5; /sbin/ip -family inet6 route add {{ ipv6_gateway }} dev {{ ipv6_device }}
post-up sleep 5; /sbin/ip -family inet6 route add default via {{ ipv6_gateway }}
pre-down /sbin/ip -family inet6 route del default via {{ ipv6_gateway }}
pre-down /sbin/ip -family inet6 route del {{ ipv6_gateway }} dev {{ ipv6_device }}
# apt seems to hang a lot when using IPv6
- name: Force apt not to use IPv6
lineinfile:
path: /etc/apt/apt.conf.d/99force-ipv4
line: Acquire::ForceIPv4 "true";
create: true
owner: root
group: root
mode: 0644

View File

@@ -0,0 +1,4 @@
---
- name: Install OpenJDK Java runtime
apt:
name: openjdk-11-jre

View File

@@ -0,0 +1,6 @@
---
nginx_enable_sendfile: true
nginx_worker_processes: auto
ssl_cert_path: /etc/letsencrypt/live/{{ site_hostname }}/fullchain.pem
ssl_private_key_path: /etc/letsencrypt/live/{{ site_hostname }}/privkey.pem

View File

@@ -1,3 +1,4 @@
# rotate nginx log files daily and delete after 30 days
/var/log/nginx/*.log {
daily
missingok

View File

@@ -0,0 +1,5 @@
---
- name: Reload nginx
service:
name: nginx
state: reloaded

View File

@@ -0,0 +1,52 @@
---
- name: Add APT key for nginx repository
apt_key:
url: https://nginx.org/keys/nginx_signing.key
- name: Add nginx APT repository
apt_repository:
repo: deb http://nginx.org/packages/debian/ buster nginx
- name: Install nginx
apt:
name: nginx
- name: Start and enable nginx service
service:
name: nginx
state: started
enabled: true
- name: Create nginx.conf file
template:
src: nginx.conf.jinja2
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: 0644
notify:
- Reload nginx
- name: Create sites-available directory
file:
path: /etc/nginx/sites-available
state: directory
owner: root
group: root
mode: 0755
- name: Create sites-enabled directory
file:
path: /etc/nginx/sites-enabled
state: directory
owner: root
group: root
mode: 0744
- name: Add logrotate config
copy:
src: logrotate
dest: /etc/logrotate.d/nginx
owner: root
group: root
mode: 0644

View File

@@ -1,5 +1,5 @@
user nginx;
worker_processes {{ pillar['nginx_worker_processes'] }};
worker_processes {{ nginx_worker_processes }};
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
@@ -22,11 +22,10 @@ http {
access_log /var/log/nginx/access.log main;
{% if grains['id'] == 'dev' %}
# have to disable sendfile for vagrant due to a virtualbox bug
sendfile off;
{% else %}
{% if nginx_enable_sendfile %}
sendfile on;
{% else %}
sendfile off;
{% endif %}
# define a rate-limiting zone to use, and return HTTP 429 if exceeded
@@ -77,8 +76,8 @@ http {
text/x-component
text/x-cross-domain-policy;
ssl_certificate {{ pillar['ssl_cert_path'] }};
ssl_certificate_key {{ pillar['ssl_private_key_path'] }};
ssl_certificate {{ ssl_cert_path }};
ssl_certificate_key {{ ssl_private_key_path }};
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;

View File

@@ -0,0 +1,3 @@
---
hsts_max_age: 63072000
static_sites_dir: /opt/tildes-static-sites

View File

@@ -0,0 +1,4 @@
---
dependencies:
- role: nginx
- role: gunicorn

View File

@@ -0,0 +1,38 @@
---
- name: Add shortener config file
template:
src: tildes-shortener.conf.jinja2
dest: /etc/nginx/sites-available/tildes-shortener.conf
owner: root
group: root
mode: 0644
- name: Enable shortener in nginx
file:
path: /etc/nginx/sites-enabled/tildes-shortener.conf
src: /etc/nginx/sites-available/tildes-shortener.conf
state: link
owner: root
group: root
mode: 0644
notify:
- Reload nginx
- name: Add static sites config file
template:
src: tildes-static-sites.conf.jinja2
dest: /etc/nginx/sites-available/tildes-static-sites.conf
owner: root
group: root
mode: 0644
- name: Enable static sites in nginx
file:
path: /etc/nginx/sites-enabled/tildes-static-sites.conf
src: /etc/nginx/sites-available/tildes-static-sites.conf
state: link
owner: root
group: root
mode: 0644
notify:
- Reload nginx

View File

@@ -1,4 +1,3 @@
{% from 'common.jinja2' import app_dir -%}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
@@ -7,7 +6,7 @@ server {
keepalive_timeout 5;
add_header Strict-Transport-Security "max-age={{ pillar['hsts_max_age'] }}; includeSubDomains; preload" always;
add_header Strict-Transport-Security "max-age={{ hsts_max_age }}; includeSubDomains; preload" always;
# Are these security headers unnecessary when we're just redirecting?
add_header X-Content-Type-Options "nosniff" always;

View File

@@ -1,11 +1,9 @@
{% from 'common.jinja2' import static_sites_dir -%}
{% for subdomain in ('blog', 'docs') %}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
add_header Strict-Transport-Security "max-age={{ pillar['hsts_max_age'] }}; includeSubDomains; preload" always;
add_header Strict-Transport-Security "max-age={{ hsts_max_age }}; includeSubDomains; preload" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Frame-Options "SAMEORIGIN" always;

View File

@@ -0,0 +1,11 @@
---
nginx_enable_hsts: true
nginx_enable_csp: true
nginx_enable_ratelimiting: true
nginx_redirect_www: true
prometheus_ips:
- 127.0.0.1
- ::1
hsts_max_age: 63072000

View File

@@ -0,0 +1,4 @@
---
dependencies:
- role: nginx
- role: gunicorn

View File

@@ -0,0 +1,21 @@
---
- name: Add site config file
template:
src: tildes.conf.jinja2
dest: /etc/nginx/sites-available/tildes.conf
owner: root
group: root
mode: 0644
notify:
- Reload nginx
- name: Enable site in nginx
file:
path: /etc/nginx/sites-enabled/tildes.conf
src: /etc/nginx/sites-available/tildes.conf
state: link
owner: root
group: root
mode: 0644
notify:
- Reload nginx

View File

@@ -1,4 +1,3 @@
{% from 'common.jinja2' import app_dir -%}
upstream app_server {
server unix:/run/gunicorn/socket fail_timeout=0;
}
@@ -35,9 +34,11 @@ server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
{% if grains['id'] != 'dev' %}
add_header Strict-Transport-Security "max-age={{ pillar['hsts_max_age'] }}; includeSubDomains; preload" always;
{% if nginx_enable_hsts %}
add_header Strict-Transport-Security "max-age={{ hsts_max_age }}; includeSubDomains; preload" always;
{% endif %}
{% if nginx_enable_csp %}
# Content Security Policy:
# - "img-src data:" is needed for Spectre.css icons
# - "https://js.stripe.com" in script-src and frame-src is needed for Stripe
@@ -49,7 +50,7 @@ server {
add_header X-Xss-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
server_name {{ pillar['site_hostname'] }};
server_name {{ site_hostname }};
keepalive_timeout 5;
@@ -57,9 +58,9 @@ server {
# Block access to /metrics except from Prometheus server(s)
location /metrics {
{% for ip in pillar['prometheus_ips'] %}
{% for ip in prometheus_ips -%}
allow {{ ip }};
{% endfor %}
{% endfor -%}
deny all;
# try_files is unnecessary here, but I don't know the "proper" way
@@ -76,7 +77,7 @@ server {
}
location @proxy_to_app {
{% if grains["id"] == "prod" %}
{% if nginx_enable_ratelimiting %}
# apply rate-limiting, allowing a burst above the limit
limit_req zone=tildes_app burst=5 nodelay;
{% endif -%}
@@ -96,14 +97,14 @@ server {
}
}
{% if grains["id"] == "prod" %}
{% if nginx_redirect_www %}
# redirect www. to base domain
server {
listen 80;
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name www.tildes.net;
return 301 https://tildes.net$request_uri;
server_name www.{{ site_hostname }};
return 301 https://{{ site_hostname }}$request_uri;
}
{% endif %}

View File

@@ -0,0 +1,20 @@
---
- name: Add APT key for NodeSource Node.js repository
apt_key:
url: https://deb.nodesource.com/gpgkey/nodesource.gpg.key
- name: Add NodeSource Node.js APT repository
apt_repository:
repo: deb https://deb.nodesource.com/node_14.x buster main
- name: Install Node.js
apt:
name: nodejs
- name: Install npm packages defined in package.json
become_user: "{{ app_username }}"
community.general.npm:
path: "{{ app_dir }}"
# --no-bin-links option is needed to prevent npm from creating symlinks in the .bin
# directory, which doesn't work inside Vagrant on Windows
no_bin_links: true

View File

@@ -0,0 +1,5 @@
---
- name: Reload pgbouncer
service:
name: pgbouncer
state: reloaded

View File

@@ -0,0 +1,3 @@
---
dependencies:
- role: postgresql

View File

@@ -0,0 +1,31 @@
---
- name: Install pgbouncer
apt:
name: pgbouncer
- name: Add pgbouncer.ini
template:
src: pgbouncer.ini.jinja2
dest: /etc/pgbouncer/pgbouncer.ini
owner: postgres
group: postgres
mode: 0640
notify:
- Reload pgbouncer
- name: Add user to pgbouncer userlist
lineinfile:
path: /etc/pgbouncer/userlist.txt
line: '"tildes" ""'
create: true
owner: postgres
group: postgres
mode: 0640
notify:
- Reload pgbouncer
- name: Start and enable pgbouncer service
service:
name: pgbouncer
state: started
enabled: true

View File

@@ -1,7 +1,4 @@
[databases]
{% for line in accumulator['pgbouncer_db_lines'] -%}
{{ line }}
{% endfor %}
[pgbouncer]
logfile = /var/log/postgresql/pgbouncer.log
@@ -16,6 +13,6 @@ unix_socket_dir = /var/run/postgresql
auth_type = hba
auth_file = /etc/pgbouncer/userlist.txt
auth_hba_file = /etc/postgresql/{{ pillar['postgresql_version'] }}/main/pg_hba.conf
auth_hba_file = /etc/postgresql/{{ postgresql_version }}/main/pg_hba.conf
pool_mode = transaction

View File

@@ -0,0 +1,12 @@
---
postgresql_version: 12
# Users of this role can define postgresql_settings, which will be merged with
# this base _postgresql_settings
_postgresql_settings:
lock_timeout: 5000
statement_timeout: 5000
idle_in_transaction_session_timeout: 600000
timezone: "'UTC'"
shared_preload_libraries: "'pg_stat_statements'"
postgresql_settings: {}

View File

@@ -0,0 +1,10 @@
---
- name: Restart postgresql
service:
name: postgresql
state: restarted
- name: Reload postgresql
service:
name: postgresql
state: reloaded

View File

@@ -0,0 +1,27 @@
---
- name: Add APT key for PostgreSQL repository
apt_key:
url: https://www.postgresql.org/media/keys/ACCC4CF8.asc
- name: Add PostgreSQL APT repository
apt_repository:
repo: deb http://apt.postgresql.org/pub/repos/apt buster-pgdg main
- name: Install PostgreSQL
apt:
name: postgresql-{{ postgresql_version }}
- name: Start and enable PostgreSQL service
service:
name: postgresql
state: started
enabled: true
- name: Set configuration options in postgresql.conf
lineinfile:
path: /etc/postgresql/{{ postgresql_version }}/main/postgresql.conf
regexp: "^#?{{ item.key }} ?="
line: "{{ item.key }} = {{ item.value }}"
loop: "{{ _postgresql_settings | combine(postgresql_settings) | dict2items }}"
notify:
- Restart postgresql

View File

@@ -0,0 +1,3 @@
---
dependencies:
- role: postgresql

View File

@@ -0,0 +1,12 @@
---
- name: Install PL/Python3 procedural language for PostgreSQL
apt:
name: postgresql-plpython3-{{ postgresql_version }}
- name: Set PYTHONPATH env var for PostgreSQL
lineinfile:
path: /etc/postgresql/{{ postgresql_version }}/main/environment
regexp: "^PYTHONPATH="
line: "PYTHONPATH='{{ venv_dir }}/lib/python{{ python_version }}/site-packages:{{ app_dir }}'"
notify:
- Restart postgresql

View File

@@ -0,0 +1,4 @@
---
dependencies:
- role: postgresql
- role: redis

View File

@@ -0,0 +1,13 @@
- name: Create postgresql_redis_bridge service file
template:
src: postgresql_redis_bridge.service.jinja2
dest: /etc/systemd/system/postgresql_redis_bridge.service
owner: root
group: root
mode: 0644
- name: Start and enable postgresql_redis_bridge service
service:
name: postgresql_redis_bridge
state: started
enabled: true

View File

@@ -1,4 +1,3 @@
{% from 'common.jinja2' import app_dir, app_username, bin_dir -%}
[Unit]
Description=postgresql_redis_bridge - convert NOTIFY to Redis streams
Requires=redis.service
@@ -9,7 +8,7 @@ PartOf=redis.service
User={{ app_username }}
Group={{ app_username }}
WorkingDirectory={{ app_dir }}/scripts
Environment="INI_FILE={{ app_dir }}/{{ pillar['ini_file'] }}"
Environment="INI_FILE={{ app_dir }}/{{ ini_file }}"
ExecStart={{ bin_dir }}/python postgresql_redis_bridge.py
Restart=always
RestartSec=5

View File

@@ -0,0 +1,6 @@
---
postgresql_tildes_databases:
- tildes
postgresql_tildes_user_flags: ""
tildes_database_insert_dev_data: false

View File

@@ -0,0 +1,8 @@
---
dependencies:
- role: postgresql
- role: pgbouncer
# needed to be able to run the db init scripts
- role: python
- role: cmark-gfm

View File

@@ -0,0 +1,38 @@
---
- name: Create database and enable access and all necessary extensions
become_user: postgres
block:
- name: Create database
community.postgresql.postgresql_db:
name: "{{ item }}"
owner: tildes
- name: Enable extensions
community.postgresql.postgresql_ext:
name: "{{ extension }}"
db: "{{ item }}"
loop: "{{ extensions }}"
# since the "outer" loop by include_tasks is already using the `item` variable,
# this will use `extension` for the loop inside here
loop_control:
loop_var: extension
- name: Add database to pg_hba.conf
community.postgresql.postgresql_pg_hba:
dest: /etc/postgresql/{{ postgresql_version }}/main/pg_hba.conf
databases: "{{ item }}"
users: tildes
contype: local
method: trust
notify:
- Reload postgresql
- name: Add database to pgbouncer.ini
become_user: root
lineinfile:
path: /etc/pgbouncer/pgbouncer.ini
line: "{{ item }} ="
insertafter: "^\\[databases\\]$"
firstmatch: true
notify:
- Reload pgbouncer

View File

@@ -0,0 +1,62 @@
---
- name: Install requirements for building psycopg2 (needed by Ansible)
apt:
name:
- gcc
- libpq-dev
- python3-dev
- name: Install packages needed by Ansible community plugins
pip:
executable: pip3
name:
- ipaddress
- psycopg2
- name: Create tildes user
become_user: postgres
community.postgresql.postgresql_user:
name: tildes
role_attr_flags: "{{ postgresql_tildes_user_flags }}"
# This is a bit of a hack to effectively enable looping over a block of tasks
- name: Set up site database (and test database in dev version)
include_tasks: database.yml
loop: "{{ postgresql_tildes_databases }}"
vars:
extensions:
- citext
- ltree
- intarray
- pg_stat_statements
- pg_trgm
- plpython3u
register: database_changes
# Since handlers don't run until the end of the entire playbook, we need to run them
# manually at this point in case postgresql or pgbouncer need to be reloaded
- name: Trigger handlers to run manually for postgresql/pgbouncer updates
meta: flush_handlers
- name: Check if the database has already been initialized (will fail if not)
become_user: postgres
community.postgresql.postgresql_query:
db: tildes
query: select user_id from users;
ignore_errors: true
register: users_query
- name: Initialize the database
become_user: postgres
command:
cmd: "{{ bin_dir }}/python -c \"from scripts.initialize_db import initialize_db; initialize_db('{{ app_dir }}/{{ ini_file }}')\""
chdir: "{{ app_dir }}"
when: users_query is failed
register: initialize_db
- name: Insert dev data into database
become_user: "{{ app_username }}"
command:
cmd: "{{ bin_dir }}/python -c \"from scripts.initialize_db import insert_dev_data; insert_dev_data('{{ app_dir }}/{{ ini_file }}')\""
chdir: "{{ app_dir }}"
when: tildes_database_insert_dev_data and initialize_db is changed

View File

@@ -0,0 +1,8 @@
---
prometheus_version: 2.0.0
prometheus_consumer_scrape_targets:
comment_user_mentions_generator: 25010
post_processing_script_runner: 25016
topic_interesting_activity_updater: 25013
topic_metadata_generator: 25014

View File

@@ -0,0 +1,5 @@
---
- name: Restart prometheus
service:
name: prometheus
state: restarted

View File

@@ -0,0 +1,59 @@
---
- name: Create prometheus user and group
import_tasks: prometheus_user.yml
- name: Check if Prometheus is installed
stat:
path: /opt/prometheus/prometheus
register: prometheus_binary
- name: Download and install Prometheus
when: not prometheus_binary.stat.exists
block:
- name: Download Prometheus from GitHub
get_url:
dest: /tmp/prometheus-{{ prometheus_version }}.tar.gz
url: https://github.com/prometheus/prometheus/releases/download/v{{ prometheus_version }}/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz
checksum: sha256:e12917b25b32980daee0e9cf879d9ec197e2893924bd1574604eb0f550034d46
- name: Create Prometheus directory
file:
path: /opt/prometheus
state: directory
owner: prometheus
group: prometheus
mode: 0755
- name: Extract Prometheus
unarchive:
remote_src: true
src: /tmp/prometheus-{{ prometheus_version }}.tar.gz
dest: /opt/prometheus
owner: prometheus
group: prometheus
extra_opts:
- --strip-components=1
- name: Create Prometheus service file
copy:
src: prometheus.service
dest: /etc/systemd/system/prometheus.service
owner: root
group: root
mode: 0644
- name: Add Prometheus config file
template:
src: prometheus.yml.jinja2
dest: /opt/prometheus/prometheus.yml
owner: prometheus
group: prometheus
mode: 0644
notify:
- Restart prometheus
- name: Start and enable prometheus service
service:
name: prometheus
state: started
enabled: true

View File

@@ -5,21 +5,21 @@ global:
scrape_configs:
- job_name: "node"
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:9100']
- targets: ['{{ site_hostname }}:9100']
- job_name: "redis"
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:9121']
- targets: ['{{ site_hostname }}:9121']
- job_name: "postgres"
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:9187']
- targets: ['{{ site_hostname }}:9187']
- job_name: "tildes"
scheme: https
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:443']
{% if grains['id'] == 'dev' %}
- targets: ['{{ site_hostname }}:443']
{% if site_hostname == "localhost" -%}
tls_config:
insecure_skip_verify: true
{% endif %}
@@ -31,7 +31,7 @@ scrape_configs:
module: [site_ipv4]
static_configs:
- targets:
- https://{{ pillar['site_hostname'] }}
- https://{{ site_hostname }}
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
@@ -47,7 +47,7 @@ scrape_configs:
module: [site_ipv6]
static_configs:
- targets:
- https://{{ pillar['site_hostname'] }}
- https://{{ site_hostname }}
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
@@ -57,28 +57,8 @@ scrape_configs:
replacement: 127.0.0.1:9115 # The blackbox exporter's real hostname:port
# event stream consumers (background jobs)
- job_name: "consumer_comment_user_mentions_generator"
{% for name, port in prometheus_consumer_scrape_targets.items() -%}
- job_name: "consumer_{{ name }}"
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:25010']
- job_name: "consumer_topic_interesting_activity_updater"
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:25013']
- job_name: "consumer_topic_metadata_generator"
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:25014']
{% if grains["id"] != "dev" %}
- job_name: "consumer_site_icon_downloader"
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:25011']
- job_name: "consumer_topic_embedly_extractor"
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:25012']
- job_name: "consumer_topic_youtube_scraper"
static_configs:
- targets: ['{{ pillar['site_hostname'] }}:25015']
{% endif %}
- targets: ['{{ site_hostname }}:{{ port }}']
{% endfor %}

View File

@@ -0,0 +1,42 @@
---
# Installs the Prometheus node_exporter (host-level metrics), run as the
# shared "prometheus" user.
- name: Create prometheus user and group
  import_tasks: prometheus_user.yml
- name: Download node_exporter from GitHub
  get_url:
    dest: /tmp/prometheus_node_exporter.tar.gz
    url: https://github.com/prometheus/node_exporter/releases/download/v0.13.0/node_exporter-0.13.0.linux-amd64.tar.gz
    checksum: sha256:2de5d1e51330c41588ed4c88bc531a3d2dccf6b4d7b99d5782d95cff27a3c049
- name: Create node_exporter directory
  file:
    path: /opt/prometheus_node_exporter
    state: directory
    owner: prometheus
    group: prometheus
    mode: 0755
- name: Extract node_exporter
  unarchive:
    remote_src: true
    src: /tmp/prometheus_node_exporter.tar.gz
    dest: /opt/prometheus_node_exporter
    owner: prometheus
    group: prometheus
    # strip the versioned top-level dir; creates: keeps this task idempotent
    extra_opts:
      - --strip-components=1
    creates: /opt/prometheus_node_exporter/node_exporter
- name: Create node_exporter service file
  copy:
    src: prometheus_node_exporter.service
    dest: /etc/systemd/system/prometheus_node_exporter.service
    owner: root
    group: root
    mode: 0644
- name: Start and enable node_exporter service
  service:
    name: prometheus_node_exporter
    state: started
    enabled: true

View File

@@ -0,0 +1,3 @@
---
# Role dependencies: PostgreSQL must be set up before installing its exporter.
dependencies:
  - role: postgresql

View File

@@ -0,0 +1,47 @@
---
# Installs the Prometheus postgres_exporter; files are owned by the existing
# "postgres" user rather than the shared prometheus user.
- name: Download postgres_exporter from GitHub
  get_url:
    dest: /tmp/prometheus_postgres_exporter.tar.gz
    url: https://github.com/wrouesnel/postgres_exporter/releases/download/v0.4.7/postgres_exporter_v0.4.7_linux-amd64.tar.gz
    checksum: sha256:c34d61bb4deba8efae06fd3c9979b96dae3f3c757698ce3384c80fff586c667b
- name: Create postgres_exporter directory
  file:
    path: /opt/prometheus_postgres_exporter
    state: directory
    owner: postgres
    group: postgres
    mode: 0755
- name: Extract postgres_exporter
  unarchive:
    remote_src: true
    src: /tmp/prometheus_postgres_exporter.tar.gz
    dest: /opt/prometheus_postgres_exporter
    owner: postgres
    group: postgres
    # strip the versioned top-level dir; creates: keeps this task idempotent
    extra_opts:
      - --strip-components=1
    creates: /opt/prometheus_postgres_exporter/postgres_exporter
# Custom queries consumed by the exporter alongside its built-in metrics.
- name: Create queries.yaml file
  copy:
    src: queries.yaml
    dest: /opt/prometheus_postgres_exporter/queries.yaml
    owner: postgres
    group: postgres
    mode: 0644
- name: Create postgres_exporter service file
  copy:
    src: prometheus_postgres_exporter.service
    dest: /etc/systemd/system/prometheus_postgres_exporter.service
    owner: root
    group: root
    mode: 0644
- name: Start and enable postgres_exporter service
  service:
    name: prometheus_postgres_exporter
    state: started
    enabled: true

View File

@@ -0,0 +1,3 @@
---
# Role dependencies: Redis must be set up before installing its exporter.
dependencies:
  - role: redis

View File

@@ -0,0 +1,40 @@
---
# Installs the Prometheus redis_exporter, run as the shared prometheus user.
- name: Create prometheus user and group
  import_tasks: prometheus_user.yml
- name: Download redis_exporter from GitHub
  get_url:
    dest: /tmp/prometheus_redis_exporter.tar.gz
    url: https://github.com/oliver006/redis_exporter/releases/download/v0.26.0/redis_exporter-v0.26.0.linux-amd64.tar.gz
    checksum: sha256:39354c57b9d02b455c584baf46a2df6ed3d1ac190c88e3ec0fa0c23b49ccc2bb
- name: Create redis_exporter directory
  file:
    path: /opt/prometheus_redis_exporter
    state: directory
    owner: prometheus
    group: prometheus
    mode: 0755
- name: Extract redis_exporter
  unarchive:
    remote_src: true
    src: /tmp/prometheus_redis_exporter.tar.gz
    dest: /opt/prometheus_redis_exporter
    owner: prometheus
    group: prometheus
    # NOTE(review): unlike the other exporters there is no --strip-components
    # here — presumably this tarball has the binary at its top level; verify.
    creates: /opt/prometheus_redis_exporter/redis_exporter
- name: Create redis_exporter service file
  copy:
    src: prometheus_redis_exporter.service
    dest: /etc/systemd/system/prometheus_redis_exporter.service
    owner: root
    group: root
    mode: 0644
- name: Start and enable redis_exporter service
  service:
    name: prometheus_redis_exporter
    state: started
    enabled: true

View File

@@ -0,0 +1,13 @@
---
# Downloads and compiles pts_lbsearch from a pinned upstream commit, with the
# source file verified by checksum before compiling.
- name: Download pts_lbsearch code from GitHub
  get_url:
    dest: /tmp/pts_lbsearch.c
    url: https://raw.githubusercontent.com/pts/pts-line-bisect/2ecd9f59246cfa28cb1aeac7cd8d98a8eea2914f/pts_lbsearch.c
    checksum: sha256:ef79efc2f1ecde504b6074f9c89bdc71259a833fa2a2dda4538ed5ea3e04aea1
- name: Compile pts_lbsearch
  command:
    chdir: /tmp
    # compilation command taken from the top of the source file
    cmd: gcc -ansi -W -Wall -Wextra -Werror=missing-declarations -s -O2 -DNDEBUG -o /usr/local/bin/pts_lbsearch pts_lbsearch.c
    # only compile when the binary doesn't already exist
    creates: /usr/local/bin/pts_lbsearch

View File

@@ -0,0 +1,2 @@
---
# Requirements file (relative to app_dir) that the python role installs into
# the virtualenv.
pip_requirements_filename: requirements.txt

View File

@@ -0,0 +1,93 @@
---
# Builds CPython from source (only when not already present), then creates the
# app's virtualenv and installs the project's Python dependencies into it.
- name: Check if the correct version of Python is already installed
  stat:
    path: /usr/local/bin/python{{ python_version }}
  register: python_binary
- name: Download and install Python
  when: not python_binary.stat.exists
  block:
  - name: Download Python source code
    get_url:
      dest: /tmp/python.tar.gz
      # NOTE(review): URL/checksum pin 3.8.10 specifically — keep them in sync
      # with the python_version variable used for the binary path above.
      url: https://www.python.org/ftp/python/3.8.10/Python-3.8.10.tgz
      checksum: sha256:b37ac74d2cbad2590e7cd0dd2b3826c29afe89a734090a87bf8c03c45066cb65
  - name: Create temp directory to extract Python to
    file:
      path: /tmp/python
      state: directory
  - name: Extract Python
    unarchive:
      remote_src: true
      src: /tmp/python.tar.gz
      dest: /tmp/python
      extra_opts:
        - --strip-components=1
  - name: Install build dependencies for Python
    apt:
      name:
        - make
        - build-essential
        - libssl-dev
        - zlib1g-dev
        - libbz2-dev
        - libreadline-dev
        - libsqlite3-dev
        - wget
        - curl
        - llvm
        - libncurses5-dev
        - libncursesw5-dev
        - xz-utils
        - tk-dev
  - name: Build and install Python (this can take a long time)
    shell:
      chdir: /tmp/python
      # altinstall installs pythonX.Y without clobbering the system python3
      cmd: |
        ./configure --enable-optimizations --with-ensurepip=install
        make
        make altinstall
- name: Create dir for venvs
  file:
    path: /opt/venvs
    state: directory
    owner: "{{ app_username }}"
    group: "{{ app_username }}"
    mode: 0755
- name: Install packages needed for compiling psycopg2
  apt:
    name:
      - gcc
      - libpq-dev
- name: Create venv
  become_user: "{{ app_username }}"
  command:
    cmd: python{{ python_version }} -m venv {{ venv_dir }}
    creates: "{{ venv_dir }}"
# Captured only to decide below whether the editable install was already done.
- name: Check installed packages in venv
  command:
    cmd: "{{ bin_dir }}/pip freeze"
  register: pip_freeze
  changed_when: false
- name: Install Python packages into venv
  become_user: "{{ app_username }}"
  pip:
    executable: "{{ bin_dir }}/pip"
    requirements: "{{ app_dir }}/{{ pip_requirements_filename }}"
- name: Install Tildes into venv as editable
  become_user: "{{ app_username }}"
  pip:
    executable: "{{ bin_dir }}/pip"
    name: "{{ app_dir }}"
    editable: true
  # pip freeze lists editable installs as "-e <path>", so skip when present
  when: "'-e '+app_dir not in pip_freeze.stdout_lines"

View File

@@ -0,0 +1,2 @@
---
# Redis release to build from source; the tarball checksum in the install
# tasks is tied to this version.
redis_version: 5.0.7

View File

@@ -0,0 +1,5 @@
---
# Handler: restart the Redis service so it picks up configuration changes.
- name: Restart redis
  service:
    name: redis
    state: restarted

View File

@@ -0,0 +1,131 @@
---
# Builds Redis from source (guarded by a stat check on the installed binary),
# then sets up the redis user, directories, config, systemd service, and the
# kernel settings Redis recommends.
- name: Check if Redis is installed
  stat:
    path: /usr/local/bin/redis-server
  register: redis_server
- name: Download and install Redis
  when: not redis_server.stat.exists
  block:
  - name: Download Redis from GitHub
    get_url:
      dest: /tmp/redis-{{ redis_version }}.tar.gz
      url: https://github.com/antirez/redis/archive/{{ redis_version }}.tar.gz
      # NOTE(review): checksum is tied to redis_version; update them together.
      checksum: sha256:2761422599f8969559e66797cd7f606c16e907bf82d962345a7d366c5d1278df
  - name: Create temp directory to extract Redis to
    file:
      path: /tmp/redis-{{ redis_version }}
      state: directory
  - name: Extract Redis
    unarchive:
      remote_src: true
      src: /tmp/redis-{{ redis_version }}.tar.gz
      dest: /tmp/redis-{{ redis_version }}
      extra_opts:
        - --strip-components=1
  - name: Install build dependencies for Redis
    apt:
      name: build-essential
  - name: Install Redis
    shell:
      chdir: /tmp/redis-{{ redis_version }}
      cmd: |
        make
        make install
# User/directory/config setup below always runs, even when the build was
# skipped, so later changes are still applied.
- name: Create group for redis user
  group:
    name: redis
- name: Create redis user
  user:
    name: redis
    group: redis
    create_home: false
- name: Create /run/redis
  file:
    path: /run/redis
    state: directory
    owner: redis
    group: redis
    mode: 0755
- name: Create /var/lib/redis
  file:
    path: /var/lib/redis
    state: directory
    owner: redis
    group: redis
    mode: 0700
- name: Create /var/log/redis
  file:
    path: /var/log/redis
    state: directory
    owner: redis
    group: redis
    mode: 0744
- name: Create Redis configuration file
  template:
    src: redis.conf.jinja2
    dest: /etc/redis.conf
    owner: redis
    group: redis
    mode: 0600
- name: Create Redis service file
  copy:
    src: redis.service
    dest: /etc/systemd/system/redis.service
    owner: root
    group: root
    mode: 0644
- name: Add service file for disabling transparent hugepage
  copy:
    src: transparent_hugepage.service
    dest: /etc/systemd/system/transparent_hugepage.service
    owner: root
    group: root
    mode: 0644
- name: Check if transparent hugepage is currently enabled
  command:
    cmd: cat /sys/kernel/mm/transparent_hugepage/enabled
  register: transparent_hugepage
  changed_when: false
- name: Start and enable "disable transparent hugepage" service
  service:
    name: transparent_hugepage.service
    state: started
    enabled: true
  # "[never]" marks the active kernel setting, so only run when it's not
  # already disabled
  when: "'[never]' not in transparent_hugepage.stdout"
- name: Check if kernel overcommit mode is already set
  command:
    cmd: sysctl -n vm.overcommit_memory
  register: overcommit_memory
  changed_when: false
- name: Set kernel overcommit mode temporarily (recommended by Redis)
  command:
    cmd: sysctl vm.overcommit_memory=1
  when: overcommit_memory.stdout == "0"
- name: Make kernel overcommit mode permanent (recommended by Redis, requires restart)
  lineinfile:
    path: /etc/sysctl.conf
    line: vm.overcommit_memory = 1
- name: Start and enable redis service
  service:
    name: redis
    state: started
    enabled: true

View File

@@ -43,12 +43,6 @@
# loadmodule /path/to/my_module.so
# loadmodule /path/to/other_module.so
{% if accumulator is defined %}
{% for line in accumulator['redis_loadmodule_lines'] -%}
{{ line }}
{% endfor %}
{% endif %}
################################## NETWORK #####################################
# By default, if no "bind" configuration directive is specified, Redis listens

View File

@@ -0,0 +1,3 @@
---
# Role dependencies: Redis (and its config file) must exist before a module
# can be loaded into it.
dependencies:
  - role: redis

View File

@@ -0,0 +1,27 @@
---
# Installs the redis-cell Redis module and loads it via /etc/redis.conf.
- name: Download redis-cell Redis module from GitHub
  get_url:
    dest: /tmp/redis-cell.tar.gz
    url: https://github.com/brandur/redis-cell/releases/download/v0.2.1/redis-cell-v0.2.1-x86_64-unknown-linux-gnu.tar.gz
    checksum: sha256:9427fb100f4cada817f30f854ead7f233de32948a0ec644f15988c275a2ed1cb
- name: Create /opt/redis-cell
  file:
    path: /opt/redis-cell
    state: directory
    owner: redis
    group: redis
    mode: 0755
- name: Extract redis-cell
  unarchive:
    remote_src: true
    src: /tmp/redis-cell.tar.gz
    dest: /opt/redis-cell
- name: Load redis-cell module in Redis configuration
  # lineinfile only adds the line if it isn't already present, so this is
  # idempotent and only notifies the restart on first addition
  lineinfile:
    path: /etc/redis.conf
    line: loadmodule /opt/redis-cell/libredis_cell.so
  notify:
    - Restart redis

View File

@@ -0,0 +1,8 @@
---
# Installs a global "activate" convenience script (templated with the venv and
# app paths) so users can activate the app's virtualenv easily.
- name: Install the activate script
  template:
    src: activate.sh.jinja2
    dest: /usr/local/bin/activate
    owner: root
    group: root
    mode: 0755

View File

@@ -1,4 +1,3 @@
{% from 'common.jinja2' import app_dir, bin_dir -%}
#!/bin/bash
#
# Simple convenience script to activate the venv and switch to the app dir

View File

@@ -0,0 +1,3 @@
---
# Role dependencies: nginx must exist because the certificate tasks notify
# its "Reload nginx" handler.
dependencies:
  - role: nginx

View File

@@ -0,0 +1,23 @@
---
# Generates a self-signed TLS certificate using the community.crypto
# collection, then reloads nginx to pick it up.
- name: Install packages needed by Ansible community plugins
  # the community.crypto modules below require the "cryptography" package
  pip:
    executable: pip3
    name: cryptography
- name: Create directory for certificate
  file:
    path: "{{ ssl_cert_dir }}"
    state: directory
    mode: 0755
- name: Create a private key
  community.crypto.openssl_privatekey:
    path: "{{ ssl_private_key_path }}"
- name: Create a self-signed certificate
  community.crypto.x509_certificate:
    path: "{{ ssl_cert_path }}"
    privatekey_path: "{{ ssl_private_key_path }}"
    provider: selfsigned
  notify:
    - Reload nginx

View File

@@ -0,0 +1,3 @@
---
# Role dependencies: the webassets service runs out of the Python venv, so the
# python role must run first.
dependencies:
  - role: python

View File

@@ -0,0 +1,29 @@
---
# Sets up the webassets systemd service, which auto-compiles JS files when
# they change.
- name: Check if site-icons.css file exists
  stat:
    path: "{{ app_dir }}/static/css/site-icons.css"
  register: site_icons_css_file
# webassets will crash the site if this file doesn't exist
- name: Create site-icons.css file
  file:
    path: "{{ app_dir }}/static/css/site-icons.css"
    state: touch
    owner: "{{ app_username }}"
    group: "{{ app_username }}"
    mode: 0644
  # only touch when missing so repeated runs don't report "changed"
  when: not site_icons_css_file.stat.exists
- name: Create systemd service file
  template:
    src: webassets.service.jinja2
    dest: /etc/systemd/system/webassets.service
    owner: root
    group: root
    mode: 0644
- name: Start and enable webassets service
  service:
    name: webassets
    state: started
    enabled: true

View File

@@ -1,4 +1,3 @@
{% from 'common.jinja2' import app_dir, app_username, bin_dir -%}
[Unit]
Description=Webassets - auto-compile JS files on change

View File

@@ -0,0 +1,24 @@
---
# Initializes the on-disk git repository backing the Tildes wiki, owned by the
# application user.
- name: Create the base repo directory
  file:
    path: /var/lib/tildes-wiki
    state: directory
    owner: "{{ app_username }}"
    group: "{{ app_username }}"
    mode: 0775
- name: Check if a wiki git repo exists
  stat:
    path: /var/lib/tildes-wiki/.git
  register: wiki_repo
- name: Create a git repo and initial commit
  become_user: "{{ app_username }}"
  shell:
    cmd: |
      git init
      git config user.name "Tildes"
      git config user.email "Tildes"
      git commit --allow-empty -m "Initial commit"
    chdir: /var/lib/tildes-wiki
  # guarded by the stat above so the repo is only initialized once
  when: not wiki_repo.stat.exists

View File

@@ -0,0 +1,10 @@
---
# Shared task file (pulled in via import_tasks by the prometheus and exporter
# roles) that ensures the system "prometheus" user and group exist.
- name: Create group for prometheus user
  group:
    name: prometheus
- name: Create prometheus user
  user:
    name: prometheus
    group: prometheus
    # service account only — no home directory needed
    create_home: false

8
ansible/vars.yml Normal file
View File

@@ -0,0 +1,8 @@
---
# Global variables shared across the Ansible roles/playbooks.
app_dir: /opt/tildes
venv_dir: /opt/venvs/tildes
# Convenience path to the venv's executables (python, pip, etc.)
bin_dir: "{{ venv_dir }}/bin"
static_sites_dir: /opt/tildes-static-sites
# Used for the interpreter path /usr/local/bin/python{{ python_version }} and
# for creating the venv; keep in sync with the pinned source download.
python_version: 3.8

View File

@@ -1,19 +0,0 @@
# look for files on the minion, not the master
file_client: local
# set a specific minion ID for use in top file / pillars
# options:
# - "dev" for the vagrant setup
# - "prod" for (single-server) production
# - "monitoring" for the monitoring server
id: dev
# base path for SSL certificates
ca.cert_base_path: '/etc/pki'
state_verbose: False
# enable new module.run state syntax
# https://docs.saltstack.com/en/develop/topics/releases/2017.7.0.html#state-module-changes
use_superseded:
- module.run

View File

@@ -1,8 +0,0 @@
gunicorn_args: --reload
ini_file: development.ini
ssl_cert_path: /etc/pki/tls/certs/localhost.crt
ssl_private_key_path: /etc/pki/tls/certs/localhost.key
nginx_worker_processes: 1
postgresql_version: 12
prometheus_ips: ['127.0.0.1']
site_hostname: localhost

View File

@@ -1,8 +0,0 @@
sentry_secret: 'sentry_secret_token'
sentry_email: 'sentry_superuser@email.address'
sentry_password: 'passwordforsentrysuperuser'
sentry_server_name: 'sentry.example.com'
developer_ips: ['127.0.0.1']
prometheus_server_name: 'prom.example.com'
grafana_server_name: 'grafana.example.com'
grafana_admin_password: 'passwordforgrafanaadmin'

View File

@@ -1,6 +0,0 @@
ssl_cert_path: /etc/letsencrypt/live/tildes.net/fullchain.pem
ssl_private_key_path: /etc/letsencrypt/live/tildes.net/privkey.pem
hsts_max_age: 60
nginx_worker_processes: auto
postgresql_version: 9.6
site_hostname: tildes.net

View File

@@ -1,11 +0,0 @@
gunicorn_args: --workers 8
ini_file: production.ini
ssl_cert_path: /etc/letsencrypt/live/tildes.net/fullchain.pem
ssl_private_key_path: /etc/letsencrypt/live/tildes.net/privkey.pem
hsts_max_age: 63072000
nginx_worker_processes: auto
postgresql_version: 12
prometheus_ips: ['2607:5300:201:3100::6e77']
site_hostname: tildes.net
ipv6_address: '2607:5300:0203:2dd8::'
ipv6_gateway: '2607:5300:0203:2dff:ff:ff:ff:ff'

View File

@@ -1,8 +0,0 @@
base:
'dev':
- dev
'prod':
- prod
'monitoring':
- monitoring
- monitoring-secrets

Some files were not shown because too many files have changed in this diff Show More