Compare commits


2 Commits

Author SHA1 Message Date
18c45219d8 WIP: Set stateful meta 2023-08-21 22:31:35 -07:00
2e2e4f2064 Add ability to set meta at job level 2023-08-21 22:30:18 -07:00
123 changed files with 2531 additions and 3321 deletions


@@ -30,5 +30,5 @@ repos:
- id: variable-sample - id: variable-sample
name: generate variable sample file name: generate variable sample file
language: system language: system
entry: bash -c 'venv/bin/python scripts/nomad_vars.py print > ./ansible_playbooks/vars/nomad_vars.sample.yml' entry: bash -c 'venv/bin/python nomad_vars.py print > ./ansible_playbooks/vars/nomad_vars.sample.yml'
types: [file] types: [file]


@@ -132,14 +132,14 @@
"filename": "core/authelia.yml", "filename": "core/authelia.yml",
"hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f", "hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f",
"is_verified": false, "is_verified": false,
"line_number": 189, "line_number": 185,
"is_secret": false "is_secret": false
} }
], ],
"core/grafana/grafana.ini": [ "core/metrics/grafana/grafana.ini": [
{ {
"type": "Basic Auth Credentials", "type": "Basic Auth Credentials",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4", "hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
"is_verified": false, "is_verified": false,
"line_number": 78, "line_number": 78,
@@ -147,7 +147,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd", "hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
"is_verified": false, "is_verified": false,
"line_number": 109, "line_number": 109,
@@ -155,7 +155,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997", "hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
"is_verified": false, "is_verified": false,
"line_number": 151, "line_number": 151,
@@ -163,7 +163,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f", "hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
"is_verified": false, "is_verified": false,
"line_number": 154, "line_number": 154,
@@ -171,7 +171,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3", "hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
"is_verified": false, "is_verified": false,
"line_number": 239, "line_number": 239,
@@ -179,7 +179,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50", "hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
"is_verified": false, "is_verified": false,
"line_number": 252, "line_number": 252,
@@ -187,5 +187,5 @@
} }
] ]
}, },
"generated_at": "2024-02-20T18:04:29Z" "generated_at": "2023-07-25T23:48:05Z"
} }


@@ -2,39 +2,20 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.2.0" version = "1.4.20"
hashes = [ hashes = [
"h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=", "h1:M/QVXHPfeySejJZI3I8mBYrL/J9VsbnyF/dKIMlUhXo=",
"zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66", "zh:02989edcebe724fc0aa873b22176fd20074c4f46295e728010711a8fc5dfa72c",
"zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff", "zh:089ba7d19bcf5c6bab3f8b8c5920eb6d78c52cf79bb0c5dfeb411c600e7efcba",
"zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61", "zh:235865a2182ca372bcbf440201a8b8cc0715ad5dbc4de893d99b6f32b5be53ab",
"zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f", "zh:67ea718764f3f344ecc6e027d20c1327b86353c8064aa90da3ec12cec4a88954",
"zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4",
"zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2",
"zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c", "zh:8c68c540f0df4980568bdd688c2adec86eda62eb2de154e3db215b16de0a7ae0",
"zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6", "zh:911969c63a69a733be57b96d54c5966c9424e1abec8d5f20038c8cef3a504c65",
"zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2", "zh:a673c92ddc9d47e8d53dcb9b376f1adcb4543488202fc83a3e7eab8677530684",
"zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1", "zh:a94a73eae89fd8c8ebf872013079be41161d3f293f4026c92d45c4c5667dd613",
] "zh:db6b89f8b696040c0344f00928e4cf6e0a75034421ba14cdcd8a4d23bc865dce",
} "zh:e512c0b1239e3d66b60d22c2b4de19fea288e492cde90dff9277cc475fd9dbbf",
"zh:ef6eccecbdef3bb8ce629cabfb5550c1db5c3e952943dda1786ef6cb470a8c23",
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
] ]
} }


@@ -62,7 +62,7 @@ ansible-cluster: $(VENV) ansible_galaxy
bootstrap-values: $(VENV) bootstrap-values: $(VENV)
env NOMAD_ADDR=http://192.168.2.101:4646 \ env NOMAD_ADDR=http://192.168.2.101:4646 \
NOMAD_TOKEN=$(shell jq -r .SecretID nomad_bootstrap.json) \ NOMAD_TOKEN=$(shell jq -r .SecretID nomad_bootstrap.json) \
$(VENV)/bin/python ./scripts/nomad_vars.py $(VENV)/bin/python ./nomad_vars.py
.PHONY: recover-nomad .PHONY: recover-nomad
recover-nomad: $(VENV) recover-nomad: $(VENV)
@@ -87,16 +87,6 @@ apply:
-auto-approve \ -auto-approve \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \ -var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: refresh
refresh:
@terraform refresh \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: destroy
destroy:
@terraform destroy \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: clean .PHONY: clean
clean: clean:
env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -vv \ env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -vv \


@@ -1,72 +1,68 @@
--- ---
all: all:
hosts:
n1.thefij:
nomad_node_class: ingress
nomad_reserved_memory: 1024
# nomad_meta:
# hw_transcode.device: /dev/dri
# hw_transcode.type: intel
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql
owner: "999"
group: "100"
mode: "0755"
read_only: false
- name: postgres-data
path: /srv/volumes/postgres
owner: "999"
group: "999"
mode: "0755"
read_only: false
n2.thefij:
nomad_node_class: ingress
nomad_reserved_memory: 1024
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: nextcloud-data
path: /srv/volumes/nextcloud
owner: "root"
group: "bin"
mode: "0755"
read_only: false
pi4:
nomad_node_class: ingress
nomad_reserved_memory: 512
nomad_meta:
hw_transcode.device: /dev/video11
hw_transcode.type: raspberry
qnomad.thefij:
ansible_host: 192.168.2.234
nomad_reserved_memory: 1024
# This VM uses a non-standard interface
nomad_network_interface: ens3
nomad_instances:
vars:
nomad_network_interface: eth0
children: children:
nomad_servers: {} servers:
nomad_clients: {} hosts:
nomad_servers: n1.thefij:
hosts: nomad_node_role: both
nonopi.thefij: # nomad_meta:
ansible_host: 192.168.2.170 # hw_transcode.device: /dev/dri
n1.thefij: {} # hw_transcode.type: intel
n2.thefij: {} nfs_mounts:
pi4: {} - src: 10.50.250.2:/srv/volumes
# qnomad.thefij: {} path: /srv/volumes/moxy
nomad_clients: opts: proto=tcp,rw
hosts: nomad_unique_host_volumes:
n1.thefij: {} - name: mysql-data
n2.thefij: {} path: /srv/volumes/mysql
pi4: {} owner: "999"
# qnomad.thefij: {} group: "100"
mode: "0755"
read_only: false
- name: postgres-data
path: /srv/volumes/postgres
owner: "999"
group: "999"
mode: "0755"
read_only: false
n2.thefij:
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_node_class: ingress
nomad_node_role: both
nomad_unique_host_volumes:
- name: nextcloud-data
path: /srv/volumes/nextcloud
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: gitea-data
path: /srv/volumes/gitea
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: sonarr-data
path: /srv/volumes/sonarr
owner: "root"
group: "bin"
mode: "0755"
read_only: false
pi4:
nomad_node_role: both
nomad_meta:
hw_transcode.device: /dev/video11
hw_transcode.type: raspberry
consul_instances:
children:
servers: {}
vault_instances:
children:
servers: {}
nomad_instances:
children:
servers: {}


@@ -0,0 +1,80 @@
---
- name: Bootstrap Consul values
hosts: consul_instances
gather_facts: false
vars_files:
- vars/consul_values.yml
tasks:
- name: Add values
delegate_to: localhost
run_once: true
block:
- name: Install python-consul
pip:
name: python-consul
extra_args: --index-url https://pypi.org/simple
- name: Write values
consul_kv:
host: "{{ inventory_hostname }}"
key: "{{ item.key }}"
value: "{{ item.value }}"
loop: "{{ consul_values | default({}) | dict2items }}"
- name: Bootstrap value values
hosts: vault_instances
gather_facts: false
vars_files:
- vars/vault_hashi_vault_values.yml
tasks:
- name: Bootstrap Vault secrets
delegate_to: localhost
run_once: true
block:
- name: Install hvac
pip:
name: hvac
extra_args: --index-url https://pypi.org/simple
- name: Check mount
community.hashi_vault.vault_read:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
ignore_errors: true
register: check_mount
- name: Create kv mount
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
data:
type: kv-v2
when: check_mount is not succeeded
- name: Write values
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "kv/data/{{ item.key }}"
data:
data:
"{{ item.value }}"
loop: "{{ hashi_vault_values | default({}) | dict2items }}"
retries: 2
delay: 10
- name: Write userpass
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "auth/userpass/users/{{ item.name }}"
data: '{"password": "{{ item.password }}", "policies": "{{ item.policies }}"}'
loop: "{{ vault_userpass }}"


@@ -1,5 +1,27 @@
# Stops Nomad and clears all data from its ata dirs # Stops Consul, Vault, and Nomad and clears all data from their data dirs
--- ---
- name: Delete Consul data
hosts: consul_instances
tasks:
- name: Stop consul
systemd:
name: consul
state: stopped
become: true
- name: Stop vault
systemd:
name: vault
state: stopped
become: true
- name: Remove data dir
file:
path: /opt/consul
state: absent
become: true
- name: Delete Nomad data - name: Delete Nomad data
hosts: nomad_instances hosts: nomad_instances


@@ -14,14 +14,8 @@
state: restarted state: restarted
become: true become: true
- name: Start Docker
systemd:
name: docker
state: started
become: true
- name: Start Nomad - name: Start Nomad
systemd: systemd:
name: nomad name: nomad
state: started state: stopped
become: true become: true


@@ -0,0 +1,88 @@
---
- name: Stop Nomad
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Stop Vault
hosts: vault_instances
gather_facts: false
tasks:
- name: Stop Vault
systemd:
name: vault
state: stopped
become: true
- name: Recover Consul
hosts: consul_instances
gather_facts: false
tasks:
- name: Stop Consul
systemd:
name: consul
state: stopped
become: true
- name: Get node-id
slurp:
src: /opt/consul/node-id
register: consul_node_id
become: true
- name: Node Info
debug:
msg: |
node_id: {{ consul_node_id.content | b64decode }}
address: {{ ansible_default_ipv4.address }}
- name: Save
copy:
dest: "/opt/consul/raft/peers.json"
# I used to have reject('equalto', inventory_hostname) in the loop, but I'm not sure if I should
content: |
[
{% for host in ansible_play_hosts -%}
{
"id": "{{ hostvars[host].consul_node_id.content | b64decode }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:8300",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Consul
systemd:
name: consul
state: restarted
become: true
- name: Start Vault
hosts: vault_instances
gather_facts: false
tasks:
- name: Start Vault
systemd:
name: vault
state: started
become: true
- name: Start Nomad
hosts: nomad_instances
gather_facts: false
tasks:
- name: Start Nomad
systemd:
name: nomad
state: started
become: true


@@ -1,6 +1,6 @@
--- ---
- name: Recover Nomad - name: Recover Nomad
hosts: nomad_servers hosts: nomad_instances
any_errors_fatal: true any_errors_fatal: true
tasks: tasks:
@@ -10,10 +10,6 @@
state: stopped state: stopped
become: true become: true
- name: Remount all shares
command: mount -a
become: true
- name: Get node-id - name: Get node-id
slurp: slurp:
src: /var/nomad/server/node-id src: /var/nomad/server/node-id


@@ -1,6 +1,6 @@
--- ---
- name: Update DNS for bootstrapping with non-Nomad host - name: Update DNS for bootstrapping with non-Nomad host
hosts: nomad_instances hosts: consul_instances
become: true become: true
gather_facts: false gather_facts: false
vars: vars:
@@ -14,7 +14,7 @@
line: "nameserver {{ non_nomad_dns }}" line: "nameserver {{ non_nomad_dns }}"
- name: Install Docker - name: Install Docker
hosts: nomad_clients hosts: nomad_instances
become: true become: true
vars: vars:
docker_architecture_map: docker_architecture_map:
@@ -44,7 +44,7 @@
# state: present # state: present
- name: Create NFS mounts - name: Create NFS mounts
hosts: nomad_clients hosts: nomad_instances
become: true become: true
vars: vars:
shared_nfs_mounts: shared_nfs_mounts:
@@ -112,24 +112,9 @@
- name: nzbget-config - name: nzbget-config
path: /srv/volumes/nas-container/nzbget path: /srv/volumes/nas-container/nzbget
read_only: false read_only: false
- name: sonarr-config
path: /srv/volumes/nas-container/sonarr
read_only: false
- name: lidarr-config - name: lidarr-config
path: /srv/volumes/nas-container/lidarr path: /srv/volumes/nas-container/lidarr
read_only: false read_only: false
- name: radarr-config
path: /srv/volumes/nas-container/radarr
read_only: false
- name: bazarr-config
path: /srv/volumes/nas-container/bazarr
read_only: false
- name: gitea-data
path: /srv/volumes/nas-container/gitea
read_only: false
- name: ytdl-web
path: /srv/volumes/nas-container/ytdl-web
read_only: false
- name: all-volumes - name: all-volumes
path: /srv/volumes path: /srv/volumes
owner: "root" owner: "root"
@@ -140,10 +125,9 @@
roles: roles:
- name: ansible-nomad - name: ansible-nomad
vars: vars:
nomad_version: "1.8.0-1" nomad_version: "1.6.1-1"
nomad_install_upgrade: true nomad_install_upgrade: true
nomad_allow_purge_config: true nomad_allow_purge_config: true
nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}"
# Where nomad gets installed to # Where nomad gets installed to
nomad_bin_dir: /usr/bin nomad_bin_dir: /usr/bin
@@ -197,8 +181,7 @@
nomad_bind_address: 0.0.0.0 nomad_bind_address: 0.0.0.0
# Default interface for binding tasks # Default interface for binding tasks
# This is now set at the inventory level nomad_network_interface: eth0
# nomad_network_interface: eth0
# Create networks for binding task ports # Create networks for binding task ports
nomad_host_networks: nomad_host_networks:
@@ -217,7 +200,7 @@
enabled: true enabled: true
- name: Bootstrap Nomad ACLs and scheduler - name: Bootstrap Nomad ACLs and scheduler
hosts: nomad_servers hosts: nomad_instances
tasks: tasks:
- name: Start Nomad - name: Start Nomad
@@ -247,7 +230,6 @@
run_once: true run_once: true
ignore_errors: true ignore_errors: true
register: bootstrap_result register: bootstrap_result
changed_when: bootstrap_result is succeeded
- name: Save bootstrap result - name: Save bootstrap result
copy: copy:
@@ -279,15 +261,13 @@
- list - list
environment: environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}" NOMAD_TOKEN: "{{ read_secretid.stdout }}"
register: policies
run_once: true run_once: true
changed_when: false register: policies
- name: Copy policy - name: Copy policy
copy: copy:
src: ../acls/nomad-anon-policy.hcl src: ../acls/nomad-anon-policy.hcl
dest: /tmp/anonymous.policy.hcl dest: /tmp/anonymous.policy.hcl
delegate_to: "{{ play_hosts[0] }}"
run_once: true run_once: true
register: anon_policy register: anon_policy
@@ -307,18 +287,6 @@
delegate_to: "{{ play_hosts[0] }}" delegate_to: "{{ play_hosts[0] }}"
run_once: true run_once: true
- name: Read scheduler config
command:
argv:
- nomad
- operator
- scheduler
- get-config
- -json
run_once: true
register: scheduler_config
changed_when: false
- name: Enable service scheduler preemption - name: Enable service scheduler preemption
command: command:
argv: argv:
@@ -326,24 +294,12 @@
- operator - operator
- scheduler - scheduler
- set-config - set-config
- -preempt-system-scheduler=true
- -preempt-service-scheduler=true - -preempt-service-scheduler=true
environment: environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}" NOMAD_TOKEN: "{{ read_secretid.stdout }}"
delegate_to: "{{ play_hosts[0] }}"
run_once: true run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["ServiceSchedulerEnabled"] is false
- name: Enable system scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-system-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["SystemSchedulerEnabled"] is false
# - name: Set up Nomad backend and roles in Vault # - name: Set up Nomad backend and roles in Vault
# community.general.terraform: # community.general.terraform:


@@ -0,0 +1,27 @@
---
- name: Unseal Vault
hosts: vault_instances
tasks:
- name: Get Vault status
uri:
url: http://127.0.0.1:8200/v1/sys/health
method: GET
status_code: 200, 429, 472, 473, 501, 503
body_format: json
return_content: true
register: vault_status
- name: Unseal Vault
no_log: true
command:
argv:
- "vault"
- "operator"
- "unseal"
- "-address=http://127.0.0.1:8200/"
- "{{ item }}"
loop: "{{ unseal_keys_hex }}"
when:
- unseal_keys_hex is defined
- vault_status.json["sealed"]


@@ -2,13 +2,24 @@ nomad/jobs:
base_hostname: VALUE base_hostname: VALUE
db_user_ro: VALUE db_user_ro: VALUE
ldap_base_dn: VALUE ldap_base_dn: VALUE
mysql_root_password: VALUE
notify_email: VALUE notify_email: VALUE
smtp_password: VALUE
smtp_port: VALUE
smtp_server: VALUE
smtp_tls: VALUE
smtp_user: VALUE
nomad/jobs/adminer/adminer/stunnel:
mysql_stunnel_psk: VALUE
postgres_stunnel_psk: VALUE
nomad/jobs/authelia: nomad/jobs/authelia:
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
email_sender: VALUE email_sender: VALUE
jwt_secret: VALUE jwt_secret: VALUE
lldap_admin_password: VALUE
lldap_admin_user: VALUE
oidc_clients: VALUE oidc_clients: VALUE
oidc_hmac_secret: VALUE oidc_hmac_secret: VALUE
oidc_issuer_certificate_chain: VALUE oidc_issuer_certificate_chain: VALUE
@@ -16,39 +27,40 @@ nomad/jobs/authelia:
session_secret: VALUE session_secret: VALUE
storage_encryption_key: VALUE storage_encryption_key: VALUE
nomad/jobs/authelia/authelia/stunnel: nomad/jobs/authelia/authelia/stunnel:
ldap_stunnel_psk: VALUE
mysql_stunnel_psk: VALUE
redis_stunnel_psk: VALUE redis_stunnel_psk: VALUE
nomad/jobs/backup: nomad/jobs/backup:
backup_passphrase: VALUE backup_passphrase: VALUE
mysql_stunnel_psk: VALUE
nas_ftp_host: VALUE nas_ftp_host: VALUE
nas_ftp_pass: VALUE nas_ftp_pass: VALUE
nas_ftp_user: VALUE nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n1: nomad/jobs/backup-oneoff-n1:
backup_passphrase: VALUE backup_passphrase: VALUE
mysql_stunnel_psk: VALUE
nas_ftp_host: VALUE nas_ftp_host: VALUE
nas_ftp_pass: VALUE nas_ftp_pass: VALUE
nas_ftp_user: VALUE nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n2: nomad/jobs/backup-oneoff-n2:
backup_passphrase: VALUE backup_passphrase: VALUE
mysql_stunnel_psk: VALUE
nas_ftp_host: VALUE nas_ftp_host: VALUE
nas_ftp_pass: VALUE nas_ftp_pass: VALUE
nas_ftp_user: VALUE nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-pi4: nomad/jobs/backup-oneoff-pi4:
backup_passphrase: VALUE backup_passphrase: VALUE
mysql_stunnel_psk: VALUE
nas_ftp_host: VALUE nas_ftp_host: VALUE
nas_ftp_pass: VALUE nas_ftp_pass: VALUE
nas_ftp_user: VALUE nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE nomad/jobs/bazarr/bazarr:
nas_minio_secret_access_key: VALUE
nomad/jobs/bazarr:
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
nomad/jobs/bazarr/bazarr/postgres-bootstrap:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/blocky: nomad/jobs/blocky:
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
@@ -56,6 +68,7 @@ nomad/jobs/blocky:
mappings: VALUE mappings: VALUE
whitelists_ads: VALUE whitelists_ads: VALUE
nomad/jobs/blocky/blocky/stunnel: nomad/jobs/blocky/blocky/stunnel:
mysql_stunnel_psk: VALUE
redis_stunnel_psk: VALUE redis_stunnel_psk: VALUE
nomad/jobs/ddclient: nomad/jobs/ddclient:
domain: VALUE domain: VALUE
@@ -67,9 +80,11 @@ nomad/jobs/git:
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
oidc_secret: VALUE mysql_stunnel_psk: VALUE
secret_key: VALUE secret_key: VALUE
smtp_sender: VALUE smtp_sender: VALUE
nomad/jobs/git/git/stunnel:
mysql_stunnel_psk: VALUE
nomad/jobs/grafana: nomad/jobs/grafana:
admin_pw: VALUE admin_pw: VALUE
alert_email_addresses: VALUE alert_email_addresses: VALUE
@@ -86,19 +101,31 @@ nomad/jobs/grafana:
slack_hook_url: VALUE slack_hook_url: VALUE
smtp_password: VALUE smtp_password: VALUE
smtp_user: VALUE smtp_user: VALUE
nomad/jobs/grafana/grafana/stunnel:
mysql_stunnel_psk: VALUE
nomad/jobs/immich: nomad/jobs/immich:
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
nomad/jobs/lego: nomad/jobs/ipdvr/radarr:
acme_email: VALUE db_pass: VALUE
domain_lego_dns: VALUE db_user: VALUE
usersfile: VALUE nomad/jobs/ipdvr/radarr/bootstrap:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/lidarr: nomad/jobs/lidarr:
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
nomad/jobs/lidarr/lidarr/postgres-bootstrap:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/lidarr/lidarr/stunnel:
postgres_stunnel_psk: VALUE
nomad/jobs/lldap: nomad/jobs/lldap:
admin_email: VALUE
admin_password: VALUE
admin_user: VALUE
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
@@ -106,45 +133,48 @@ nomad/jobs/lldap:
key_seed: VALUE key_seed: VALUE
smtp_from: VALUE smtp_from: VALUE
smtp_reply_to: VALUE smtp_reply_to: VALUE
nomad/jobs/lldap/lldap/bootstrap:
mysql_root_password: VALUE
nomad/jobs/lldap/lldap/stunnel:
allowed_psks: VALUE
mysql_stunnel_psk: VALUE
nomad/jobs/minitor: nomad/jobs/minitor:
mailgun_api_key: VALUE mailgun_api_key: VALUE
nomad/jobs/mysql-server: nomad/jobs/mysql-server:
mysql_root_password: VALUE allowed_psks: VALUE
root_password: VALUE
nomad/jobs/photoprism: nomad/jobs/photoprism:
admin_password: VALUE admin_password: VALUE
admin_user: VALUE admin_user: VALUE
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
mysql_stunnel_psk: VALUE
nomad/jobs/photoprism/photoprism/stunnel:
mysql_stunnel_psk: VALUE
nomad/jobs/postgres-server: nomad/jobs/postgres-server:
superuser: VALUE superuser: VALUE
superuser_pass: VALUE superuser_pass: VALUE
nomad/jobs/radarr: nomad/jobs/postgres-server/postgres-server/stunnel:
db_name: VALUE allowed_psks: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/redis-authelia: nomad/jobs/redis-authelia:
allowed_psks: VALUE allowed_psks: VALUE
nomad/jobs/redis-blocky: nomad/jobs/redis-blocky:
allowed_psks: VALUE allowed_psks: VALUE
nomad/jobs/rediscommander: nomad/jobs/rediscommander:
redis_stunnel_psk: VALUE redis_stunnel_psk: VALUE
nomad/jobs/sonarr: nomad/jobs/traefik:
acme_email: VALUE
domain_lego_dns: VALUE
usersfile: VALUE
nomad/jobs/tubesync:
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
nomad/jobs/traefik: nomad/jobs/tubesync/tubesync/stunnel:
external: VALUE mysql_stunnel_psk: VALUE
usersfile: VALUE
nomad/jobs/unifi-traffic-route-ips:
unifi_password: VALUE
unifi_username: VALUE
nomad/oidc: nomad/oidc:
secret: VALUE secret: VALUE
secrets/ldap:
admin_email: VALUE
admin_password: VALUE
admin_user: VALUE
secrets/mysql: secrets/mysql:
mysql_root_password: VALUE mysql_root_password: VALUE
secrets/postgres: secrets/postgres:


@@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}


@@ -1,136 +0,0 @@
resource "nomad_job" "backup" {
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = null,
use_wesher = var.use_wesher
})
}
resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "n2", "pi4"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = each.key,
use_wesher = var.use_wesher
})
}
locals {
# NOTE: This can't be dynamic in first deploy since these values are not known
# all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-n2", "backup-oneoff-pi4"])
}
resource "nomad_acl_policy" "secrets_mysql" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_psk" {
path = "secrets/mysql/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.mysql_psk.result}"
}
}
resource "nomad_acl_policy" "mysql_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}
resource "nomad_acl_policy" "secrets_postgres" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_psk" {
path = "secrets/postgres/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.postgres_psk.result}"
}
}
resource "nomad_acl_policy" "postgres_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres-psk"
description = "Give access to Postgres PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}


@@ -1,57 +0,0 @@
job "git" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/gitea"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local gitea dir" {
pre_script {
on_backup = "mkdir -p /local/gitea"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "gitea"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/gitea/dump.sql"
}
}
backup {
paths = [
"/local/gitea",
"/data/nas-container/gitea",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@@ -1,64 +0,0 @@
job "radarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/radarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/radarr"]
backup_opts {
Exclude = [
"radarr_backup_*.zip",
"/data/nas-container/radarr/MediaCover",
"/data/nas-container/radarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@@ -1,67 +0,0 @@
job "sonarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sonarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/sonarr"]
backup_opts {
Exclude = [
"sonarr_backup_*.zip",
"/data/nas-container/sonarr/MediaCover",
"/data/nas-container/sonarr/logs",
"*.db",
"*.db-shm",
"*.db-wal",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

core.tf Normal file

@@ -0,0 +1,12 @@
module "databases" {
source = "./databases"
}
module "core" {
source = "./core"
base_hostname = var.base_hostname
# Metrics and Blocky depend on databases
depends_on = [module.databases]
}


@@ -2,39 +2,20 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.1" version = "1.4.20"
hashes = [ hashes = [
"h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=", "h1:M/QVXHPfeySejJZI3I8mBYrL/J9VsbnyF/dKIMlUhXo=",
"zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb", "zh:02989edcebe724fc0aa873b22176fd20074c4f46295e728010711a8fc5dfa72c",
"zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1", "zh:089ba7d19bcf5c6bab3f8b8c5920eb6d78c52cf79bb0c5dfeb411c600e7efcba",
"zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9", "zh:235865a2182ca372bcbf440201a8b8cc0715ad5dbc4de893d99b6f32b5be53ab",
"zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa", "zh:67ea718764f3f344ecc6e027d20c1327b86353c8064aa90da3ec12cec4a88954",
"zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d",
"zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39", "zh:8c68c540f0df4980568bdd688c2adec86eda62eb2de154e3db215b16de0a7ae0",
"zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082", "zh:911969c63a69a733be57b96d54c5966c9424e1abec8d5f20038c8cef3a504c65",
"zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196", "zh:a673c92ddc9d47e8d53dcb9b376f1adcb4543488202fc83a3e7eab8677530684",
"zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f", "zh:a94a73eae89fd8c8ebf872013079be41161d3f293f4026c92d45c4c5667dd613",
"zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7", "zh:db6b89f8b696040c0344f00928e4cf6e0a75034421ba14cdcd8a4d23bc865dce",
] "zh:e512c0b1239e3d66b60d22c2b4de19fea288e492cde90dff9277cc475fd9dbbf",
} "zh:ef6eccecbdef3bb8ce629cabfb5550c1db5c3e952943dda1786ef6cb470a8c23",
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
] ]
} }


@@ -4,12 +4,11 @@ module "authelia" {
name = "authelia" name = "authelia"
instance_count = 2 instance_count = 2
priority = 70 priority = 70
image = "authelia/authelia:4.37" image = "authelia/authelia:latest"
args = ["--config", "$${NOMAD_TASK_DIR}/authelia.yml"] args = ["--config", "$${NOMAD_TASK_DIR}/authelia.yml"]
ingress = true ingress = true
service_port = 9999 service_port = 9999
service_port_static = true service_port_static = true
use_wesher = var.use_wesher
# metrics_port = 9959 # metrics_port = 9959
env = { env = {
@@ -27,14 +26,13 @@ module "authelia" {
use_mysql = true use_mysql = true
use_ldap = true use_ldap = true
use_redis = true use_redis = true
use_smtp = true
mysql_bootstrap = { mysql_bootstrap = {
enabled = true enabled = true
} }
service_tags = [ service_tags = [
# Configure traefik to add this middleware # Configure traefik to add this middleware
"traefik.http.middlewares.authelia.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?rd=https%3A%2F%2Fauthelia.${var.base_hostname}%2F", "traefik.http.middlewares.authelia.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?rd=https%3A%2F%2Fauthelia.thefij.rocks%2F",
"traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true", "traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email", "traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
"traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?auth=basic", "traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?auth=basic",
@@ -49,7 +47,7 @@ module "authelia" {
mount = false mount = false
}, },
{ {
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}" data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .lldap_admin_password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}" dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "ldap_password.txt" dest = "ldap_password.txt"
mount = false mount = false
@@ -97,7 +95,7 @@ module "authelia" {
mount = false mount = false
}, },
{ {
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}" data = "{{ with nomadVar \"nomad/jobs\" }}{{ .smtp_password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}" dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "smtp_password.txt" dest = "smtp_password.txt"
mount = false mount = false
@@ -105,43 +103,6 @@ module "authelia" {
] ]
} }
resource "nomad_acl_policy" "authelia" {
name = "authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "authelia/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
# Give access to ldap secrets
resource "nomad_acl_policy" "authelia_ldap_secrets" {
name = "authelia-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
resource "nomad_acl_auth_method" "nomad_authelia" { resource "nomad_acl_auth_method" "nomad_authelia" {
name = "authelia" name = "authelia"
type = "OIDC" type = "OIDC"
@@ -150,7 +111,7 @@ resource "nomad_acl_auth_method" "nomad_authelia" {
default = true default = true
config { config {
oidc_discovery_url = "https://authelia.${var.base_hostname}" oidc_discovery_url = "https://authelia.thefij.rocks"
oidc_client_id = "nomad" oidc_client_id = "nomad"
oidc_client_secret = yamldecode(file("${path.module}/../ansible_playbooks/vars/nomad_vars.yml"))["nomad/oidc"]["secret"] oidc_client_secret = yamldecode(file("${path.module}/../ansible_playbooks/vars/nomad_vars.yml"))["nomad/oidc"]["secret"]
bound_audiences = ["nomad"] bound_audiences = ["nomad"]
@@ -159,8 +120,8 @@ resource "nomad_acl_auth_method" "nomad_authelia" {
"openid", "openid",
] ]
allowed_redirect_uris = [ allowed_redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback", "https://nomad.thefij.rocks/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens", "https://nomad.thefij.rocks/ui/settings/tokens",
] ]
list_claim_mappings = { list_claim_mappings = {
"groups" : "roles" "groups" : "roles"


@@ -89,8 +89,8 @@ authentication_backend:
groups_filter: (member={dn}) groups_filter: (member={dn})
## The username and password of the admin user. ## The username and password of the admin user.
{{ with nomadVar "secrets/ldap" }} {{ with nomadVar "nomad/jobs/authelia" }}
user: uid={{ .admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }} user: uid={{ .lldap_admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
{{ end }} {{ end }}
# password set using secrets file # password set using secrets file
# password: <secret> # password: <secret>
@@ -151,10 +151,6 @@ access_control:
networks: 192.168.5.0/24 networks: 192.168.5.0/24
rules: rules:
{{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }}
- domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
{{ .rule.Value | indent 6 }}
{{ end }}{{ end }}
## Rules applied to everyone ## Rules applied to everyone
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}' - domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
networks: networks:
@@ -225,11 +221,11 @@ notifier:
## You can disable the notifier startup check by setting this to true. ## You can disable the notifier startup check by setting this to true.
disable_startup_check: false disable_startup_check: false
{{ with nomadVar "secrets/smtp" }} {{ with nomadVar "nomad/jobs" }}
smtp: smtp:
host: {{ .server }} host: {{ .smtp_server }}
port: {{ .port }} port: {{ .smtp_port }}
username: {{ .user }} username: {{ .smtp_user }}
# password: <in file> # password: <in file>
{{- end }} {{- end }}


@@ -2,39 +2,20 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0" version = "1.4.16"
hashes = [ hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=", "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72", "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199", "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a", "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc", "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf", "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251", "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc", "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599", "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c", "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543", "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e", "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
] ]
} }


@@ -5,25 +5,16 @@ variable "config_data" {
job "blocky" { job "blocky" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "system"
priority = 100 priority = 100
constraint {
distinct_hosts = true
}
update { update {
max_parallel = 1 max_parallel = 1
# TODO: maybe switch to service job from system so we can use canary and autorollback # TODO: maybe switch to service job from system so we can use canary and autorollback
auto_revert = true # auto_revert = true
min_healthy_time = "60s"
healthy_deadline = "5m"
} }
group "blocky" { group "blocky" {
# TODO: This must be updated to match the nubmer of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 3
network { network {
mode = "bridge" mode = "bridge"
@@ -33,17 +24,13 @@ job "blocky" {
} }
port "api" { port "api" {
%{~ if use_wesher ~}
host_network = "wesher" host_network = "wesher"
%{~ endif ~}
to = "4000" to = "4000"
} }
dns { dns {
# Set expclicit DNS servers because tasks, by default, use this task # Set expclicit DNS servers because tasks, by default, use this task
servers = [ servers = ["1.1.1.1", "1.0.0.1"]
"192.168.2.1",
]
} }
} }
@@ -71,11 +58,6 @@ job "blocky" {
path = "/" path = "/"
interval = "10s" interval = "10s"
timeout = "3s" timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
} }
} }
@@ -83,20 +65,20 @@ job "blocky" {
driver = "docker" driver = "docker"
config { config {
image = "ghcr.io/0xerr0r/blocky:v0.24" image = "ghcr.io/0xerr0r/blocky"
args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"] args = ["-c", "${NOMAD_TASK_DIR}/config.yml"]
ports = ["dns", "api"] ports = ["dns", "api"]
} }
resources { resources {
cpu = 50 cpu = 50
memory = 75 memory = 50
memory_max = 150 memory_max = 100
} }
template { template {
data = var.config_data data = var.config_data
destination = "$${NOMAD_TASK_DIR}/config.yml" destination = "${NOMAD_TASK_DIR}/config.yml"
splay = "1m" splay = "1m"
wait { wait {
@@ -113,41 +95,7 @@ job "blocky" {
{{- end }} {{- end }}
{{- end }} {{- end }}
EOF EOF
destination = "$${NOMAD_TASK_DIR}/nomad.hosts" destination = "${NOMAD_TASK_DIR}/nomad.hosts"
change_mode = "noop"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "nomad/jobs/blocky" }}
{{ with nomadVar "nomad/jobs/blocky" -}}
{{ .block_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/block"
change_mode = "noop"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "nomad/jobs/blocky" }}
{{ with nomadVar "nomad/jobs/blocky" -}}
{{ .allow_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/allow"
change_mode = "noop" change_mode = "noop"
wait { wait {
@@ -166,9 +114,9 @@ job "blocky" {
} }
config { config {
image = "iamthefij/stunnel:latest" image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@@ -178,32 +126,41 @@ job "blocky" {
template { template {
data = <<EOF data = <<EOF
syslog = no set -e
foreground = yes apk add stunnel
delay = yes exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
[mysql_client]
client = yes
accept = 127.0.0.1:3306
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{- end }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
[redis_client]
client = yes
accept = 127.0.0.1:6379
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
{{- end }}
EOF EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf" destination = "${NOMAD_TASK_DIR}/start.sh"
} }
template { template {
data = <<EOF data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/blocky" }}{{ .psk }}{{ end -}} syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
[redis_client]
client = yes
accept = 127.0.0.1:6379
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "nomad/jobs/blocky/blocky/stunnel" }}{{ .mysql_stunnel_psk }}{{ end -}}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt" destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
} }
@@ -212,11 +169,11 @@ EOF
data = <<EOF data = <<EOF
{{- with nomadVar "nomad/jobs/blocky/blocky/stunnel" -}}{{ .redis_stunnel_psk }}{{ end -}} {{- with nomadVar "nomad/jobs/blocky/blocky/stunnel" -}}{{ .redis_stunnel_psk }}{{ end -}}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt" destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
} }
} }
task "mysql-bootstrap" { task "blocky-bootstrap" {
driver = "docker" driver = "docker"
lifecycle { lifecycle {
@ -241,7 +198,7 @@ EOF
host=127.0.0.1 host=127.0.0.1
port=3306 port=3306
user=root user=root
{{ with nomadVar "secrets/mysql" }} {{ with nomadVar "nomad/jobs" }}
password={{ .mysql_root_password }} password={{ .mysql_root_password }}
{{ end }} {{ end }}
EOF EOF


@@ -1,68 +1,25 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
locals { locals {
config_data = file("${path.module}/config.yml") config_data = templatefile(
"${path.module}/config.yml",
{
"base_hostname" = var.base_hostname,
}
)
} }
resource "nomad_job" "blocky" { resource "nomad_job" "blocky" {
hcl2 { hcl2 {
enabled = true
vars = { vars = {
"config_data" = local.config_data, "config_data" = local.config_data,
} }
} }
jobspec = templatefile("${path.module}/blocky.nomad", { jobspec = file("${path.module}/blocky.nomad")
use_wesher = var.use_wesher,
})
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "blocky_mysql_bootstrap_secrets" {
name = "blocky-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "mysql-bootstrap"
}
}
resource "random_password" "blocky_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "blocky_mysql_psk" {
path = "secrets/mysql/allowed_psks/blocky"
items = {
psk = "blocky:${resource.random_password.blocky_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "blocky_mysql_psk" {
name = "blocky-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/blocky" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "stunnel"
}
} }


@@ -5,47 +5,25 @@ ports:
bootstrapDns: bootstrapDns:
- upstream: 1.1.1.1 - upstream: 1.1.1.1
- upstream: 1.0.0.1 - upstream: 1.0.0.1
- upstream: 9.9.9.9
- upstream: 149.112.112.112
upstream:
upstreams: default:
init: - 1.1.1.1
strategy: fast - 1.0.0.1
groups: quad9:
default: - 9.9.9.9
- https://dns.quad9.net/dns-query - 149.112.112.112
- tcp-tls:dns.quad9.net - 2620:fe::fe
- https://one.one.one.one/dns-query - 2620:fe::9
- tcp-tls:one.one.one.one - https://dns.quad9.net/dns-query
cloudflare: - tcp-tls:dns.quad9.net
- 1.1.1.1 quad9-unsecured:
- 1.0.0.1 - 9.9.9.10
- 2606:4700:4700::1111 - 149.112.112.10
- 2606:4700:4700::1001 - 2620:fe::10
- https://one.one.one.one/dns-query - 2620:fe::fe:10
- tcp-tls:one.one.one.one - https://dns10.quad9.net/dns-query
quad9: - tcp-tls:dns10.quad9.net
- 9.9.9.9
- 149.112.112.112
- 2620:fe::fe
- 2620:fe::9
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
quad9-secured:
- 9.9.9.11
- 149.112.112.11
- 2620:fe::11
- 2620:fe::fe:11
- https://dns11.quad9.net/dns-query
- tcp-tls:dns11.quad9.net
quad9-unsecured:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
- https://dns10.quad9.net/dns-query
- tcp-tls:dns10.quad9.net
conditional: conditional:
fallbackUpstream: false fallbackUpstream: false
@@ -58,11 +36,9 @@ conditional:
.: 192.168.2.1 .: 192.168.2.1
hostsFile: hostsFile:
sources: filePath: {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
- {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
hostsTTL: 30s hostsTTL: 30s
loading: refreshPeriod: 30s
refreshPeriod: 30s
clientLookup: clientLookup:
upstream: 192.168.2.1 upstream: 192.168.2.1
@@ -74,23 +50,10 @@ blocking:
- http://sysctl.org/cameleon/hosts - http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt - https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt - https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
# - https://hosts-file.net/ad_servers.txt - https://hosts-file.net/ad_servers.txt
smarttv: smarttv:
- https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt - https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt
# - https://perflyst.github.io/PiHoleBlocklist/regex.list - https://perflyst.github.io/PiHoleBlocklist/regex.list
- |
# Title: Perflyst's SmartTV Blocklist for Pi-hole - RegEx extension
# Version: 13July2023v1
# Samsung
/(^|\.)giraffic\.com$/
/(^|\.)internetat\.tv$/
/(^|\.)pavv\.co\.kr$/
# /(^|\.)samsungcloudcdn\.com$/ # prevents updates
# /(^|\.)samsungcloudsolution\.com$/ # prevents internet connection
/(^|\.)samsungcloudsolution\.net$/
/(^|\.)samsungelectronics\.com$/
# /(^|\.)samsungotn\.net$/ # prevents updates
/(^|\.)samsungrm\.net$/
wemo: wemo:
- | - |
# Remote commands # Remote commands
@ -101,6 +64,8 @@ blocking:
nat.wemo2.com nat.wemo2.com
# Connectivity checks # Connectivity checks
heartbeat.xwemo.com heartbeat.xwemo.com
malware:
- https://mirror1.malwaredomains.com/files/justdomains
antisocial: antisocial:
- | - |
facebook.com facebook.com
@ -108,17 +73,18 @@ blocking:
reddit.com reddit.com
twitter.com twitter.com
youtube.com youtube.com
custom:
- {{ env "NOMAD_TASK_DIR" }}/block
whiteLists: whiteLists:
custom: # Move to Gitea when deployed internally
- {{ env "NOMAD_TASK_DIR" }}/allow ads:
{{ with nomadVar "nomad/jobs/blocky" -}}
{{ .whitelists_ads.Value | indent 6 }}
{{- end }}
clientGroupsBlock: clientGroupsBlock:
default: default:
- ads - ads
- custom - malware
- smarttv - smarttv
- wemo - wemo
@ -139,7 +105,7 @@ customDNS:
prometheus: prometheus:
enable: true enable: true
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}} {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-tls" -}}
redis: redis:
address: 127.0.0.1:6379 address: 127.0.0.1:6379
# password: "" # password: ""

View File

@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}
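On the side of this compare where the flag exists, jobspecs rendered through templatefile gate their wesher-only network stanzas on it; the pattern looks roughly like the following sketch (the port name and target here are illustrative, not taken from a specific job):

network {
  mode = "bridge"

  port "web" {
    %{~ if use_wesher ~}
    host_network = "wesher"
    %{~ endif ~}
    to = 3000
  }
}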

View File

@ -1,19 +0,0 @@
---
apiVersion: 1
datasources:
- name: HASS Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: hass
jsonData:
dbName: hass
- name: Proxmox Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: proxmox
jsonData:
dbName: proxmox

View File

@ -1,97 +0,0 @@
variable "lego_version" {
default = "4.14.2"
type = string
}
variable "nomad_var_dirsync_version" {
default = "0.0.2"
type = string
}
job "lego" {
type = "batch"
periodic {
cron = "@weekly"
prohibit_overlap = true
}
group "main" {
network {
dns {
servers = ["1.1.1.1", "1.0.0.1"]
}
}
task "main" {
driver = "exec"
config {
# image = "alpine:3"
command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/start.sh"]
}
artifact {
source = "https://github.com/go-acme/lego/releases/download/v${var.lego_version}/lego_v${var.lego_version}_linux_${attr.cpu.arch}.tar.gz"
}
artifact {
source = "https://git.iamthefij.com/iamthefij/nomad-var-dirsync/releases/download/v${var.nomad_var_dirsync_version}/nomad-var-dirsync-linux-${attr.cpu.arch}.tar.gz"
}
template {
data = <<EOH
#! /bin/sh
set -ex
cd ${NOMAD_TASK_DIR}
echo "Read certs from nomad vars"
${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs read .
action=run
if [ -f /.lego/certificates/_.thefij.rocks.crt ]; then
action=renew
fi
echo "Attempt to $action certificates"
${NOMAD_TASK_DIR}/lego \
--accept-tos --pem \
--email=iamthefij@gmail.com \
--domains="*.thefij.rocks" \
--dns="cloudflare" \
$action \
--$action-hook="${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs write .lego" \
EOH
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/lego" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
identity {
env = true
}
resources {
cpu = 50
memory = 100
}
}
}
}

View File

@ -1,23 +0,0 @@
resource "nomad_job" "lego" {
jobspec = file("${path.module}/lego.nomad")
}
resource "nomad_acl_policy" "secrets_certs_write" {
name = "secrets-certs-write"
description = "Write certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["write", "read"]
}
path "secrets/certs" {
capabilities = ["write", "read"]
}
}
}
EOH
job_acl {
job_id = "lego/*"
}
}
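The wildcard job_id covers the child jobs that a periodic batch job dispatches (for example lego/periodic-<timestamp>), which would otherwise not match an exact job ID. On the consuming side, a job granted read on secrets/certs can pull a stored PEM back out with a template along these lines, mirroring the traefik jobspec later in this diff:

template {
  data        = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_crt" }}{{ .contents }}{{ end -}}
EOF
  destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.crt"
  change_mode = "noop"
}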

View File

@ -9,15 +9,11 @@ job "lldap" {
mode = "bridge" mode = "bridge"
port "web" { port "web" {
%{~ if use_wesher ~}
host_network = "wesher" host_network = "wesher"
%{~ endif ~}
} }
port "ldap" { port "ldap" {
%{~ if use_wesher ~}
host_network = "wesher" host_network = "wesher"
%{~ endif ~}
} }
port "tls" {} port "tls" {}
@ -50,75 +46,47 @@ job "lldap" {
driver = "docker" driver = "docker"
config { config {
image = "ghcr.io/lldap/lldap:v0.5" image = "nitnelave/lldap:latest"
ports = ["ldap", "web"] ports = ["ldap", "web"]
args = ["run", "--config-file", "$${NOMAD_TASK_DIR}/lldap_config.toml"] args = ["run", "--config-file", "${NOMAD_SECRETS_DIR}/lldap_config.toml"]
} }
env = { env = {
"LLDAP_VERBOSE" = "true" "LLDAP_VERBOSE" = "true"
"LLDAP_LDAP_PORT" = "$${NOMAD_PORT_ldap}" "LLDAP_LDAP_PORT" = "${NOMAD_PORT_ldap}"
"LLDAP_HTTP_PORT" = "$${NOMAD_PORT_web}" "LLDAP_HTTP_PORT" = "${NOMAD_PORT_web}"
"LLDAP_DATABASE_URL_FILE" = "$${NOMAD_SECRETS_DIR}/database_url.txt"
"LLDAP_KEY_SEED_FILE" = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
"LLDAP_JWT_SECRET_FILE" = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
"LLDAP_USER_PASS_FILE" = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
"LLDAP_SMTP_OPTIONS__PASSWORD_FILE" = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
} }
template { template {
data = <<EOH data = <<EOH
ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}" ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}"
{{ with nomadVar "secrets/ldap" -}} {{ with nomadVar "nomad/jobs/lldap" -}}
database_url = "mysql://{{ .db_user }}:{{ .db_pass }}@127.0.0.1:3306/{{ .db_name }}"
key_seed = "{{ .key_seed }}"
jwt_secret = "{{ .jwt_secret }}"
ldap_user_dn = "{{ .admin_user }}" ldap_user_dn = "{{ .admin_user }}"
ldap_user_email = "{{ .admin_email }}" ldap_user_email = "{{ .admin_email }}"
{{ end -}} ldap_user_pass = "{{ .admin_password }}"
{{ with nomadVar "nomad/jobs/lldap" -}}
[smtp_options] [smtp_options]
from = "{{ .smtp_from }}" from = "{{ .smtp_from }}"
reply_to = "{{ .smtp_reply_to }}" reply_to = "{{ .smtp_reply_to }}"
enable_password_reset = true enable_password_reset = true
{{ end -}} {{- end }}
{{ with nomadVar "secrets/smtp" -}}
server = "{{ .server }}" # TODO: Better access to SMTP creds using nomad ACLs
port = {{ .port }} {{ with nomadVar "nomad/jobs" -}}
tls_required = {{ .tls.Value | toLower }} server = "{{ .smtp_server }}"
user = "{{ .user }}" port = {{ .smtp_port }}
tls_required = {{ .smtp_tls.Value | toLower }}
user = "{{ .smtp_user }}"
password = "{{ .smtp_password }}"
{{ end -}} {{ end -}}
EOH EOH
destination = "$${NOMAD_TASK_DIR}/lldap_config.toml" destination = "${NOMAD_SECRETS_DIR}/lldap_config.toml"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}mysql://{{ .db_user }}:{{ .db_pass }}@127.0.0.1:3306/{{ .db_name }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/database_url.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .key_seed }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .jwt_secret }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
change_mode = "restart" change_mode = "restart"
} }
@ -144,7 +112,7 @@ user = "{{ .user }}"
"2m", "2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done", "until /usr/bin/mysql --defaults-extra-file=${NOMAD_SECRETS_DIR}/my.cnf < ${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
] ]
} }
@ -154,11 +122,12 @@ user = "{{ .user }}"
host=127.0.0.1 host=127.0.0.1
port=3306 port=3306
user=root user=root
{{ with nomadVar "secrets/mysql" -}} # TODO: Use via lesser scoped access
{{ with nomadVar "nomad/jobs/lldap/lldap/bootstrap" -}}
password={{ .mysql_root_password }} password={{ .mysql_root_password }}
{{ end -}} {{ end -}}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf" destination = "${NOMAD_SECRETS_DIR}/my.cnf"
} }
template { template {
@ -177,7 +146,7 @@ GRANT ALL ON `{{ .db_name }}`.*
SELECT 'NOOP'; SELECT 'NOOP';
{{ end -}} {{ end -}}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql" destination = "${NOMAD_SECRETS_DIR}/bootstrap.sql"
} }
resources { resources {
@ -195,9 +164,9 @@ SELECT 'NOOP';
} }
config { config {
image = "iamthefij/stunnel:latest" image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@ -205,6 +174,15 @@ SELECT 'NOOP';
memory = 100 memory = 100
} }
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template { template {
data = <<EOF data = <<EOF
syslog = no syslog = no
@ -215,7 +193,7 @@ delay = yes
accept = {{ env "NOMAD_PORT_tls" }} accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:{{ env "NOMAD_PORT_ldap" }} connect = 127.0.0.1:{{ env "NOMAD_PORT_ldap" }}
ciphers = PSK ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt PSKsecrets = {{ env "NOMAD_TASK_DIR" }}/stunnel_psk.txt
[mysql_client] [mysql_client]
client = yes client = yes
@ -225,23 +203,23 @@ connect = {{ .Address }}:{{ .Port }}
{{- end }} {{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf" destination = "${NOMAD_TASK_DIR}/stunnel.conf"
} }
template { template {
data = <<EOF data = <<EOF
{{ range nomadVarList "secrets/ldap/allowed_psks" -}} {{ with nomadVar "nomad/jobs/lldap/lldap/stunnel" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }} {{ .allowed_psks }}
{{ end -}} {{- end }}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt" destination = "${NOMAD_TASK_DIR}/stunnel_psk.txt"
} }
template { template {
data = <<EOF data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/lldap" }}{{ .psk }}{{ end -}} {{- with nomadVar "nomad/jobs/lldap/lldap/stunnel" }}{{ .mysql_stunnel_psk }}{{ end -}}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt" destination = "${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
} }
} }

View File

@ -3,27 +3,31 @@ auth_enabled: false
server: server:
http_listen_port: 3100 http_listen_port: 3100
common: ingester:
ring: lifecycler:
instance_addr: 127.0.0.1 address: 127.0.0.1
kvstore: ring:
store: inmemory kvstore:
replication_factor: 1 store: inmemory
path_prefix: /tmp/loki replication_factor: 1
final_sleep: 0s
chunk_idle_period: 5m
chunk_retain_period: 30s
max_transfer_retries: 0
schema_config: schema_config:
configs: configs:
- from: 2020-05-15 - from: 2018-04-15
store: boltdb-shipper store: boltdb
object_store: filesystem object_store: filesystem
schema: v11 schema: v11
index: index:
prefix: index_ prefix: index_
period: 24h period: 168h
storage_config: storage_config:
boltdb_shipper: boltdb:
active_index_directory: {{ env "NOMAD_TASK_DIR" }}/index directory: {{ env "NOMAD_TASK_DIR" }}/index
filesystem: filesystem:
directory: {{ env "NOMAD_TASK_DIR" }}/chunks directory: {{ env "NOMAD_TASK_DIR" }}/chunks
@ -34,8 +38,8 @@ limits_config:
reject_old_samples_max_age: 168h reject_old_samples_max_age: 168h
chunk_store_config: chunk_store_config:
max_look_back_period: 168h max_look_back_period: 0s
table_manager: table_manager:
retention_deletes_enabled: true retention_deletes_enabled: false
retention_period: 168h retention_period: 0s

View File

@ -1,19 +1,13 @@
module "loki" { module "loki" {
source = "../services/service" source = "../services/service"
detach = false
name = "loki"
image = "grafana/loki:2.8.7"
args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
name = "loki"
image = "grafana/loki:2.2.1"
args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
service_port = 3100 service_port = 3100
ingress = true ingress = true
use_wesher = var.use_wesher sticky_disk = true
service_check = { # healthcheck = "/ready"
path = "/ready"
}
sticky_disk = true
templates = [ templates = [
{ {
data = file("${path.module}/loki-config.yml") data = file("${path.module}/loki-config.yml")

View File

@ -1,14 +1,21 @@
module "blocky" { module "blocky" {
source = "./blocky" source = "./blocky"
use_wesher = var.use_wesher base_hostname = var.base_hostname
# Not in this module # Not in this module
# depends_on = [module.databases] # depends_on = [module.databases]
} }
module "traefik" { module "traefik" {
source = "./traefik" source = "./traefik"
base_hostname = var.base_hostname
}
module "metrics" {
source = "./metrics"
# Not in this module
# depends_on = [module.databases]
} }
resource "nomad_job" "nomad-client-stalker" { resource "nomad_job" "nomad-client-stalker" {
@ -18,10 +25,12 @@ resource "nomad_job" "nomad-client-stalker" {
resource "nomad_job" "syslog-ng" { resource "nomad_job" "syslog-ng" {
jobspec = file("${path.module}/syslogng.nomad") jobspec = file("${path.module}/syslogng.nomad")
depends_on = [module.loki]
} }
resource "nomad_job" "ddclient" { resource "nomad_job" "ddclient" {
jobspec = file("${path.module}/ddclient.nomad") jobspec = file("${path.module}/ddclient.nomad")
} }
resource "nomad_job" "lldap" {
jobspec = file("${path.module}/lldap.nomad")
}

View File

@ -1,95 +0,0 @@
resource "nomad_job" "exporters" {
jobspec = templatefile("${path.module}/exporters.nomad", {
use_wesher = var.use_wesher,
})
}
resource "nomad_job" "prometheus" {
jobspec = templatefile("${path.module}/prometheus.nomad", {
use_wesher = var.use_wesher,
})
detach = false
}
resource "nomad_job" "grafana" {
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module
use_wesher = var.use_wesher
})
depends_on = [nomad_job.prometheus]
}
resource "nomad_acl_policy" "grafana_smtp_secrets" {
name = "grafana-secrets-smtp"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "grafana_mysql_bootstrap_secrets" {
name = "grafana-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "mysql-bootstrap"
}
}
resource "random_password" "grafana_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "grafana_mysql_psk" {
path = "secrets/mysql/allowed_psks/grafana"
items = {
psk = "grafana:${resource.random_password.grafana_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "grafana_mysql_psk" {
name = "grafana-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/grafana" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "stunnel"
}
}

View File

@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.0"
hashes = [
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
"h1:tAb2gwW+oZ8/t2j7lExdqpNrxmaWsHbyA2crFWClPb0=",
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}

View File

@ -1,20 +1,14 @@
job "exporters" { job "exporters" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "system"
priority = 55
group "promtail" { group "promtail" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 3
network { network {
mode = "bridge" mode = "bridge"
port "promtail" { port "promtail" {
%{~ if use_wesher ~}
host_network = "wesher" host_network = "wesher"
%{~ endif ~}
to = 9080 to = 9080
} }
} }
@ -25,8 +19,8 @@ job "exporters" {
port = "promtail" port = "promtail"
meta { meta {
nomad_dc = "$${NOMAD_DC}" nomad_dc = "${NOMAD_DC}"
nomad_node_name = "$${node.unique.name}" nomad_node_name = "${node.unique.name}"
} }
tags = [ tags = [
@ -44,8 +38,8 @@ job "exporters" {
} }
config { config {
image = "grafana/promtail:2.9.1" image = "grafana/promtail:2.7.1"
args = ["-config.file=$${NOMAD_TASK_DIR}/promtail.yml"] args = ["-config.file=${NOMAD_TASK_DIR}/promtail.yml"]
ports = ["promtail"] ports = ["promtail"]
# Bind mount host machine-id and log directories # Bind mount host machine-id and log directories
@ -133,7 +127,7 @@ scrape_configs:
- source_labels: ['__journal_com_hashicorp_nomad_task_name'] - source_labels: ['__journal_com_hashicorp_nomad_task_name']
target_label: nomad_task_name target_label: nomad_task_name
EOF EOF
destination = "$${NOMAD_TASK_DIR}/promtail.yml" destination = "${NOMAD_TASK_DIR}/promtail.yml"
} }
resources { resources {

View File

@ -8,9 +8,7 @@ job "grafana" {
mode = "bridge" mode = "bridge"
port "web" { port "web" {
%{~ if use_wesher ~}
host_network = "wesher" host_network = "wesher"
%{~ endif ~}
to = 3000 to = 3000
} }
} }
@ -28,6 +26,7 @@ job "grafana" {
tags = [ tags = [
"traefik.enable=true", "traefik.enable=true",
"traefik.http.routers.grafana.entryPoints=websecure", "traefik.http.routers.grafana.entryPoints=websecure",
# "traefik.http.routers.grafana.middlewares=authelia@nomad",
] ]
} }
@ -40,8 +39,8 @@ job "grafana" {
} }
config { config {
image = "iamthefij/stunnel:latest" image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@ -49,6 +48,15 @@ job "grafana" {
memory = 100 memory = 100
} }
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
}
template { template {
data = <<EOF data = <<EOF
syslog = no syslog = no
@ -66,15 +74,17 @@ PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
destination = "$${NOMAD_TASK_DIR}/stunnel.conf" destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
} }
# TODO: Get psk for backup jobs despite multiple job declarations
# Probably should use variable ACLs to grant each node job to this path
template { template {
data = <<EOF data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/grafana" }}{{ .psk }}{{ end -}} {{- with nomadVar "nomad/jobs/grafana/grafana/stunnel" }}{{ .mysql_stunnel_psk }}{{ end -}}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt" destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
} }
} }
task "mysql-bootstrap" { task "grafana-bootstrap" {
driver = "docker" driver = "docker"
lifecycle { lifecycle {
@ -99,7 +109,7 @@ EOF
host=127.0.0.1 host=127.0.0.1
port=3306 port=3306
user=root user=root
{{ with nomadVar "secrets/mysql" -}} {{ with nomadVar "nomad/jobs" -}}
password={{ .mysql_root_password }} password={{ .mysql_root_password }}
{{ end -}} {{ end -}}
EOF EOF
@ -133,25 +143,22 @@ SELECT 'NOOP';
driver = "docker" driver = "docker"
config { config {
image = "grafana/grafana:10.0.10" image = "grafana/grafana:9.4.2"
args = ["--config", "$${NOMAD_ALLOC_DIR}/config/grafana.ini"]
ports = ["web"] ports = ["web"]
} }
env = { env = {
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel", "GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel",
"GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini", "GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini"
"GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning", "GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning"
} }
template { template {
data = <<EOF data = <<EOF
{{ with nomadVar "secrets/smtp" -}}
GF_SMTP_USER={{ .user }}
GF_SMTP_PASSWORD={{ .password }}
{{ end -}}
{{ with nomadVar "nomad/jobs/grafana" -}} {{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }} GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
GF_SMTP_USER={{ .smtp_user }}
GF_SMTP_PASSWORD={{ .smtp_password }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }} GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }} GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }} GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }}
@ -163,7 +170,7 @@ GF_DATABASE_HOST=127.0.0.1:3306
GF_DATABASE_NAME={{ .db_name }} GF_DATABASE_NAME={{ .db_name }}
GF_DATABASE_USER={{ .db_user }} GF_DATABASE_USER={{ .db_user }}
GF_DATABASE_PASSWORD={{ .db_pass }} GF_DATABASE_PASSWORD={{ .db_pass }}
{{ end -}} {{- end }}
SLACK_BOT_URL={{ .slack_bot_url }} SLACK_BOT_URL={{ .slack_bot_url }}
SLACK_BOT_TOKEN={{ .slack_bot_token }} SLACK_BOT_TOKEN={{ .slack_bot_token }}
SLACK_HOOK_URL={{ .slack_hook_url }} SLACK_HOOK_URL={{ .slack_hook_url }}
@ -188,7 +195,7 @@ SLACK_HOOK_URL={{ .slack_hook_url }}
} }
config { config {
image = "alpine:3.17" image = "alpine"
args = ["$${NOMAD_TASK_DIR}/startup.sh"] args = ["$${NOMAD_TASK_DIR}/startup.sh"]
} }
@ -256,7 +263,7 @@ ${file(join("/", [module_path, "grafana", config_file]))}
# Set owner to grafana uid # Set owner to grafana uid
# uid = 472 # uid = 472
# Change template delimeter for dashboard files that use json and have double curly braces and square braces # Change template delimeter for dashboard files that use json and have double curly braces and square braces
%{ if endswith(config_file, ".json") ~} %{ if length(regexall("dashboard", config_file)) > 0 ~}
left_delimiter = "<<<<" left_delimiter = "<<<<"
right_delimiter = ">>>>" right_delimiter = ">>>>"
%{ endif } %{ endif }
@ -272,11 +279,6 @@ ${file(join("/", [module_path, "grafana", config_file]))}
task "grafana-image-renderer" { task "grafana-image-renderer" {
driver = "docker" driver = "docker"
constraint {
attribute = "$${attr.cpu.arch}"
value = "amd64"
}
config { config {
image = "grafana/grafana-image-renderer:3.6.1" image = "grafana/grafana-image-renderer:3.6.1"
ports = ["renderer"] ports = ["renderer"]

View File

@ -20,8 +20,8 @@ data = /var/lib/grafana
# Directory where grafana will automatically scan and look for plugins # Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins ;plugins = /var/lib/grafana/plugins
# folder that contains PROVISIONING config files that grafana will apply on startup and while running. # folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = from_env ; provisioning = /etc/grafana/provisioning
#################################### Server #################################### #################################### Server ####################################
[server] [server]
@ -43,7 +43,7 @@ provisioning = from_env
# The full public facing url you use in browser, used for redirects and emails # The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path) # If you use reverse proxy and sub path specify full url (with sub path)
root_url = https://grafana.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }} root_url = https://grafana.thefij.rocks
# Log web requests # Log web requests
;router_logging = false ;router_logging = false
@ -264,16 +264,12 @@ name = Authelia
client_id = grafana client_id = grafana
client_secret = from_env client_secret = from_env
scopes = openid profile email groups scopes = openid profile email groups
auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization auth_url = https://authelia.thefij.rocks/api/oidc/authorization
token_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/token token_url = https://authelia.thefij.rocks/api/oidc/token
api_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/userinfo api_url = https://authelia.thefij.rocks/api/oidc/userinfo
login_attribute_path = preferred_username login_attribute_path = preferred_username
groups_attribute_path = groups groups_attribute_path = groups
name_attribute_path = name name_attribute_path = name
# Role attribute path is not working
role_attribute_path = contains(groups[*], 'admin') && 'Admin' || contains(groups[*], 'grafana-admin') && 'Admin' || contains(groups[*], 'grafana-editor') && 'Editor' || contains(groups[*], 'developer') && 'Editor'
allow_assign_grafana_admin = true
skip_org_role_sync = true
use_pkce = true use_pkce = true
;team_ids = ;team_ids =
@ -441,7 +437,7 @@ enabled = true
provider = s3 provider = s3
[external_image_storage.s3] [external_image_storage.s3]
endpoint = https://minio.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }} endpoint = https://minio.thefij.rocks
bucket = grafana-images bucket = grafana-images
region = us-east-1 region = us-east-1
path_style_access = true path_style_access = true

View File

@ -104,7 +104,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": false, "exemplar": false,
"expr": "sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": true, "instant": true,
"interval": "", "interval": "",
@ -458,7 +458,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(blocky_blacklist_cache) / sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(blocky_blacklist_cache) / sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": false, "instant": false,
"interval": "", "interval": "",
@ -533,7 +533,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(go_memstats_sys_bytes{job=\"exporters\", consul_service=\"blocky-api\"})/sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(go_memstats_sys_bytes{job=\"exporters\", consul_service=\"blocky-api\"})/sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": false, "instant": false,
"interval": "", "interval": "",
@ -753,7 +753,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(blocky_cache_entry_count)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(blocky_cache_entry_count)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": false, "instant": false,
"interval": "", "interval": "",
@ -1162,7 +1162,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": false, "exemplar": false,
"expr": "sum(time() -blocky_last_list_group_refresh)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(time() -blocky_last_list_group_refresh)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": true, "instant": true,
"interval": "", "interval": "",
@ -1224,7 +1224,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(blocky_prefetch_domain_name_cache_count)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(blocky_prefetch_domain_name_cache_count)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"interval": "", "interval": "",
"legendFormat": "", "legendFormat": "",

View File

@ -0,0 +1,783 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "7.5.5"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "panel",
"id": "piechart",
"name": "Pie chart v2",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "singlestat",
"name": "Singlestat",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "Traefik dashboard prometheus",
"editable": true,
"gnetId": 4475,
"graphTooltip": 0,
"id": null,
"iteration": 1620932097756,
"links": [],
"panels": [
{
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 10,
"title": "$backend stats",
"type": "row"
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 1
},
"id": 2,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value",
"percent"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "traefik_service_requests_total{service=\"$service\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"title": "$service return code",
"type": "piechart"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"format": "ms",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 1
},
"id": 4,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true
},
"tableColumn": "",
"targets": [
{
"exemplar": true,
"expr": "sum(traefik_service_request_duration_seconds_sum{service=\"$service\"}) / sum(traefik_service_requests_total{service=\"$service\"}) * 1000",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "",
"refId": "A"
}
],
"thresholds": "",
"title": "$service response time",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "avg"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 8
},
"hiddenSeries": false,
"id": 3,
"legend": {
"alignAsTable": true,
"avg": true,
"current": false,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_service_requests_total{service=\"$service\"}[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "Total requests $service",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Total requests over 5min $service",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 15
},
"id": 12,
"panels": [],
"title": "Global stats",
"type": "row"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 16
},
"hiddenSeries": false,
"id": 5,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Status code 200 over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 16
},
"hiddenSeries": false,
"id": 6,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code!=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ method }} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Others status code over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 23
},
"id": 7,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_service_requests_total[5m])) by (service) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ service }}",
"refId": "A"
}
],
"title": "Requests by service",
"type": "piechart"
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 23
},
"id": 8,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_entrypoint_requests_total{entrypoint =~ \"$entrypoint\"}[5m])) by (entrypoint) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ entrypoint }}",
"refId": "A"
}
],
"title": "Requests by protocol",
"type": "piechart"
}
],
"schemaVersion": 27,
"style": "dark",
"tags": [
"traefik",
"prometheus"
],
"templating": {
"list": [
{
"allValue": null,
"current": {},
"datasource": "${DS_PROMETHEUS}",
"definition": "label_values(service)",
"description": null,
"error": null,
"hide": 0,
"includeAll": false,
"label": null,
"multi": false,
"name": "service",
"options": [],
"query": {
"query": "label_values(service)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {},
"datasource": "${DS_PROMETHEUS}",
"definition": "",
"description": null,
"error": null,
"hide": 0,
"includeAll": true,
"label": null,
"multi": true,
"name": "entrypoint",
"options": [],
"query": {
"query": "label_values(entrypoint)",
"refId": "Prometheus-entrypoint-Variable-Query"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Traefik",
"uid": "qPdAviJmz",
"version": 10
}

View File

@ -5,4 +5,4 @@ providers:
type: file type: file
disableDeletion: false disableDeletion: false
options: options:
path: {{ env "NOMAD_ALLOC_DIR" }}/config/provisioning/dashboards/default path: /etc/grafana/provisioning/dashboards/default

core/metrics/metrics.tf Normal file
View File

@ -0,0 +1,27 @@
resource "nomad_job" "exporters" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/exporters.nomad")
}
resource "nomad_job" "prometheus" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/prometheus.nomad")
}
resource "nomad_job" "grafana" {
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module
})
depends_on = [nomad_job.prometheus]
}
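These jobspecs are loaded with file() where they take no inputs, and with templatefile() where grafana.nomad inlines config files from the module directory. When a jobspec instead declares its own HCL2 variables, this provider version passes them through the hcl2 vars block, as the traefik module further down in this diff does:

resource "nomad_job" "traefik" {
  hcl2 {
    enabled = true
    vars = {
      "base_hostname" = var.base_hostname,
    }
  }

  jobspec = file("${path.module}/traefik.nomad")
}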

View File

@ -8,16 +8,12 @@ job "prometheus" {
mode = "bridge" mode = "bridge"
port "web" { port "web" {
%{~ if use_wesher ~}
host_network = "wesher" host_network = "wesher"
%{~ endif ~}
to = 9090 to = 9090
} }
port "pushgateway" { port "pushgateway" {
%{~ if use_wesher ~}
host_network = "wesher" host_network = "wesher"
%{~ endif ~}
static = 9091 static = 9091
} }
} }
@ -37,36 +33,12 @@ job "prometheus" {
"traefik.enable=true", "traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure", "traefik.http.routers.prometheus.entryPoints=websecure",
] ]
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
} }
service { service {
name = "pushgateway" name = "pushgateway"
provider = "nomad" provider = "nomad"
port = "pushgateway" port = "pushgateway"
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
} }
task "prometheus" { task "prometheus" {
@ -76,8 +48,8 @@ job "prometheus" {
image = "prom/prometheus:v2.43.0" image = "prom/prometheus:v2.43.0"
ports = ["web"] ports = ["web"]
args = [ args = [
"--config.file=$${NOMAD_TASK_DIR}/prometheus.yml", "--config.file=${NOMAD_TASK_DIR}/prometheus.yml",
"--storage.tsdb.path=$${NOMAD_ALLOC_DIR}/data/tsdb", "--storage.tsdb.path=${NOMAD_ALLOC_DIR}/data/tsdb",
"--web.listen-address=0.0.0.0:9090", "--web.listen-address=0.0.0.0:9090",
"--web.console.libraries=/usr/share/prometheus/console_libraries", "--web.console.libraries=/usr/share/prometheus/console_libraries",
"--web.console.templates=/usr/share/prometheus/consoles", "--web.console.templates=/usr/share/prometheus/consoles",
@ -140,7 +112,7 @@ scrape_configs:
EOF EOF
change_mode = "signal" change_mode = "signal"
change_signal = "SIGHUP" change_signal = "SIGHUP"
destination = "$${NOMAD_TASK_DIR}/prometheus.yml" destination = "${NOMAD_TASK_DIR}/prometheus.yml"
} }
resources { resources {
@ -156,7 +128,7 @@ scrape_configs:
image = "prom/pushgateway" image = "prom/pushgateway"
ports = ["pushgateway"] ports = ["pushgateway"]
args = [ args = [
"--persistence.file=$${NOMAD_ALLOC_DIR}/pushgateway-persistence", "--persistence.file=${NOMAD_ALLOC_DIR}/pushgateway-persistence",
] ]
} }

View File

@ -24,8 +24,7 @@ job "nomad-client-stalker" {
resources { resources {
cpu = 10 cpu = 10
memory = 15 memory = 10
memory_max = 30
} }
} }
} }

View File

@ -27,11 +27,13 @@ job "syslogng" {
driver = "docker" driver = "docker"
meta = { meta = {
"diun.sort_tags" = "semver"
"diun.watch_repo" = true "diun.watch_repo" = true
"diun.include_tags" = "^[0-9]+\\.[0-9]+\\.[0-9]+$"
} }
config { config {
image = "grafana/promtail:2.9.1" image = "grafana/promtail:2.7.1"
ports = ["main", "metrics"] ports = ["main", "metrics"]
args = ["--config.file=/etc/promtail/promtail.yml"] args = ["--config.file=/etc/promtail/promtail.yml"]
@ -70,7 +72,7 @@ EOF
resources { resources {
cpu = 50 cpu = 50
memory = 50 memory = 20
} }
} }
} }
@ -134,7 +136,7 @@ EOF
resources { resources {
cpu = 50 cpu = 50
memory = 50 memory = 10
} }
} }
} }

View File

@ -2,20 +2,20 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.0" version = "1.4.17"
hashes = [ hashes = [
"h1:ek0L7fA+4R1/BXhbutSRqlQPzSZ5aY/I2YfVehuYeEU=", "h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:39ba4d4fc9557d4d2c1e4bf866cf63973359b73e908cce237c54384512bdb454", "zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:40d2b66e3f3675e6b88000c145977c1d5288510c76b702c6c131d9168546c605", "zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:40fbe575d85a083f96d4703c6b7334e9fc3e08e4f1d441de2b9513215184ebcc", "zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:42ce6db79e2f94557fae516ee3f22e5271f0b556638eb45d5fbad02c99fc7af3", "zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:4acf63dfb92f879b3767529e75764fef68886521b7effa13dd0323c38133ce88", "zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:72cf35a13c2fb542cd3c8528826e2390db9b8f6f79ccb41532e009ad140a3269", "zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8b8bcc136c05916234cb0c3bcc3d48fda7ca551a091ad8461ea4ab16fb6960a3", "zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:8e1c2f924eae88afe7ac83775f000ae8fd71a04e06228edf7eddce4df2421169", "zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:abc6e725531fc06a8e02e84946aaabc3453ecafbc1b7a442ea175db14fd9c86a", "zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:b735fcd1fb20971df3e92f81bb6d73eef845dcc9d3d98e908faa3f40013f0f69", "zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:ce59797282505d872903789db8f092861036da6ec3e73f6507dac725458a5ec9", "zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
] ]
} }

View File

@ -1,3 +1,9 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
job "traefik" { job "traefik" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "service"
@ -14,15 +20,13 @@ job "traefik" {
update { update {
max_parallel = 1 max_parallel = 1
canary = 1 # canary = 1
auto_promote = false # auto_promote = true
auto_revert = true auto_revert = true
min_healthy_time = "30s"
healthy_deadline = "5m"
} }
group "traefik" { group "traefik" {
count = 2 count = 1
network { network {
port "web" { port "web" {
@ -37,17 +41,12 @@ job "traefik" {
static = 514 static = 514
} }
port "gitssh" {
static = 2222
}
port "metrics" {}
dns { dns {
servers = [ servers = [
"192.168.2.101", "192.168.2.101",
"192.168.2.102", "192.168.2.102",
"192.168.2.30", "192.168.2.30",
"192.168.2.170",
] ]
} }
} }
@ -57,42 +56,39 @@ job "traefik" {
sticky = true sticky = true
} }
service {
name = "traefik"
provider = "nomad"
port = "web"
check {
type = "http"
path = "/ping"
port = "web"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
task "traefik" { task "traefik" {
driver = "docker" driver = "docker"
service { meta = {
name = "traefik" "diun.sort_tags" = "semver"
provider = "nomad" "diun.watch_repo" = true
port = "web" "diun.include_tags" = "^[0-9]+\\.[0-9]+$"
check {
type = "http"
path = "/ping"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
service {
name = "traefik-metrics"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape",
]
} }
config { config {
image = "traefik:3.0" image = "traefik:2.9"
ports = ["web", "websecure", "syslog", "gitssh", "metrics"] ports = ["web", "websecure"]
network_mode = "host" network_mode = "host"
mount { mount {
@ -106,20 +102,6 @@ job "traefik" {
target = "/etc/traefik/usersfile" target = "/etc/traefik/usersfile"
source = "secrets/usersfile" source = "secrets/usersfile"
} }
mount {
type = "bind"
target = "/etc/traefik/certs"
source = "secrets/certs"
}
}
env = {
TRAEFIK_PROVIDERS_NOMAD_ENDPOINT_TOKEN = "${NOMAD_TOKEN}"
}
identity {
env = true
} }
template { template {
@ -142,9 +124,12 @@ job "traefik" {
[entryPoints.websecure] [entryPoints.websecure]
address = ":443" address = ":443"
[entryPoints.websecure.http.tls] [entryPoints.websecure.http.tls]
certResolver = "letsEncrypt"
[[entryPoints.websecure.http.tls.domains]]
main = "*.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>"
[entryPoints.metrics] [entryPoints.metrics]
address = ":<< env "NOMAD_PORT_metrics" >>" address = ":8989"
[entryPoints.syslogtcp] [entryPoints.syslogtcp]
address = ":514" address = ":514"
@ -152,9 +137,6 @@ job "traefik" {
[entryPoints.syslogudp] [entryPoints.syslogudp]
address = ":514/udp" address = ":514/udp"
[entryPoints.gitssh]
address = ":2222"
[api] [api]
dashboard = true dashboard = true
@ -174,9 +156,31 @@ job "traefik" {
exposedByDefault = false exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)" defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
[providers.nomad.endpoint] [providers.nomad.endpoint]
address = "unix:///secrets/api.sock" address = "http://<< env "attr.unique.network.ip-address" >>:4646"
<< if nomadVarExists "nomad/jobs/traefik" ->>
[certificatesResolvers.letsEncrypt.acme]
email = "<< with nomadVar "nomad/jobs/traefik" >><< .acme_email >><< end >>"
# Store in /local because /secrets doesn't persist with ephemeral disk
storage = "/local/acme.json"
[certificatesResolvers.letsEncrypt.acme.dnsChallenge]
provider = "cloudflare"
resolvers = ["1.1.1.1:53", "8.8.8.8:53"]
delayBeforeCheck = 0
<<- end >>
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/traefik.toml" destination = "local/config/traefik.toml"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/traefik" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
} }
template { template {
@ -187,39 +191,23 @@ job "traefik" {
entryPoints = ["websecure"] entryPoints = ["websecure"]
service = "nomad" service = "nomad"
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)" rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
[http.routers.hass]
{{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}}
{{ range $service, $url := . }}
[http.routers.{{ $service }}]
entryPoints = ["websecure"] entryPoints = ["websecure"]
service = "{{ $service }}" service = "hass"
rule = "Host(`{{ $service }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)" rule = "Host(`hass.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ end }}
{{- end }}{{ end }}{{ end }}
[http.services] [http.services]
[http.services.nomad] [http.services.nomad]
[http.services.nomad.loadBalancer] [http.services.nomad.loadBalancer]
[[http.services.nomad.loadBalancer.servers]] [[http.services.nomad.loadBalancer.servers]]
url = "http://127.0.0.1:4646" url = "http://127.0.0.1:4646"
[http.services.hass]
{{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}} [http.services.hass.loadBalancer]
{{ range $service, $url := . }} [[http.services.hass.loadBalancer.servers]]
[http.services.{{ $service }}] url = "http://192.168.3.65:8123"
[http.services.{{ $service }}.loadBalancer]
[[http.services.{{ $service }}.loadBalancer.servers]]
url = "{{ $url }}"
{{ end }}
{{- end }}{{ end }}{{ end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml" destination = "local/config/conf/route-hashi.toml"
change_mode = "noop" change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
} }
template { template {
@ -255,39 +243,7 @@ job "traefik" {
{{ end -}} {{ end -}}
{{- end }} {{- end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-syslog-ng.toml" destination = "local/config/conf/route-syslog-ng.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_crt" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.crt"
change_mode = "noop"
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_key" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.key"
change_mode = "noop"
}
template {
data = <<EOH
[[tls.certificates]]
certFile = "/etc/traefik/certs/_.thefij.rocks.crt"
keyFile = "/etc/traefik/certs/_.thefij.rocks.key"
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/dynamic-tls.toml"
change_mode = "noop" change_mode = "noop"
} }
@ -297,11 +253,12 @@ EOF
{{ with nomadVar "nomad/jobs/traefik" }} {{ with nomadVar "nomad/jobs/traefik" }}
{{ if .usersfile }} {{ if .usersfile }}
[http.middlewares.basic-auth.basicAuth] [http.middlewares.basic-auth.basicAuth]
# TODO: Reference secrets mount
usersFile = "/etc/traefik/usersfile" usersFile = "/etc/traefik/usersfile"
{{- end }} {{- end }}
{{- end }} {{- end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/middlewares.toml" destination = "local/config/conf/middlewares.toml"
change_mode = "noop" change_mode = "noop"
} }
@ -311,7 +268,7 @@ EOF
{{ .usersfile }} {{ .usersfile }}
{{- end }} {{- end }}
EOH EOH
destination = "${NOMAD_SECRETS_DIR}/usersfile" destination = "secrets/usersfile"
change_mode = "noop" change_mode = "noop"
} }

View File

@ -1,36 +1,16 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
resource "nomad_job" "traefik" { resource "nomad_job" "traefik" {
hcl2 {
enabled = true
vars = {
"base_hostname" = var.base_hostname,
}
}
jobspec = file("${path.module}/traefik.nomad") jobspec = file("${path.module}/traefik.nomad")
} }
resource "nomad_acl_policy" "treafik_secrets_certs_read" {
name = "traefik-secrets-certs-read"
description = "Read certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["read"]
}
path "secrets/certs" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "traefik_query_jobs" {
name = "traefik-query-jobs"
description = "Allow traefik to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
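Both policies are scoped with job_acl to the traefik job itself, so no long-lived token has to be provisioned. The jobspec side of this pairing appears earlier in this diff: the traefik task opts into workload identity and hands the injected token to the Nomad provider, roughly:

identity {
  env = true
}

env = {
  TRAEFIK_PROVIDERS_NOMAD_ENDPOINT_TOKEN = "${NOMAD_TOKEN}"
}

The provider endpoint in traefik.toml is then pointed at the task API socket, unix:///secrets/api.sock.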

View File

@ -3,9 +3,3 @@ variable "base_hostname" {
description = "Base hostname to serve content from" description = "Base hostname to serve content from"
default = "dev.homelab" default = "dev.homelab"
} }
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

View File

@ -1,40 +1,40 @@
# This file is maintained automatically by "terraform init". # This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/consul" {
version = "2.0.0" version = "2.15.1"
hashes = [ hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=", "h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72", "zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199", "zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a", "zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc", "zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf", "zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251", "zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc", "zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599", "zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c", "zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543", "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e", "zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
] ]
} }
provider "registry.terraform.io/hashicorp/random" { provider "registry.terraform.io/hashicorp/nomad" {
version = "3.5.1" version = "1.4.17"
hashes = [ hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=", "h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64", "zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d", "zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831", "zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3", "zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f", "zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b", "zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2", "zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865", "zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03", "zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602", "zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
] ]
} }

View File

@ -1,123 +0,0 @@
resource "nomad_job" "lldap" {
jobspec = templatefile("${path.module}/lldap.nomad", {
use_wesher = var.use_wesher,
})
depends_on = [resource.nomad_job.mysql-server]
# Block until deployed as there are services dependent on this one
detach = false
}
# Give access to ldap secrets
resource "nomad_acl_policy" "lldap_ldap_secrets" {
name = "lldap-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap/*" {
capabilities = ["read"]
}
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "lldap_ldap_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_ldap_psk" {
path = "secrets/ldap/allowed_psks/ldap"
items = {
psk = "lldap:${resource.random_password.lldap_ldap_psk.result}"
}
}
# Give access to smtp secrets
resource "nomad_acl_policy" "lldap_smtp_secrets" {
name = "lldap-secrets-smtp"
description = "Give access to SMTP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "lldap"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "lldap_mysql_bootstrap_secrets" {
name = "lldap-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "bootstrap"
}
}
resource "random_password" "lldap_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_mysql_psk" {
path = "secrets/mysql/allowed_psks/lldap"
items = {
psk = "lldap:${resource.random_password.lldap_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "lldap_mysql_psk" {
name = "lldap-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/lldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "stunnel"
}
}

46
databases/main.tf Normal file
View File

@ -0,0 +1,46 @@
resource "nomad_job" "mysql-server" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/mysql.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_job" "postgres-server" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/postgres.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_job" "redis" {
for_each = toset(["blocky", "authelia"])
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/redis.nomad",
{
name = each.key,
}
)
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_job" "rediscommander" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/rediscommander.nomad")
}

View File

@ -81,9 +81,9 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:latest" image = "alpine:3.17"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@ -91,6 +91,15 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
memory = 100 memory = 100
} }
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel ${NOMAD_TASK_DIR}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template { template {
data = <<EOF data = <<EOF
syslog = no syslog = no
@ -108,9 +117,9 @@ PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
template { template {
data = <<EOF data = <<EOF
{{ range nomadVarList "secrets/mysql/allowed_psks" -}} {{ with nomadVar "nomad/jobs/mysql-server" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }} {{ .allowed_psks }}
{{ end -}} {{- end }}
EOF EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt" destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
} }

View File

@ -1,41 +0,0 @@
resource "nomad_job" "mysql-server" {
jobspec = file("${path.module}/mysql.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_mysql" {
name = "secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
path "secrets/mysql/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.mysql-server.id
job_id = "mysql-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "mysql_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_mysql_psk" {
path = "secrets/mysql/allowed_psks/mysql"
items = {
psk = "mysql:${resource.random_password.mysql_mysql_psk.result}"
}
}

View File

@ -73,8 +73,7 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
resources { resources {
cpu = 500 cpu = 500
memory = 700 memory = 400
memory_max = 1200
} }
} }
@ -82,9 +81,9 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:latest" image = "alpine:3.17"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@ -92,6 +91,15 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
memory = 100 memory = 100
} }
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel ${NOMAD_TASK_DIR}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template { template {
data = <<EOF data = <<EOF
syslog = no syslog = no
@ -109,9 +117,9 @@ PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
template { template {
data = <<EOF data = <<EOF
{{ range nomadVarList "secrets/postgres/allowed_psks" -}} {{ with nomadVar "nomad/jobs/postgres-server/postgres-server/stunnel" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }} {{ .allowed_psks }}
{{ end -}} {{- end }}
EOF EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt" destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
} }

View File

@ -1,41 +0,0 @@
resource "nomad_job" "postgres-server" {
jobspec = file("${path.module}/postgres.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_postgres" {
name = "secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
path "secrets/postgres/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.postgres-server.id
job_id = "postgres-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "postgres_postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_postgres_psk" {
path = "secrets/postgres/allowed_psks/postgres"
items = {
psk = "postgres:${resource.random_password.postgres_postgres_psk.result}"
}
}

View File

@ -35,7 +35,7 @@ job "redis-${name}" {
resources { resources {
cpu = 100 cpu = 100
memory = 64 memory = 128
memory_max = 512 memory_max = 512
} }
} }
@ -44,14 +44,23 @@ job "redis-${name}" {
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:latest" image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
cpu = 50 cpu = 100
memory = 15 memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel $${NOMAD_TASK_DIR}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
} }
template { template {

View File

@ -1,12 +0,0 @@
resource "nomad_job" "redis" {
for_each = toset(["blocky", "authelia"])
jobspec = templatefile("${path.module}/redis.nomad",
{
name = each.key,
}
)
# Block until deployed as there are services dependent on this one
detach = false
}

View File

@ -0,0 +1,99 @@
job "rediscommander" {
datacenters = ["dc1"]
type = "service"
group "rediscommander" {
count = 1
network {
mode = "bridge"
port "main" {
host_network = "wesher"
to = 8081
}
}
service {
name = "rediscommander"
provider = "nomad"
port = "main"
tags = [
"traefik.enable=true",
"traefik.http.routers.rediscommander.entryPoints=websecure",
]
}
task "rediscommander" {
driver = "docker"
config {
image = "rediscommander/redis-commander:latest"
ports = ["main"]
}
template {
data = <<EOH
REDIS_HOSTS=stunnel:127.0.0.1:6379
EOH
env = true
destination = "env"
}
resources {
cpu = 50
memory = 50
}
}
task "redis-stunnel" {
driver = "docker"
config {
image = "alpine:3.17"
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[redis_client]
client = yes
accept = 127.0.0.1:6379
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/rediscommander" -}}
{{ .redis_stunnel_psk }}
{{- end }}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
}
}

View File

@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

34
main.tf
View File

@ -1,34 +0,0 @@
module "databases" {
source = "./databases"
use_wesher = var.use_wesher
}
module "core" {
source = "./core"
base_hostname = var.base_hostname
use_wesher = var.use_wesher
# Metrics and Blocky depend on databases
depends_on = [module.databases]
}
module "services" {
source = "./services"
base_hostname = var.base_hostname
use_wesher = var.use_wesher
# NOTE: It may be possible to flip this and core so core templates don't
# need to be rerendered every time a service goes up or down.
depends_on = [module.databases, module.core]
}
module "backups" {
source = "./backups"
use_wesher = var.use_wesher
depends_on = [module.databases, module.services, module.core]
}

View File

@ -1,128 +0,0 @@
#! /usr/bin/env python3
from os import environ
from time import sleep
from typing import Any
from typing import cast
from argparse import ArgumentParser
import requests
NOMAD_ADDR = environ.get("NOMAD_ADDR", "http://127.0.0.1:4646")
NOMAD_TOKEN = environ.get("NOMAD_TOKEN")
def nomad_req(
*path: str,
params: dict[str, Any] | None = None,
data: dict[str, Any] | None = None,
method="GET",
) -> list[dict[str, Any]] | dict[str, Any] | str:
headers = {
"Content-Type": "application/json",
}
if NOMAD_TOKEN:
headers["X-Nomad-Token"] = NOMAD_TOKEN
response = requests.request(
method,
f"{NOMAD_ADDR}/v1/{'/'.join(path)}",
params=params,
json=data,
headers=headers,
)
try:
response.raise_for_status()
except requests.exceptions.RequestException as ex:
print(response.text)
raise ex
try:
return response.json()
except requests.exceptions.JSONDecodeError:
return response.text
def wait_for_job_alloc_status(job_id: str, status: str):
allocs = nomad_req("job", job_id, "allocations")
allocs = cast(list[dict[str, Any]], allocs)
while not all(alloc["ClientStatus"] == status for alloc in allocs):
print(f"Waiting for all allocs to reach {status}...")
sleep(5)
allocs = nomad_req("job", job_id, "allocations")
allocs = cast(list[dict[str, Any]], allocs)
def wait_for_eval_status(eval_id: str, status: str):
eval = nomad_req("evaluation", eval_id)
eval = cast(dict[str, Any], eval)
while eval["Status"] != status:
print(f"Waiting for eval to reach {status}...")
sleep(5)
eval = nomad_req("evaluation", eval_id)
eval = cast(dict[str, Any], eval)
parser = ArgumentParser(
description="Execute one off backups and restores of services",
)
parser.add_argument("service_name", help="Name of the service to backup or restore")
parser.add_argument("-a", "--action", default="backup", choices=("backup", "restore"), help="Action to take, backup or restore")
parser.add_argument("-s", "--snapshot", default="latest", help="Backup snapshot to restore, if restore is the chosen action")
parser.add_argument("-x", "--extra-safe", action="store_true", help="Perform extra safe backup or restore by stoping target job first")
args = parser.parse_args()
service_name = args.service_name
service_info = nomad_req("service", service_name, params={"choose": "1|backups"})
if not service_info:
print(f"Could not find service {service_name}")
exit(1)
service_info = cast(list[dict[str, Any]], service_info)
node_id = service_info[0]["NodeID"]
job_id = service_info[0]["JobID"]
node = nomad_req("node", node_id)
node = cast(dict[str, Any], node)
node_name = node["Name"]
backup_job_name = f"backup-oneoff-{node_name}"
backup_job = nomad_req("job", backup_job_name)
if not backup_job:
print(f"Could not find backup job {backup_job_name} for {service_name}")
if args.extra_safe:
print("Stopping job allocs")
stop_job = nomad_req("job", job_id, method="DELETE")
print(stop_job)
wait_for_job_alloc_status(job_id, "complete")
backup_job = cast(dict[str, Any], backup_job)
backup_job_id = backup_job["ID"]
dispatch = nomad_req(
"job",
backup_job_id,
"dispatch",
data={
"Payload": None,
"Meta": {
"job_name": service_name,
"task": args.action,
"snapshot": args.snapshot,
},
},
method="POST",
)
dispatch = cast(dict[str, Any], dispatch)
print(dispatch)
if args.extra_safe:
print(f"Wait for {args.action} to finish")
wait_for_eval_status(dispatch["EvalID"], "complete")
print("Backup complete. Verify success and restart job")
# If auto restarting, get versions and "revert" to version n-1 since n will be the recently stopped version
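# A possible shape for that follow-up, added here only as a hedged, untested
# sketch (not part of the original script). It assumes Nomad's standard
# "list job versions" and "revert job" endpoints and reuses the nomad_req
# helper above; the job was only stopped when --extra-safe was set.
if args.extra_safe:
    versions = cast(dict[str, Any], nomad_req("job", job_id, "versions"))
    latest_version = versions["Versions"][0]["Version"]
    # Versions are returned newest first; revert to the one prior to the stop
    nomad_req(
        "job",
        job_id,
        "revert",
        data={"JobID": job_id, "JobVersion": latest_version - 1},
        method="POST",
    )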

View File

@ -1,99 +0,0 @@
#! /usr/bin/env python3
from argparse import ArgumentParser
from os import environ
from typing import Any
from typing import cast
import requests
NOMAD_ADDR = environ.get("NOMAD_ADDR", "http://127.0.0.1:4646")
NOMAD_TOKEN = environ.get("NOMAD_TOKEN")
def nomad_req(
*path: str, params: dict[str, Any] | None = None, method="GET"
) -> list[dict[str, Any]] | dict[str, Any] | str:
headers = {}
if NOMAD_TOKEN:
headers["X-Nomad-Token"] = NOMAD_TOKEN
response = requests.request(
method,
f"{NOMAD_ADDR}/v1/{'/'.join(path)}",
params=params,
headers=headers,
)
response.raise_for_status()
try:
return response.json()
except requests.exceptions.JSONDecodeError:
return response.text
def extract_job_services(job: dict[str, Any]) -> dict[str, str]:
services: dict[str, str] = dict()
for group in job["TaskGroups"]:
for service in group.get("Services") or []:
services[service["Name"]] = group["Name"]
for task in group["Tasks"]:
for service in task.get("Services") or []:
services[service["Name"]] = group["Name"]
return services
exit_code = 0
parser = ArgumentParser(
description="Checks for missing services and optionally restarts their allocs.",
)
parser.add_argument("-r", "--restart", action="store_true", help="Restart allocs for missing services")
args = parser.parse_args()
for job in nomad_req("jobs"):
job = cast(dict[str, Any], job)
if job["Type"] in ("batch", "sysbatch"):
continue
if job["Status"] != "running":
print(f"WARNING: job {job['Name']} is {job['Status']}")
continue
job_detail = nomad_req("job", job["ID"])
job_detail = cast(dict[str, Any], job_detail)
expected_services = extract_job_services(job_detail)
found_services: set[str] = set()
for service in nomad_req("job", job_detail["ID"], "services"):
service = cast(dict[str, Any], service)
found_services.add(service["ServiceName"])
missing_services = set(expected_services) - found_services
restart_groups: set[str] = set()
for missing_service in missing_services:
print(f"ERROR: Missing service {missing_service} for job {job_detail['Name']}")
# print(job)
exit_code = 1
# Add group associated with missing service to set
restart_groups.add(expected_services[missing_service])
if not restart_groups or not args.restart:
continue
# Get allocs for groups that are missing services
restart_allocs: set[str] = set()
for allocation in nomad_req("job", job_detail["ID"], "allocations"):
allocation = cast(dict[str, Any], allocation)
if allocation["ClientStatus"] == "running" and allocation["TaskGroup"] in restart_groups:
restart_allocs.add(allocation["ID"])
# Restart allocs associated with missing services
for allocation in restart_allocs:
print(f"INFO: Restarting allocation {allocation}")
nomad_req("client", "allocation", allocation, "restart")
exit(exit_code)

View File

@ -1,72 +0,0 @@
#! /usr/bin/env python3
from argparse import ArgumentParser
from os import environ
from typing import Any
from typing import cast
import requests
NOMAD_ADDR = environ.get("NOMAD_ADDR", "http://127.0.0.1:4646")
NOMAD_TOKEN = environ.get("NOMAD_TOKEN")
def nomad_req(
*path: str, params: dict[str, Any] | None = None, method="GET"
) -> list[dict[str, Any]] | dict[str, Any] | str:
headers = {}
if NOMAD_TOKEN:
headers["X-Nomad-Token"] = NOMAD_TOKEN
response = requests.request(
method,
f"{NOMAD_ADDR}/v1/{'/'.join(path)}",
params=params,
headers=headers,
)
response.raise_for_status()
try:
return response.json()
except requests.exceptions.JSONDecodeError:
return response.text
exit_code = 0
parser = ArgumentParser(
description="Checks for orphaned services and optionally deletes them.",
)
parser.add_argument("-d", "--delete", action="store_true", help="Delete orphan services")
args = parser.parse_args()
for namespace in nomad_req("services"):
namespace = cast(dict[str, Any], namespace)
for service in namespace["Services"]:
service_name = service["ServiceName"]
for service_instance in nomad_req("service", service_name):
service_instance = cast(dict[str, Any], service_instance)
service_id = service_instance["ID"]
alloc_id = service_instance["AllocID"]
alloc_found = True
try:
alloc = nomad_req("allocation", alloc_id)
continue
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
alloc_found = False
message = f"alloc {alloc_id} not found for {service_name}."
if args.delete:
message += f" Deleting {service_id}"
print(message)
else:
raise e
if not alloc_found and args.delete:
nomad_req("service", service_name, service_id, method="DELETE")
exit(exit_code)

5
services.tf Normal file
View File

@ -0,0 +1,5 @@
module "services" {
source = "./services"
depends_on = [module.databases, module.core]
}

View File

@ -2,39 +2,20 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.1" version = "1.4.19"
hashes = [ hashes = [
"h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=", "h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
"zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb", "zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
"zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1", "zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
"zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9", "zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
"zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa", "zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
"zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d", "zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
"zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f", "zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39", "zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
"zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082", "zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
"zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196", "zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
"zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f", "zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
"zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
] ]
} }

View File

@ -6,7 +6,6 @@ module "adminer" {
ingress = true ingress = true
service_port = 8080 service_port = 8080
use_wesher = var.use_wesher
use_mysql = true use_mysql = true
use_postgres = true use_postgres = true

View File

@ -0,0 +1,21 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}

View File

@ -31,9 +31,7 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
mode = "bridge" mode = "bridge"
port "metrics" { port "metrics" {
%{~ if use_wesher ~}
host_network = "wesher" host_network = "wesher"
%{~ endif ~}
to = 8080 to = 8080
} }
} }
@ -44,11 +42,6 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
source = "all-volumes" source = "all-volumes"
} }
ephemeral_disk {
# Try to keep restic cache intact
sticky = true
}
service { service {
name = "backup" name = "backup"
provider = "nomad" provider = "nomad"
@ -62,8 +55,6 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
task "backup" { task "backup" {
driver = "docker" driver = "docker"
shutdown_delay = "5m"
volume_mount { volume_mount {
volume = "all-volumes" volume = "all-volumes"
destination = "/data" destination = "/data"
@ -71,7 +62,7 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
} }
config { config {
image = "iamthefij/resticscheduler:0.4.0" image = "iamthefij/resticscheduler:0.2.0"
ports = ["metrics"] ports = ["metrics"]
args = [ args = [
%{ if batch_node != null ~} %{ if batch_node != null ~}
@ -87,97 +78,55 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
] ]
} }
action "unlockenv" {
command = "sh"
args = ["-c", "/bin/resticscheduler -once -unlock all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
}
action "unlocktmpl" {
command = "/bin/resticscheduler"
args = ["-once", "-unlock", "all", "{{ env 'NOMAD_TASK_DIR' }}/node-jobs.hcl"]
}
action "unlockhc" {
command = "/bin/resticscheduler"
args = ["-once", "-unlock", "all", "/local/node-jobs.hcl"]
}
env = { env = {
RCLONE_CHECKERS = "2" "RCLONE_CHECKERS" = "2"
RCLONE_TRANSFERS = "2" "RCLONE_TRANSFERS" = "2"
RCLONE_FTP_CONCURRENCY = "5" "RCLONE_FTP_CONCURRENCY" = "5"
RESTIC_CACHE_DIR = "$${NOMAD_ALLOC_DIR}/data"
TZ = "America/Los_Angeles"
} }
template { template {
data = <<EOF data = <<EOF
MYSQL_HOST=127.0.0.1 MYSQL_HOST=127.0.0.1
MYSQL_PORT=3306 MYSQL_PORT=3306
{{ with nomadVar "secrets/mysql" }} # TODO: Move this to new mysql root pass path
{{ with nomadVar "nomad/jobs" }}
MYSQL_USER=root MYSQL_USER=root
MYSQL_PASSWORD={{ .mysql_root_password }} MYSQL_PASSWORD={{ .mysql_root_password }}
{{ end -}} {{ end -}}
{{ with nomadVar "secrets/postgres" }} {{ with nomadVar (print "nomad/jobs/" (env "NOMAD_JOB_ID")) -}}
POSTGRES_HOST=127.0.0.1
POSTGRES_PORT=5432
POSTGRES_USER={{ .superuser }}
POSTGRES_PASSWORD={{ .superuser_password }}
{{ end -}}
{{ with nomadVar (print "nomad/jobs/" (index (env "NOMAD_JOB_ID" | split "/") 0)) -}}
BACKUP_PASSPHRASE={{ .backup_passphrase }} BACKUP_PASSPHRASE={{ .backup_passphrase }}
RCLONE_FTP_HOST={{ .nas_ftp_host }} RCLONE_FTP_HOST={{ .nas_ftp_host }}
RCLONE_FTP_USER={{ .nas_ftp_user }} RCLONE_FTP_USER={{ .nas_ftp_user }}
RCLONE_FTP_PASS={{ .nas_ftp_pass.Value | toJSON }} RCLONE_FTP_PASS={{ .nas_ftp_pass.Value | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true RCLONE_FTP_NO_CHECK_CERTIFICATE=true
AWS_ACCESS_KEY_ID={{ .nas_minio_access_key_id }}
AWS_SECRET_ACCESS_KEY={{ .nas_minio_secret_access_key }}
{{ end -}} {{ end -}}
EOF EOF
destination = "secrets/db.env" destination = "secrets/db.env"
env = true env = true
} }
template { template {
# Build jobs based on node # Build jobs based on node
data = <<EOF data = <<EOF
# Current node is {{ env "node.unique.name" }} {{ env "node.unique.id" }} # Current node is {{ env "node.unique.name" }} {{ env "node.unique.id" }}
%{ for job_file in fileset(module_path, "jobs/*.hcl") ~} %{~ for job_file in fileset(module_path, "jobs/*.hcl") }
{{ range nomadService 1 "backups" "${trimsuffix(basename(job_file), ".hcl")}" -}} {{ range nomadService "${trimsuffix(basename(job_file), ".hcl")}" -}}
# ${trimsuffix(basename(job_file), ".hcl")} .Node {{ .Node }} # ${trimsuffix(basename(job_file), ".hcl")} .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}} {{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/${job_file}")} ${file("${module_path}/${job_file}")}
{{- end }}
{{ end -}} {{- end }}
{{ end -}}
%{ endfor ~} %{ endfor ~}
# Dummy job to keep task healthy on nodes without any stateful services
job "Dummy" {
schedule = "@daily"
config {
repo = "/local/dummy-repo"
passphrase = env("BACKUP_PASSPHRASE")
}
backup {
paths = ["/local/node-jobs.hcl"]
}
forget {
KeepLast = 1
}
}
EOF EOF
destination = "local/node-jobs.hcl" destination = "local/node-jobs.hcl"
} }
resources { resources {
cpu = 50 cpu = 50
memory = 500 memory = 256
} }
} }
@ -190,8 +139,8 @@ job "Dummy" {
} }
config { config {
image = "iamthefij/stunnel:latest" image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@ -199,6 +148,15 @@ job "Dummy" {
memory = 100 memory = 100
} }
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
}
template { template {
data = <<EOF data = <<EOF
syslog = no syslog = no
@ -208,35 +166,22 @@ delay = yes
[mysql_client] [mysql_client]
client = yes client = yes
accept = 127.0.0.1:3306 accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" }} {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }} connect = {{ .Address }}:{{ .Port }}
{{ end }} {{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
[postgres_client]
client = yes
accept = 127.0.0.1:5432
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "postgres-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/postgres_stunnel_psk.txt
EOF EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf" destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
} }
# TODO: Get psk for backup jobs despite multiple job declarations
# Probably should use variable ACLs to grant each node job to this path
template { template {
data = <<EOF data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/backups" }}{{ .psk }}{{ end -}} {{- with nomadVar (print "nomad/jobs/" (env "NOMAD_JOB_ID")) }}{{ .mysql_stunnel_psk }}{{ end -}}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt" destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
} }
template {
data = <<EOF
{{- with nomadVar "secrets/postgres/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/postgres_stunnel_psk.txt"
}
} }
} }
} }
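Regarding the TODO above ("Probably should use variable ACLs to grant each node job to this path"): below is a minimal, untested Terraform sketch of that approach, following the nomad_acl_policy / job_acl pattern already used elsewhere in this diff. The resource and policy names are illustrative only, the node set mirrors the hard-coded list in backups/backups.tf, and the non-oneoff backup job would need an equivalent policy without for_each.

resource "nomad_acl_policy" "backup_oneoff_psks" {
  for_each    = toset(["n1", "n2", "pi4"])
  name        = "backup-oneoff-${each.key}-psks"
  description = "Give backup-oneoff-${each.key} access to stunnel PSKs"
  rules_hcl   = <<EOH
namespace "default" {
  variables {
    path "secrets/mysql/allowed_psks/backups" {
      capabilities = ["read"]
    }
    path "secrets/postgres/allowed_psks/backups" {
      capabilities = ["read"]
    }
  }
}
EOH

  job_acl {
    job_id = "backup-oneoff-${each.key}"
  }
}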

View File

@ -0,0 +1,28 @@
resource "nomad_job" "backup" {
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = null,
})
}
resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "n2", "pi4"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = each.key,
})
}
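For the "# TODO: Get list of nomad hosts dynamically" above: a minimal, untested sketch of the commented-out Consul approach, assuming the Consul provider is configured and that Nomad clients register a service in Consul (the service name below is an assumption; use whatever the agents actually register). It simply swaps the hard-coded node set in for_each for the discovered node names.

data "consul_service" "nomad" {
  # Assumed service name; adjust to the Nomad client service registered in Consul
  name = "nomad-client"
}

resource "nomad_job" "backup-oneoff" {
  for_each = toset([
    for node in data.consul_service.nomad.service :
    node.node_name
  ])

  hcl2 {
    enabled = true
  }

  jobspec = templatefile("${path.module}/backup.nomad", {
    module_path = path.module,
    batch_node  = each.key,
  })
}

This would keep the per-node oneoff jobs in step with the cluster without editing the list by hand.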

View File

@ -2,12 +2,8 @@ job "authelia" {
schedule = "@daily" schedule = "@daily"
config { config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/authelia" repo = "rclone::ftp,env_auth:/nomad/authelia"
passphrase = env("BACKUP_PASSPHRASE") passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
} }
task "Create local authelia dir" { task "Create local authelia dir" {

View File

@ -2,12 +2,8 @@ job "grafana" {
schedule = "@daily" schedule = "@daily"
config { config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/grafana" repo = "rclone::ftp,env_auth:/nomad/grafana"
passphrase = env("BACKUP_PASSPHRASE") passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
} }
task "Create local grafana dir" { task "Create local grafana dir" {

View File

@ -2,20 +2,16 @@ job "lidarr" {
schedule = "@daily" schedule = "@daily"
config { config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lidarr" repo = "rclone::ftp,env_auth:/nomad/lidarr"
passphrase = env("BACKUP_PASSPHRASE") passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
} }
task "Backup main database" { task "Backup main database" {
postgres "Backup database" { postgres "Backup database" {
hostname = env("POSTGRES_HOST") hostname = env("MYSQL_HOST")
port = env("POSTGRES_PORT") port = env("MYSQL_PORT")
username = env("POSTGRES_USER") username = env("MYSQL_USER")
password = env("POSTGRES_PASSWORD") password = env("MYSQL_PASSWORD")
database = "lidarr" database = "lidarr"
no_tablespaces = true no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr.sql" dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr.sql"
@ -24,10 +20,10 @@ job "lidarr" {
task "Backup logs database" { task "Backup logs database" {
postgres "Backup database" { postgres "Backup database" {
hostname = env("POSTGRES_HOST") hostname = env("MYSQL_HOST")
port = env("POSTGRES_PORT") port = env("MYSQL_PORT")
username = env("POSTGRES_USER") username = env("MYSQL_USER")
password = env("POSTGRES_PASSWORD") password = env("MYSQL_PASSWORD")
database = "lidarr-logs" database = "lidarr-logs"
no_tablespaces = true no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr-logs.sql" dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr-logs.sql"
@ -38,11 +34,7 @@ job "lidarr" {
paths = ["/data/nas-container/lidarr"] paths = ["/data/nas-container/lidarr"]
backup_opts { backup_opts {
Exclude = [ Exclude = ["lidarr_backup_*.zip"]
"lidarr_backup_*.zip",
"/data/nas-container/lidarr/MediaCover",
"/data/nas-container/lidarr/logs",
]
Host = "nomad" Host = "nomad"
} }

View File

@ -2,12 +2,8 @@ job "lldap" {
schedule = "@daily" schedule = "@daily"
config { config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lldap" repo = "rclone::ftp,env_auth:/nomad/lldap"
passphrase = env("BACKUP_PASSPHRASE") passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
} }
task "Create local backup dir" { task "Create local backup dir" {

View File

@ -2,12 +2,8 @@ job "nzbget" {
schedule = "@daily" schedule = "@daily"
config { config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/nzbget" repo = "rclone::ftp,env_auth:/nomad/nzbget"
passphrase = env("BACKUP_PASSPHRASE") passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
} }
backup { backup {

View File

@ -2,12 +2,8 @@ job "photoprism" {
schedule = "10 * * * *" schedule = "10 * * * *"
config { config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/photoprism" repo = "rclone::ftp,env_auth:/nomad/photoprism"
passphrase = env("BACKUP_PASSPHRASE") passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
} }
task "Create local photoprism dir" { task "Create local photoprism dir" {
@ -36,9 +32,6 @@ job "photoprism" {
backup_opts { backup_opts {
Host = "nomad" Host = "nomad"
Exclude = [
"/data/nas-container/photoprism/cache",
]
} }
restore_opts { restore_opts {

View File

@ -2,12 +2,8 @@ job "sabnzbd" {
schedule = "@daily" schedule = "@daily"
config { config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sabnzbd" repo = "rclone::ftp,env_auth:/nomad/sabnzbd"
passphrase = env("BACKUP_PASSPHRASE") passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
} }
backup { backup {

View File

@ -0,0 +1,52 @@
job "sonarr" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/sonarr"
passphrase = env("BACKUP_PASSPHRASE")
}
task "Backup main database" {
sqlite "Backup database" {
path = "/data/sonarr/sonarr.db"
dump_to = "/data/sonarr/Backups/sonarr.db.bak"
}
}
task "Backup logs database" {
sqlite "Backup database" {
path = "/data/sonarr/logs.db"
dump_to = "/data/sonarr/Backups/logs.db.bak"
}
}
backup {
# Not dumping sqlite because sonarr makes dumps
paths = ["/data/sonarr"]
backup_opts {
Exclude = [
"sonarr_backup_*.zip",
"*.db",
"*.db-shm",
"*.db-wal",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -2,27 +2,19 @@ module "diun" {
source = "./service" source = "./service"
name = "diun" name = "diun"
image = "crazymax/diun:4.27" image = "crazymax/diun:4.24"
args = ["serve", "--log-level=debug"] args = ["serve", "--log-level=debug"]
sticky_disk = true
env = { env = {
DIUN_DB_PATH = "$${NOMAD_ALLOC_DIR}/data/diun.db" DIUN_DB_PATH = "$${NOMAD_TASK_DIR}/diun.db"
DIUN_WATCH_SCHEDULE = "0 */6 * * *" DIUN_WATCH_SCHEDULE = "0 */6 * * *"
DIUN_PROVIDERS_NOMAD_WATCHBYDEFAULT = true DIUN_PROVIDERS_NOMAD_WATCHBYDEFAULT = true
DIUN_DEFAULTS_WATCHREPO = true
DIUN_DEFAULTS_SORTTAGS = "semver"
DIUN_DEFAULTS_INCLUDETAGS = "^\\d+(\\.\\d+){0,2}$"
# Nomad API # Nomad API
NOMAD_ADDR = "unix:///secrets/api.sock" # TODO: Use socket in $NOMAD_SECRETS_DIR/api.sock when we can assign workload ACLs with Terraform to
DIUN_PROVIDERS_NOMAD = true # allow read access. Will need to update template to allow passing token by env
DIUN_PROVIDERS_NOMAD_SECRETID = "$${NOMAD_TOKEN}" NOMAD_ADDR = "http://$${attr.unique.network.ip-address}:4646/"
} DIUN_PROVIDERS_NOMAD = true
task_identity = {
env = true
} }
templates = [ templates = [
@ -39,16 +31,3 @@ module "diun" {
}, },
] ]
} }
resource "nomad_acl_policy" "diun_query_jobs" {
name = "diun-query-jobs"
description = "Allow diun to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = module.diun.job_id
}
}

View File

@ -1,116 +0,0 @@
module "gitea" {
source = "./service"
name = "git"
image = "gitea/gitea:1.21"
resources = {
cpu = 200
memory = 512
}
env = {
# Custom files should be part of the task
GITEA_WORK_DIR = "$${NOMAD_TASK_DIR}"
GITEA_CUSTOM = "$${NOMAD_TASK_DIR}/custom"
}
ingress = true
service_port = 3000
use_wesher = var.use_wesher
ports = [
{
name = "ssh"
to = 22
}
]
service_check = {
path = "/api/healthz"
}
custom_services = [
{
name = "git-ssh"
port = "ssh"
tags = [
"traefik.enable=true",
"traefik.tcp.routers.git-ssh.entryPoints=gitssh",
"traefik.tcp.routers.git-ssh.rule=HostSNI(`*`)",
"traefik.tcp.routers.git-ssh.tls=false",
]
},
]
use_smtp = true
mysql_bootstrap = {
enabled = true
}
host_volumes = [
{
name = "gitea-data"
dest = "/data"
read_only = false
},
]
# TODO: Bootstrap OIDC with
# su -- git gitea admin auth add-oauth --name authelia --provider openidConnect --key gitea --secret "{{ .oidc_secret }}" --auto-discover-url https://authelia.thefij.rocks/.well-known/openid-configuration --skip-local-2fa
templates = [
{
data = <<EOF
{{ with nomadVar "nomad/jobs/git" }}
GITEA__server__DOMAIN=git.thefij.rocks
GITEA__server__SSH_PORT=2222
GITEA__server__ROOT_URL=https://git.thefij.rocks
GITEA__security__INSTALL_LOCK=true
GITEA__database__DB_TYPE=mysql
GITEA__database__HOST=127.0.0.1:3306
GITEA__database__NAME={{ .db_name }}
GITEA__database__USER={{ .db_user }}
GITEA__service__DISABLE_REGISTRATION=false
GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION=true
GITEA__service__SHOW_REGISTRATION_BUTTON=false
GITEA__openid__ENABLE_OPENID_SIGNIN=true
GITEA__openid__ENABLE_OPENID_SIGNUP=true
GITEA__openid__WHITELISTED_URIS=authelia.thefij.rocks
GITEA__log__ROOT_PATH={{ env "NOMAD_TASK_DIR" }}/log
GITEA__mailer__ENABLED=true
GITEA__mailer__FROM={{ .smtp_sender }}
GITEA__session__provider=db
{{ end }}
EOF
env = true
mount = false
dest = "env"
},
# TODO: Gitea writes these out to the ini file in /local anyway
# Find some way to get it to write to /secrets
{
data = <<EOF
{{ with nomadVar "nomad/jobs/git" }}
GITEA__security__SECRET_KEY="{{ .secret_key }}"
GITEA__database__PASSWD={{ .db_pass }}
{{ end }}
{{ with nomadVar "secrets/smtp" }}
GITEA__mailer__SMTP_ADDR={{ .server }}
GITEA__mailer__SMTP_PORT={{ .port }}
GITEA__mailer__USER={{ .user }}
GITEA__mailer__PASSWD={{ .password }}
{{ end }}
EOF
env = true
mount = false
dest = "env"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
}
]
}

206
services/ip-dvr.nomad Normal file
View File

@ -0,0 +1,206 @@
job "ipdvr" {
region = "global"
datacenters = ["dc1"]
type = "service"
group "sabnzbd" {
network {
mode = "bridge"
port "main" {
host_network = "wesher"
to = 8080
}
}
volume "sabnzbd-config" {
type = "host"
read_only = false
source = "sabnzbd-config"
}
volume "media-downloads" {
type = "host"
read_only = false
source = "media-downloads"
}
service {
name = "sabnzbd"
provider = "nomad"
port = "main"
tags = [
"traefik.enable=true",
"traefik.http.routers.sabnzbd.entryPoints=websecure",
]
}
task "sabnzbd" {
driver = "docker"
config {
image = "linuxserver/sabnzbd"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "sabnzbd-config"
destination = "/config"
read_only = false
}
volume_mount {
volume = "media-downloads"
destination = "/downloads"
read_only = false
}
resources {
cpu = 400
memory = 500
memory_max = 800
}
}
}
group "nzbget" {
network {
mode = "bridge"
port "main" {
host_network = "wesher"
static = 6789
}
}
volume "nzbget-config" {
type = "host"
read_only = false
source = "nzbget-config"
}
volume "media-downloads" {
type = "host"
read_only = false
source = "media-downloads"
}
service {
name = "nzbget"
provider = "nomad"
port = "main"
tags = [
"traefik.enable=true",
"traefik.http.routers.nzbget.entryPoints=websecure",
]
}
task "nzbget" {
driver = "docker"
config {
image = "linuxserver/nzbget"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "nzbget-config"
destination = "/config"
read_only = false
}
volume_mount {
volume = "media-downloads"
destination = "/downloads"
read_only = false
}
resources {
cpu = 200
memory = 300
memory_max = 500
}
}
}
group "sonarr" {
network {
mode = "bridge"
port "main" {
host_network = "wesher"
to = 8989
}
}
volume "sonarr-data" {
type = "host"
read_only = false
source = "sonarr-data"
}
volume "media-write" {
type = "host"
read_only = false
source = "media-write"
}
service {
name = "sonarr"
provider = "nomad"
port = "main"
tags = [
"traefik.enable=true",
"traefik.http.routers.sonarr.entryPoints=websecure",
]
}
task "sonarr" {
driver = "docker"
config {
image = "linuxserver/sonarr"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "sonarr-data"
destination = "/config"
read_only = false
}
volume_mount {
volume = "media-write"
destination = "/media"
read_only = false
}
resources {
cpu = 100
memory = 450
memory_max = 700
}
}
}
}

View File

@ -1,25 +0,0 @@
module "languagetool" {
source = "./service"
name = "languagetool"
image = "ghcr.io/erikvl87/docker-languagetool/languagetool:4.8"
ingress = true
service_port = 8010
use_wesher = var.use_wesher
env = {
Java_Xmx = "512m"
}
service_check = {
path = "/v2/healthcheck"
}
# Possibility to use a volume over nfs to host n-gram datasets
# https://github.com/Erikvl87/docker-languagetool/pkgs/container/docker-languagetool%2Flanguagetool#using-n-gram-datasets
resources = {
cpu = 100
memory = 512
}
}

View File

@ -2,11 +2,10 @@ module "lidarr" {
source = "./service" source = "./service"
name = "lidarr" name = "lidarr"
image = "lscr.io/linuxserver/lidarr:1.3.5" image = "linuxserver/lidarr"
ingress = true ingress = true
service_port = 8686 service_port = 8686
use_wesher = var.use_wesher
use_postgres = true use_postgres = true
postgres_bootstrap = { postgres_bootstrap = {
@ -40,4 +39,9 @@ module "lidarr" {
cpu = 500 cpu = 500
memory = 1500 memory = 1500
} }
stunnel_resources = {
cpu = 100
memory = 100
}
} }

View File

@ -0,0 +1,7 @@
module "backups" {
source = "./backups"
}
resource "nomad_job" "ipdvr" {
jobspec = file("${path.module}/ip-dvr.nomad")
}

View File

@ -6,8 +6,6 @@ module "media-library" {
args = ["caddy", "file-server", "--root", "/mnt/media", "--browse"] args = ["caddy", "file-server", "--root", "/mnt/media", "--browse"]
ingress = true ingress = true
service_port = 80 service_port = 80
use_wesher = var.use_wesher
host_volumes = [ host_volumes = [
{ {
name = "media-read" name = "media-read"

View File

@ -0,0 +1,21 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}

View File

@ -0,0 +1,60 @@
job "multimedia" {
datacenters = ["dc1"]
type = "service"
group "multimedia" {
count = 1
network {
mode = "bridge"
port "web" {
host_network = "wesher"
to = 80
}
}
volume "media-read" {
type = "host"
read_only = true
source = "media-read"
}
service {
name = "library"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.library.entryPoints=websecure",
]
}
task "main" {
driver = "docker"
volume_mount {
volume = "media-read"
destination = "/mnt/media"
read_only = true
}
config {
image = "caddy"
args = [
"caddy",
"file-server",
"--root",
"/mnt/media",
"--browse",
]
ports = ["web"]
}
resources {
cpu = 50
memory = 250
}
}
}
}

7
services/media/media.tf Normal file
View File

@ -0,0 +1,7 @@
resource "nomad_job" "caddy" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/caddy.nomad")
}

Some files were not shown because too many files have changed in this diff.